diff --git a/.coveragerc b/.coveragerc
index 25d85b805..2130ec2ad 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -3,6 +3,7 @@ exclude_lines =
     if __name__ == .__main__.:
     raise NotImplemented.
+    return NotImplemented
     def __repr__
 [run]
diff --git a/.gitignore b/.gitignore
index fb9bd51de..04480a290 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,5 @@ tests/file.tmp
 .eggs/
 .mypy_cache/
 *.tmp
+.venv/
+htmlcov/
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index ac9322211..824eb0edc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,4 @@
-dist: bionic
+dist: focal
 language: python
 services:
   - docker
@@ -26,11 +26,13 @@ install:
       fi
       docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh &
     fi
+    travis_retry pip install -r requirements-dev.txt
+    travis_retry pip install docker>=2.5.1
     travis_retry pip install boto==2.45.0
     travis_retry pip install boto3
     travis_retry pip install dist/moto*.gz
     travis_retry pip install coveralls==1.1
-    travis_retry pip install -r requirements-dev.txt
+    travis_retry pip install coverage==4.5.4
     if [ "$TEST_SERVER_MODE" = "true" ]; then
       python wait_for.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 732dad23a..8d31409f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,224 @@
 Moto Changelog
 ===================
 
+Unreleased
+-----
+    * Reduced dependency overhead.
+      It is now possible to install dependencies for only specific services using:
+      pip install moto[service1,service2].
+      See the README for more information.
+
+
+
+1.3.16
+-----
+Full list of PRs merged in this release:
+https://github.com/spulec/moto/pulls?q=is%3Apr+is%3Aclosed+merged%3A2019-11-14..2020-09-07
+
+
+    General Changes:
+    * The scaffold.py script has been fixed to make it easier to scaffold new services.
+      See the README for an introduction.
+
+    New Services:
+    * Application Autoscaling
+    * Code Commit
+    * Code Pipeline
+    * Elastic Beanstalk
+    * Kinesis Video
+    * Kinesis Video Archived Media
+    * Managed Blockchain
+    * Resource Access Manager (ram)
+    * Sagemaker
+
+    New Methods:
+    * Athena:
+        * create_named_query
+        * get_named_query
+        * get_work_group
+        * start_query_execution
+        * stop_query_execution
+    * API Gateway:
+        * create_authorizer
+        * create_domain_name
+        * create_model
+        * delete_authorizer
+        * get_authorizer
+        * get_authorizers
+        * get_domain_name
+        * get_domain_names
+        * get_model
+        * get_models
+        * update_authorizer
+    * Autoscaling:
+        * enter_standby
+        * exit_standby
+        * terminate_instance_in_auto_scaling_group
+    * CloudFormation:
+        * get_template_summary
+    * CloudWatch:
+        * describe_alarms_for_metric
+        * get_metric_data
+    * CloudWatch Logs:
+        * delete_subscription_filter
+        * describe_subscription_filters
+        * put_subscription_filter
+    * Cognito IDP:
+        * associate_software_token
+        * create_resource_server
+        * confirm_sign_up
+        * initiate_auth
+        * set_user_mfa_preference
+        * sign_up
+        * verify_software_token
+    * DynamoDB:
+        * describe_continuous_backups
+        * transact_get_items
+        * transact_write_items
+        * update_continuous_backups
+    * EC2:
+        * create_vpc_endpoint
+        * describe_vpc_classic_link
+        * describe_vpc_classic_link_dns_support
+        * describe_vpc_endpoint_services
+        * disable_vpc_classic_link
+        * disable_vpc_classic_link_dns_support
+        * enable_vpc_classic_link
+        * enable_vpc_classic_link_dns_support
+        * register_image
+    * ECS:
+        * create_task_set
+        * delete_task_set
+        * describe_task_set
+        * update_service_primary_task_set
+        * update_task_set
+    * Events:
+        * delete_event_bus
+        * create_event_bus
+        * list_event_buses
+        * list_tags_for_resource
+        * tag_resource
+        * untag_resource
+    * Glue:
+        * get_databases
+    * IAM:
+        * delete_group
+        * delete_instance_profile
+        * delete_ssh_public_key
+        * get_account_summary
+        * get_ssh_public_key
+        * list_user_tags
+        * list_ssh_public_keys
+        * update_ssh_public_key
+        * upload_ssh_public_key
+    * IOT:
+        * cancel_job
+        * cancel_job_execution
+        * create_policy_version
+        * delete_job
+        * delete_job_execution
+        * describe_endpoint
+        * describe_job_execution
+        * delete_policy_version
+        * get_policy_version
+        * get_job_document
+        * list_attached_policies
+        * list_job_executions_for_job
+        * list_job_executions_for_thing
+        * list_jobs
+        * list_policy_versions
+        * set_default_policy_version
+        * register_certificate_without_ca
+    * KMS:
+        * untag_resource
+    * Lambda:
+        * delete_function_concurrency
+        * get_function_concurrency
+        * put_function_concurrency
+    * Organizations:
+        * describe_create_account_status
+        * deregister_delegated_administrator
+        * disable_policy_type
+        * enable_policy_type
+        * list_delegated_administrators
+        * list_delegated_services_for_account
+        * list_tags_for_resource
+        * register_delegated_administrator
+        * tag_resource
+        * untag_resource
+        * update_organizational_unit
+    * S3:
+        * delete_bucket_encryption
+        * delete_public_access_block
+        * get_bucket_encryption
+        * get_public_access_block
+        * put_bucket_encryption
+        * put_public_access_block
+    * S3 Control:
+        * delete_public_access_block
+        * get_public_access_block
+        * put_public_access_block
+    * SecretsManager:
+        * get_resource_policy
+        * update_secret
+    * SES:
+        * create_configuration_set
+        * create_configuration_set_event_destination
+        * create_receipt_rule_set
+        * create_receipt_rule
+        * create_template
+        * get_template
+        * get_send_statistics
+        * list_templates
+    * STS:
+        * assume_role_with_saml
+    * SSM:
+        * create_document
+        * delete_document
+        * describe_document
+        * get_document
+        * list_documents
+        * update_document
+        * update_document_default_version
+    * SWF:
+        * undeprecate_activity_type
+        * undeprecate_domain
+        * undeprecate_workflow_type
+
+    General Updates:
+    * API Gateway - create_rest_api now supports policy-parameter
+    * Autoscaling - describe_auto_scaling_instances now supports InstanceIds-parameter
+    * AutoScalingGroups - now support launch templates
+    * CF - Now supports DependsOn-configuration
+    * CF - Now supports FN::Transform AWS::Include mapping
+    * CF - Now supports update and deletion of Lambdas
+    * CF - Now supports creation, update and deletion of EventBus (Events)
+    * CF - Now supports update of Rules (Events)
+    * CF - Now supports creation, update and deletion of EventSourceMappings (AWS Lambda)
+    * CF - Now supports update and deletion of Kinesis Streams
+    * CF - Now supports creation of DynamoDB streams
+    * CF - Now supports deletion of DynamoDB tables
+    * CF - list_stacks now supports the status_filter-parameter
+    * Cognito IDP - list_users now supports filter-parameter
+    * DynamoDB - GSI/LSI's now support ProjectionType=KEYS_ONLY
+    * EC2 - create_route now supports the NetworkInterfaceId-parameter
+    * EC2 - describe_instances now supports additional filters (owner-id)
+    * EC2 - describe_instance_status now supports additional filters (instance-state-name, instance-state-code)
+    * EC2 - describe_nat_gateways now supports additional filters (nat-gateway-id, vpc-id, subnet-id, state)
+    * EC2 - describe_vpn_gateways now supports additional filters (attachment.vpc_id, attachment.state, vpn-gateway-id, type)
+    * IAM - list_users now supports path_prefix-parameter
+    * IOT - list_thing_groups now supports parent_group, name_prefix_filter, recursive-parameters
+    * S3 - delete_objects now supports deletion of specific VersionIds
+    * SecretsManager - list_secrets now supports filters-parameter
+    * SFN - start_execution now receives and validates input
+    * SNS - Now supports sending a message directly to a phone number
+    * SQS - MessageAttributes now support labeled DataTypes
+
+1.3.15
+-----
+
+This release broke dependency management for a lot of services - please upgrade to 1.3.16.
+
 1.3.14
 -----
 
diff --git a/CONFIG_README.md b/CONFIG_README.md
index 356bb87a0..b0ae42181 100644
--- a/CONFIG_README.md
+++ b/CONFIG_README.md
@@ -23,8 +23,8 @@
 However, this will only work on resource types that have this enabled.
 
 ### Current enabled resource types:
 
-1. S3
-
+1. S3 (all)
+1. IAM (Role, Policy)
 
 ## Developer Guide
 
@@ -53,15 +53,14 @@
 An example of the above is implemented for S3. You can see that by looking at:
 
 1. `moto/s3/config.py`
 1. `moto/config/models.py`
 
-As well as the corresponding unit tests in:
+### Testing
+For each resource type, you will need to write tests for a few separate areas:
 
-1. `tests/s3/test_s3.py`
-1. `tests/config/test_config.py`
+- Test the backend queries to ensure discovered resources come back (i.e. for `IAM::Policy`, write `tests.tests_iam.test_policy_list_config_discovered_resources`). For writing these tests, you must not make use of `boto` to create resources; you will need to use the backend model methods to provision them. This is to make tests compatible with the moto server. You must write tests for the resource type that cover both listing and object fetching. A sketch of such a test follows this list.
 
-Note for unit testing, you will want to add a test to ensure that you can query all the resources effectively. For testing this feature,
-the unit tests for the `ConfigQueryModel` will not make use of `boto` to create resources, such as S3 buckets. You will need to use the
-backend model methods to provision the resources. This is to make tests compatible with the moto server. You should absolutely make tests
-in the resource type to test listing and object fetching.
+- Test the config dict for all scenarios (i.e. for `IAM::Policy`, write `tests.tests_iam.test_policy_config_dict`). For writing this test, you'll need to create resources in the same way as the first test (without using `boto`), in every meaningful configuration that would produce a different config dict. Then, query the backend and ensure each of the dicts is as you expect.
+
+- Test that everything works end to end with the `boto` clients (i.e. for `IAM::Policy`, write `tests.tests_iam.test_policy_config_client`). The main items to test are `boto.client('config').list_discovered_resources()`, `boto.client('config').list_aggregate_discovered_resources()`, `boto.client('config').batch_get_resource_config()`, and `boto.client('config').batch_get_aggregate_resource_config()`. This test doesn't have to be super thorough, but it basically verifies that the frontend and backend logic work together and return the correct resources. Beware: the aggregate methods take parameters with capitalized first letters (i.e. `Limit`), while the non-aggregate methods use lowercase first letters (i.e. `limit`).
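To make the first bullet concrete, here is a minimal sketch of such a backend-level test. It assumes the `list_config_service_resources` signature declared on `ConfigQueryModel` in `moto/core/models.py` and the `s3_backend`/`s3_config_query` singletons from the S3 implementation; treat the exact names and signatures as assumptions to verify against the real code rather than a definitive recipe.

```python
# Sketch only -- backend names and call signatures are assumptions; check
# them against moto/s3/models.py and moto/core/models.py before copying.
from moto.s3.config import s3_config_query
from moto.s3.models import s3_backend


def test_s3_list_config_discovered_resources():
    # Provision resources through the backend model, NOT through boto,
    # so the test also works when run against the moto server.
    for i in range(5):
        s3_backend.create_bucket("bucket{}".format(i), "us-west-2")

    # Query the ConfigQueryModel directly: the discovered resources should
    # come back the way AWS Config's list_discovered_resources reports them.
    result, next_token = s3_config_query.list_config_service_resources(
        None, None, 100, None
    )
    assert len(result) == 5
    assert next_token is None
```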
 
 ### Listing
 S3 is currently the model implementation, but it is also odd in that S3 is a global resource type with regional resource residency.
@@ -117,4 +116,4 @@
 return for it. When implementing resource config fetching, you will need to return at a minimum `None` if the resource is not found,
 or a `dict` that looks like what AWS Config would return.
 
-It's recommended to read the comment for the `ConfigQueryModel` 's `get_config_resource` function in [base class here](moto/core/models.py).
+It's recommended to read the comment for the `ConfigQueryModel` 's `get_config_resource` function in [base class here](moto/core/models.py).
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 40da55ccf..e4a189e5e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,29 +1,96 @@
-### Contributing code
+# Contributing code
 
 Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md), you can expect to be treated with respect at all times when interacting with this project.
 
 ## Running the tests locally
 
-Moto has a Makefile which has some helpful commands for getting setup. You should be able to run `make init` to install the dependencies and then `make test` to run the tests.
+Moto has a [Makefile](./Makefile) which has some helpful commands for getting set up.
+You should be able to run `make init` to install the dependencies and then `make test` to run the tests.
 
-## Is there a missing feature?
+*NB. On first run, some tests might take a while to execute, especially the Lambda ones, because they may need to download a Docker image before they can execute.*
+
+## Linting
+
+Run `make lint` or `black --check moto tests` to verify whether your code conforms to the guidelines.
+
+## Getting to grips with the codebase
+
+Moto maintains a list of [good first issues](https://github.com/spulec/moto/contribute) which you may want to look at before
+implementing a whole new endpoint.
+
+## Missing features
 
 Moto is easier to contribute to than you probably think.
There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services.
 
 How to teach Moto to support a new AWS endpoint:
 
-* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done.
+* Search for an existing [issue](https://github.com/spulec/moto/issues) that matches what you want to achieve.
+* If one doesn't already exist, create a new issue describing what's missing. This is where we'll all talk about the new addition and help you get it done.
 * Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description.
 * Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`.
+* Implementing the feature itself can be done by creating a method called `import_certificate` in `moto/acm/responses.py`. It's considered good practice to deal with input/output formatting and validation in `responses.py`, and to create a method `import_certificate` in `moto/acm/models.py` that handles the actual import logic (a minimal sketch of this split follows the scaffolding notes below).
-* If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you.
+* If you can also implement the code that gets that test passing then great! If not, just ask the community for a hand and somebody will assist you.
 
-# Maintainers
+## Before pushing changes to GitHub
 
-## Releasing a new version of Moto
+1. Run `black moto/ tests/` over your code to ensure that it is properly formatted
+1. Run `make test` to ensure your tests are passing
 
-You'll need a PyPi account and a Dockerhub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image.
+## Python versions
+
+moto currently supports both Python 2 and 3, so make sure your tests pass against both major versions of Python.
+
+## Missing services
+
+Implementing a new service from scratch is more work, but still quite straightforward. All the code that intercepts network requests to `*.amazonaws.com` is already handled for you in `moto/core` - all that's necessary for new services to be recognized is to create a new decorator and determine which URLs should be intercepted.
+
+See this PR for an example of what's involved in creating a new service: https://github.com/spulec/moto/pull/2409/files
+
+Note the `urls.py` that redirects all incoming URL requests to a generic `dispatch` method, which in turn will call the appropriate method in `responses.py`.
+
+If you want more control over incoming requests or their bodies, it is possible to redirect specific requests to a custom method. See this PR for an example: https://github.com/spulec/moto/pull/2957/files
+
+### Generating template code for new services
+
+Using `scripts/scaffold.py`, you can automatically generate template code for new services and for new methods of existing services. The script looks up the API specification of the given boto3 method and adds the necessary code, including request and response parameters. In some cases, it fails to generate working code.
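To make the `responses.py`/`models.py` split described above concrete before the scaffolding walkthrough, here is a minimal sketch. The class names and the ARN are illustrative assumptions, not moto's actual ACM implementation; in real moto code the response class extends `BaseResponse` and is wired up through `urls.py` as described above.

```python
# Hypothetical sketch of the responses.py/models.py split -- names, the ARN,
# and the wiring are illustrative assumptions, not moto's actual ACM code.
import json


# moto/acm/models.py -- owns the state and the actual business logic
class CertificateBackend:
    def __init__(self):
        self.certificates = {}

    def import_certificate(self, certificate, private_key):
        arn = "arn:aws:acm:us-east-1:123456789012:certificate/example"
        self.certificates[arn] = (certificate, private_key)
        return arn


# moto/acm/responses.py -- parses the request and formats the response,
# delegating the real work to the backend
class CertificateResponse:
    backend = CertificateBackend()

    def import_certificate(self, body):
        params = json.loads(body)
        arn = self.backend.import_certificate(
            params["Certificate"], params["PrivateKey"]
        )
        return json.dumps({"CertificateArn": arn})
```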
+Please try it out by running `python scripts/scaffold.py`:
+
+```bash
+$ python scripts/scaffold.py
+Select service: codedeploy
+
+==Current Implementation Status==
+[ ] add_tags_to_on_premises_instances
+...
+[ ] create_deployment
+...
+[ ] update_deployment_group
+=================================
+Select Operation: create_deployment
+
+
+    Initializing service    codedeploy
+    creating    moto/codedeploy
+    creating    moto/codedeploy/models.py
+    creating    moto/codedeploy/exceptions.py
+    creating    moto/codedeploy/__init__.py
+    creating    moto/codedeploy/responses.py
+    creating    moto/codedeploy/urls.py
+    creating    tests/test_codedeploy
+    creating    tests/test_codedeploy/test_server.py
+    creating    tests/test_codedeploy/test_codedeploy.py
+    inserting code    moto/codedeploy/responses.py
+    inserting code    moto/codedeploy/models.py
+You will still need to add the mock into "__init__.py"
+```
+
+
+## Maintainers
+
+### Releasing a new version of Moto
+
+You'll need a PyPI account and a DockerHub account to release Moto. After we release a new PyPI package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image.
 
 * First, `scripts/bump_version` modifies the version and opens a PR
 * Then, merge the new pull request
 * Finally, generate and ship the new artifacts with `make publish`
-
diff --git a/Dockerfile b/Dockerfile
index 24d7c34ff..3c159633e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,22 +1,12 @@
-FROM alpine:3.6
-
-RUN apk add --no-cache --update \
-    gcc \
-    musl-dev \
-    python3-dev \
-    libffi-dev \
-    openssl-dev \
-    python3
+FROM python:3.7-slim
 
 ADD . /moto/
 ENV PYTHONUNBUFFERED 1
 
 WORKDIR /moto/
 
-RUN python3 -m ensurepip && \
-    rm -r /usr/lib/python*/ensurepip && \
-    pip3 --no-cache-dir install --upgrade pip setuptools && \
+RUN pip3 --no-cache-dir install --upgrade pip setuptools && \
     pip3 --no-cache-dir install ".[server]"
 
-ENTRYPOINT ["/usr/bin/moto_server", "-H", "0.0.0.0"]
+ENTRYPOINT ["/usr/local/bin/moto_server", "-H", "0.0.0.0"]
 
 EXPOSE 5000
diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index a863d483d..4ccc4e2dc 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,6 +1,8 @@
 ## accessanalyzer
-0% implemented
+
+0% implemented + - [ ] create_analyzer - [ ] create_archive_rule - [ ] delete_analyzer @@ -19,9 +21,12 @@ - [ ] untag_resource - [ ] update_archive_rule - [ ] update_findings +
## acm -38% implemented +
+38% implemented + - [X] add_tags_to_certificate - [X] delete_certificate - [ ] describe_certificate @@ -35,9 +40,12 @@ - [X] request_certificate - [ ] resend_validation_email - [ ] update_certificate_options +
## acm-pca -0% implemented +
+0% implemented + - [ ] create_certificate_authority - [ ] create_certificate_authority_audit_report - [ ] create_permission @@ -58,9 +66,12 @@ - [ ] tag_certificate_authority - [ ] untag_certificate_authority - [ ] update_certificate_authority +
## alexaforbusiness -0% implemented +
+0% implemented + - [ ] approve_skill - [ ] associate_contact_with_address_book - [ ] associate_device_with_network_profile @@ -154,9 +165,12 @@ - [ ] update_profile - [ ] update_room - [ ] update_skill_group +
## amplify -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_backend_environment - [ ] create_branch @@ -194,17 +208,20 @@ - [ ] update_branch - [ ] update_domain_association - [ ] update_webhook +
## apigateway -25% implemented +
+34% implemented + - [ ] create_api_key -- [ ] create_authorizer +- [X] create_authorizer - [ ] create_base_path_mapping - [X] create_deployment - [ ] create_documentation_part - [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model +- [X] create_domain_name +- [X] create_model - [ ] create_request_validator - [X] create_resource - [X] create_rest_api @@ -213,7 +230,7 @@ - [X] create_usage_plan_key - [ ] create_vpc_link - [ ] delete_api_key -- [ ] delete_authorizer +- [X] delete_authorizer - [ ] delete_base_path_mapping - [ ] delete_client_certificate - [X] delete_deployment @@ -239,8 +256,8 @@ - [ ] get_account - [ ] get_api_key - [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers +- [X] get_authorizer +- [X] get_authorizers - [ ] get_base_path_mapping - [ ] get_base_path_mappings - [ ] get_client_certificate @@ -251,8 +268,8 @@ - [ ] get_documentation_parts - [ ] get_documentation_version - [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names +- [X] get_domain_name +- [X] get_domain_names - [ ] get_export - [ ] get_gateway_response - [ ] get_gateway_responses @@ -260,9 +277,9 @@ - [X] get_integration_response - [X] get_method - [X] get_method_response -- [ ] get_model +- [X] get_model - [ ] get_model_template -- [ ] get_models +- [X] get_models - [ ] get_request_validator - [ ] get_request_validators - [X] get_resource @@ -297,7 +314,7 @@ - [ ] untag_resource - [ ] update_account - [ ] update_api_key -- [ ] update_authorizer +- [X] update_authorizer - [ ] update_base_path_mapping - [ ] update_client_certificate - [ ] update_deployment @@ -317,15 +334,21 @@ - [ ] update_usage - [ ] update_usage_plan - [ ] update_vpc_link +
## apigatewaymanagementapi -0% implemented +
+0% implemented + - [ ] delete_connection - [ ] get_connection - [ ] post_to_connection +
## apigatewayv2 -0% implemented +
+0% implemented + - [ ] create_api - [ ] create_api_mapping - [ ] create_authorizer @@ -337,6 +360,8 @@ - [ ] create_route - [ ] create_route_response - [ ] create_stage +- [ ] create_vpc_link +- [ ] delete_access_log_settings - [ ] delete_api - [ ] delete_api_mapping - [ ] delete_authorizer @@ -347,9 +372,12 @@ - [ ] delete_integration_response - [ ] delete_model - [ ] delete_route +- [ ] delete_route_request_parameter - [ ] delete_route_response - [ ] delete_route_settings - [ ] delete_stage +- [ ] delete_vpc_link +- [ ] export_api - [ ] get_api - [ ] get_api_mapping - [ ] get_api_mappings @@ -374,6 +402,8 @@ - [ ] get_stage - [ ] get_stages - [ ] get_tags +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api - [ ] reimport_api - [ ] tag_resource @@ -389,9 +419,13 @@ - [ ] update_route - [ ] update_route_response - [ ] update_stage +- [ ] update_vpc_link +
## appconfig -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_configuration_profile - [ ] create_deployment_strategy @@ -421,22 +455,28 @@ - [ ] update_deployment_strategy - [ ] update_environment - [ ] validate_configuration +
## application-autoscaling -0% implemented +
+20% implemented + - [ ] delete_scaling_policy - [ ] delete_scheduled_action - [ ] deregister_scalable_target -- [ ] describe_scalable_targets +- [X] describe_scalable_targets - [ ] describe_scaling_activities - [ ] describe_scaling_policies - [ ] describe_scheduled_actions - [ ] put_scaling_policy - [ ] put_scheduled_action -- [ ] register_scalable_target +- [X] register_scalable_target +
## application-insights -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_component - [ ] create_log_pattern @@ -453,6 +493,7 @@ - [ ] describe_problem_observations - [ ] list_applications - [ ] list_components +- [ ] list_configuration_history - [ ] list_log_pattern_sets - [ ] list_log_patterns - [ ] list_problems @@ -463,9 +504,12 @@ - [ ] update_component - [ ] update_component_configuration - [ ] update_log_pattern +
## appmesh -0% implemented +
+0% implemented + - [ ] create_mesh - [ ] create_route - [ ] create_virtual_node @@ -494,9 +538,12 @@ - [ ] update_virtual_node - [ ] update_virtual_router - [ ] update_virtual_service +
## appstream -0% implemented +
+0% implemented + - [ ] associate_fleet - [ ] batch_associate_user_stack - [ ] batch_disassociate_user_stack @@ -544,9 +591,12 @@ - [ ] update_fleet - [ ] update_image_permissions - [ ] update_stack +
## appsync -0% implemented +
+0% implemented + - [ ] create_api_cache - [ ] create_api_key - [ ] create_data_source @@ -588,31 +638,37 @@ - [ ] update_graphql_api - [ ] update_resolver - [ ] update_type +
## athena -10% implemented +
+36% implemented + - [ ] batch_get_named_query - [ ] batch_get_query_execution -- [ ] create_named_query +- [X] create_named_query - [X] create_work_group - [ ] delete_named_query - [ ] delete_work_group -- [ ] get_named_query +- [X] get_named_query - [ ] get_query_execution - [ ] get_query_results -- [ ] get_work_group +- [X] get_work_group - [ ] list_named_queries - [ ] list_query_executions - [ ] list_tags_for_resource - [X] list_work_groups -- [ ] start_query_execution -- [ ] stop_query_execution +- [X] start_query_execution +- [X] stop_query_execution - [ ] tag_resource - [ ] untag_resource - [ ] update_work_group +
## autoscaling -44% implemented +
+44% implemented + - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -667,18 +723,24 @@ - [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +
## autoscaling-plans -0% implemented +
+0% implemented + - [ ] create_scaling_plan - [ ] delete_scaling_plan - [ ] describe_scaling_plan_resources - [ ] describe_scaling_plans - [ ] get_scaling_plan_resource_forecast_data - [ ] update_scaling_plan +
## backup -0% implemented +
+0% implemented + - [ ] create_backup_plan - [ ] create_backup_selection - [ ] create_backup_vault @@ -690,8 +752,10 @@ - [ ] delete_recovery_point - [ ] describe_backup_job - [ ] describe_backup_vault +- [ ] describe_copy_job - [ ] describe_protected_resource - [ ] describe_recovery_point +- [ ] describe_region_settings - [ ] describe_restore_job - [ ] export_backup_plan_template - [ ] get_backup_plan @@ -708,6 +772,7 @@ - [ ] list_backup_plans - [ ] list_backup_selections - [ ] list_backup_vaults +- [ ] list_copy_jobs - [ ] list_protected_resources - [ ] list_recovery_points_by_backup_vault - [ ] list_recovery_points_by_resource @@ -716,15 +781,20 @@ - [ ] put_backup_vault_access_policy - [ ] put_backup_vault_notifications - [ ] start_backup_job +- [ ] start_copy_job - [ ] start_restore_job - [ ] stop_backup_job - [ ] tag_resource - [ ] untag_resource - [ ] update_backup_plan - [ ] update_recovery_point_lifecycle +- [ ] update_region_settings +
## batch -93% implemented +
+93% implemented + - [ ] cancel_job - [X] create_compute_environment - [X] create_job_queue @@ -741,9 +811,12 @@ - [X] terminate_job - [X] update_compute_environment - [X] update_job_queue +
## budgets -0% implemented +
+0% implemented + - [ ] create_budget - [ ] create_notification - [ ] create_subscriber @@ -758,9 +831,12 @@ - [ ] update_budget - [ ] update_notification - [ ] update_subscriber +
## ce -0% implemented +
+0% implemented + - [ ] create_cost_category_definition - [ ] delete_cost_category_definition - [ ] describe_cost_category_definition @@ -780,12 +856,16 @@ - [ ] get_usage_forecast - [ ] list_cost_category_definitions - [ ] update_cost_category_definition +
## chime -0% implemented +
+0% implemented + - [ ] associate_phone_number_with_user - [ ] associate_phone_numbers_with_voice_connector - [ ] associate_phone_numbers_with_voice_connector_group +- [ ] associate_signin_delegate_groups_with_account - [ ] batch_create_attendee - [ ] batch_create_room_membership - [ ] batch_delete_phone_number @@ -798,8 +878,10 @@ - [ ] create_bot - [ ] create_meeting - [ ] create_phone_number_order +- [ ] create_proxy_session - [ ] create_room - [ ] create_room_membership +- [ ] create_user - [ ] create_voice_connector - [ ] create_voice_connector_group - [ ] delete_account @@ -807,17 +889,20 @@ - [ ] delete_events_configuration - [ ] delete_meeting - [ ] delete_phone_number +- [ ] delete_proxy_session - [ ] delete_room - [ ] delete_room_membership - [ ] delete_voice_connector - [ ] delete_voice_connector_group - [ ] delete_voice_connector_origination +- [ ] delete_voice_connector_proxy - [ ] delete_voice_connector_streaming_configuration - [ ] delete_voice_connector_termination - [ ] delete_voice_connector_termination_credentials - [ ] disassociate_phone_number_from_user - [ ] disassociate_phone_numbers_from_voice_connector - [ ] disassociate_phone_numbers_from_voice_connector_group +- [ ] disassociate_signin_delegate_groups_from_account - [ ] get_account - [ ] get_account_settings - [ ] get_attendee @@ -828,6 +913,8 @@ - [ ] get_phone_number - [ ] get_phone_number_order - [ ] get_phone_number_settings +- [ ] get_proxy_session +- [ ] get_retention_settings - [ ] get_room - [ ] get_user - [ ] get_user_settings @@ -835,48 +922,67 @@ - [ ] get_voice_connector_group - [ ] get_voice_connector_logging_configuration - [ ] get_voice_connector_origination +- [ ] get_voice_connector_proxy - [ ] get_voice_connector_streaming_configuration - [ ] get_voice_connector_termination - [ ] get_voice_connector_termination_health - [ ] invite_users - [ ] list_accounts +- [ ] list_attendee_tags - [ ] list_attendees - [ ] list_bots +- [ ] list_meeting_tags - [ ] list_meetings - [ ] list_phone_number_orders - [ ] list_phone_numbers +- [ ] list_proxy_sessions - [ ] list_room_memberships - [ ] list_rooms +- [ ] list_tags_for_resource - [ ] list_users - [ ] list_voice_connector_groups - [ ] list_voice_connector_termination_credentials - [ ] list_voice_connectors - [ ] logout_user - [ ] put_events_configuration +- [ ] put_retention_settings - [ ] put_voice_connector_logging_configuration - [ ] put_voice_connector_origination +- [ ] put_voice_connector_proxy - [ ] put_voice_connector_streaming_configuration - [ ] put_voice_connector_termination - [ ] put_voice_connector_termination_credentials +- [ ] redact_conversation_message +- [ ] redact_room_message - [ ] regenerate_security_token - [ ] reset_personal_pin - [ ] restore_phone_number - [ ] search_available_phone_numbers +- [ ] tag_attendee +- [ ] tag_meeting +- [ ] tag_resource +- [ ] untag_attendee +- [ ] untag_meeting +- [ ] untag_resource - [ ] update_account - [ ] update_account_settings - [ ] update_bot - [ ] update_global_settings - [ ] update_phone_number - [ ] update_phone_number_settings +- [ ] update_proxy_session - [ ] update_room - [ ] update_room_membership - [ ] update_user - [ ] update_user_settings - [ ] update_voice_connector - [ ] update_voice_connector_group +
## cloud9 -0% implemented +
+0% implemented + - [ ] create_environment_ec2 - [ ] create_environment_membership - [ ] delete_environment @@ -885,11 +991,17 @@ - [ ] describe_environment_status - [ ] describe_environments - [ ] list_environments +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource - [ ] update_environment - [ ] update_environment_membership +
## clouddirectory -0% implemented +
+0% implemented + - [ ] add_facet_to_object - [ ] apply_schema - [ ] attach_object @@ -956,9 +1068,12 @@ - [ ] update_typed_link_facet - [ ] upgrade_applied_schema - [ ] upgrade_published_schema +
## cloudformation -32% implemented +
+32% implemented + - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -1014,9 +1129,12 @@ - [X] update_stack_set - [ ] update_termination_protection - [X] validate_template +
## cloudfront -0% implemented +
+0% implemented + - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags @@ -1062,9 +1180,12 @@ - [ ] update_field_level_encryption_profile - [ ] update_public_key - [ ] update_streaming_distribution +
## cloudhsm -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] create_hapg - [ ] create_hsm @@ -1085,9 +1206,12 @@ - [ ] modify_hsm - [ ] modify_luna_client - [ ] remove_tags_from_resource +
## cloudhsmv2 -0% implemented +
+0% implemented + - [ ] copy_backup_to_region - [ ] create_cluster - [ ] create_hsm @@ -1101,9 +1225,12 @@ - [ ] restore_backup - [ ] tag_resource - [ ] untag_resource +
## cloudsearch -0% implemented +
+0% implemented + - [ ] build_suggesters - [ ] create_domain - [ ] define_analysis_scheme @@ -1130,15 +1257,21 @@ - [ ] update_domain_endpoint_options - [ ] update_scaling_parameters - [ ] update_service_access_policies +
## cloudsearchdomain -0% implemented +
+0% implemented + - [ ] search - [ ] suggest - [ ] upload_documents +
## cloudtrail -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_trail - [ ] delete_trail @@ -1157,9 +1290,12 @@ - [ ] start_logging - [ ] stop_logging - [ ] update_trail +
## cloudwatch -34% implemented +
+36% implemented + - [X] delete_alarms - [ ] delete_anomaly_detector - [X] delete_dashboards @@ -1175,13 +1311,14 @@ - [ ] enable_insight_rules - [X] get_dashboard - [ ] get_insight_rule_report -- [ ] get_metric_data +- [X] get_metric_data - [X] get_metric_statistics - [ ] get_metric_widget_image - [X] list_dashboards - [X] list_metrics - [ ] list_tags_for_resource - [ ] put_anomaly_detector +- [ ] put_composite_alarm - [X] put_dashboard - [ ] put_insight_rule - [X] put_metric_alarm @@ -1189,9 +1326,12 @@ - [X] set_alarm_state - [ ] tag_resource - [ ] untag_resource +
## codebuild -0% implemented +
+0% implemented + - [ ] batch_delete_builds - [ ] batch_get_builds - [ ] batch_get_projects @@ -1226,9 +1366,12 @@ - [ ] update_project - [ ] update_report_group - [ ] update_webhook +
## codecommit -0% implemented +
+4% implemented + - [ ] associate_approval_rule_template_with_repository - [ ] batch_associate_approval_rule_template_with_repositories - [ ] batch_describe_merge_conflicts @@ -1304,9 +1447,12 @@ - [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name +
## codedeploy -0% implemented +
+0% implemented + - [ ] add_tags_to_on_premises_instances - [ ] batch_get_application_revisions - [ ] batch_get_applications @@ -1324,6 +1470,7 @@ - [ ] delete_deployment_config - [ ] delete_deployment_group - [ ] delete_git_hub_account_token +- [ ] delete_resources_by_external_id - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -1353,28 +1500,46 @@ - [ ] untag_resource - [ ] update_application - [ ] update_deployment_group +
## codeguru-reviewer -0% implemented +
+0% implemented + - [ ] associate_repository +- [ ] describe_code_review +- [ ] describe_recommendation_feedback - [ ] describe_repository_association - [ ] disassociate_repository +- [ ] list_code_reviews +- [ ] list_recommendation_feedback +- [ ] list_recommendations - [ ] list_repository_associations +- [ ] put_recommendation_feedback +
## codeguruprofiler -0% implemented +
+0% implemented + - [ ] configure_agent - [ ] create_profiling_group - [ ] delete_profiling_group - [ ] describe_profiling_group +- [ ] get_policy - [ ] get_profile - [ ] list_profile_times - [ ] list_profiling_groups - [ ] post_agent_profile +- [ ] put_permission +- [ ] remove_permission - [ ] update_profiling_group +
## codepipeline -22% implemented +
+21% implemented + - [ ] acknowledge_job - [ ] acknowledge_third_party_job - [ ] create_custom_action_type @@ -1408,12 +1573,16 @@ - [ ] register_webhook_with_third_party - [ ] retry_stage_execution - [ ] start_pipeline_execution +- [ ] stop_pipeline_execution - [X] tag_resource - [X] untag_resource - [X] update_pipeline +
## codestar -0% implemented +
+0% implemented + - [ ] associate_team_member - [ ] create_project - [ ] create_user_profile @@ -1432,9 +1601,25 @@ - [ ] update_project - [ ] update_team_member - [ ] update_user_profile +
+ +## codestar-connections +
+0% implemented + +- [ ] create_connection +- [ ] delete_connection +- [ ] get_connection +- [ ] list_connections +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +
## codestar-notifications -0% implemented +
+0% implemented + - [ ] create_notification_rule - [ ] delete_notification_rule - [ ] delete_target @@ -1448,9 +1633,12 @@ - [ ] unsubscribe - [ ] untag_resource - [ ] update_notification_rule +
## cognito-identity -28% implemented +
+28% implemented + - [X] create_identity_pool - [ ] delete_identities - [ ] delete_identity_pool @@ -1472,9 +1660,12 @@ - [ ] unlink_identity - [ ] untag_resource - [ ] update_identity_pool +
## cognito-idp -37% implemented +
+44% implemented + - [ ] add_custom_attributes - [X] admin_add_user_to_group - [ ] admin_confirm_sign_up @@ -1496,20 +1687,20 @@ - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge - [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_password +- [X] admin_set_user_password - [ ] admin_set_user_settings - [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status - [X] admin_update_user_attributes - [ ] admin_user_global_sign_out -- [ ] associate_software_token +- [X] associate_software_token - [X] change_password - [ ] confirm_device - [X] confirm_forgot_password -- [ ] confirm_sign_up +- [X] confirm_sign_up - [X] create_group - [X] create_identity_provider -- [ ] create_resource_server +- [X] create_resource_server - [ ] create_user_import_job - [X] create_user_pool - [X] create_user_pool_client @@ -1541,7 +1732,7 @@ - [ ] get_user_attribute_verification_code - [ ] get_user_pool_mfa_config - [ ] global_sign_out -- [ ] initiate_auth +- [X] initiate_auth - [ ] list_devices - [X] list_groups - [X] list_identity_providers @@ -1556,10 +1747,10 @@ - [X] respond_to_auth_challenge - [ ] set_risk_configuration - [ ] set_ui_customization -- [ ] set_user_mfa_preference +- [X] set_user_mfa_preference - [ ] set_user_pool_mfa_config - [ ] set_user_settings -- [ ] sign_up +- [X] sign_up - [ ] start_user_import_job - [ ] stop_user_import_job - [ ] tag_resource @@ -1573,11 +1764,14 @@ - [ ] update_user_pool - [X] update_user_pool_client - [X] update_user_pool_domain -- [ ] verify_software_token +- [X] verify_software_token - [ ] verify_user_attribute +
## cognito-sync -0% implemented +
+0% implemented + - [ ] bulk_publish - [ ] delete_dataset - [ ] describe_dataset @@ -1595,9 +1789,12 @@ - [ ] subscribe_to_dataset - [ ] unsubscribe_from_dataset - [ ] update_records +
## comprehend -0% implemented +
+0% implemented + - [ ] batch_detect_dominant_language - [ ] batch_detect_entities - [ ] batch_detect_key_phrases @@ -1649,32 +1846,51 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_endpoint +
## comprehendmedical -0% implemented +
+0% implemented + - [ ] describe_entities_detection_v2_job +- [ ] describe_icd10_cm_inference_job - [ ] describe_phi_detection_job +- [ ] describe_rx_norm_inference_job - [ ] detect_entities - [ ] detect_entities_v2 - [ ] detect_phi +- [ ] infer_icd10_cm +- [ ] infer_rx_norm - [ ] list_entities_detection_v2_jobs +- [ ] list_icd10_cm_inference_jobs - [ ] list_phi_detection_jobs +- [ ] list_rx_norm_inference_jobs - [ ] start_entities_detection_v2_job +- [ ] start_icd10_cm_inference_job - [ ] start_phi_detection_job +- [ ] start_rx_norm_inference_job - [ ] stop_entities_detection_v2_job +- [ ] stop_icd10_cm_inference_job - [ ] stop_phi_detection_job +- [ ] stop_rx_norm_inference_job +
## compute-optimizer -0% implemented +
+0% implemented + - [ ] get_auto_scaling_group_recommendations - [ ] get_ec2_instance_recommendations - [ ] get_ec2_recommendation_projected_metrics - [ ] get_enrollment_status - [ ] get_recommendation_summaries - [ ] update_enrollment_status +
## config -25% implemented +
+32% implemented + - [X] batch_get_aggregate_resource_config - [X] batch_get_resource_config - [X] delete_aggregation_authorization @@ -1685,7 +1901,7 @@ - [X] delete_delivery_channel - [ ] delete_evaluation_results - [ ] delete_organization_config_rule -- [ ] delete_organization_conformance_pack +- [X] delete_organization_conformance_pack - [ ] delete_pending_aggregation_request - [ ] delete_remediation_configuration - [ ] delete_remediation_exceptions @@ -1709,8 +1925,8 @@ - [X] describe_delivery_channels - [ ] describe_organization_config_rule_statuses - [ ] describe_organization_config_rules -- [ ] describe_organization_conformance_pack_statuses -- [ ] describe_organization_conformance_packs +- [X] describe_organization_conformance_pack_statuses +- [X] describe_organization_conformance_packs - [ ] describe_pending_aggregation_requests - [ ] describe_remediation_configurations - [ ] describe_remediation_exceptions @@ -1728,7 +1944,7 @@ - [ ] get_conformance_pack_compliance_summary - [ ] get_discovered_resource_counts - [ ] get_organization_config_rule_detailed_status -- [ ] get_organization_conformance_pack_detailed_status +- [X] get_organization_conformance_pack_detailed_status - [X] get_resource_config_history - [X] list_aggregate_discovered_resources - [X] list_discovered_resources @@ -1739,13 +1955,14 @@ - [X] put_configuration_recorder - [ ] put_conformance_pack - [X] put_delivery_channel -- [ ] put_evaluations +- [X] put_evaluations - [ ] put_organization_config_rule -- [ ] put_organization_conformance_pack +- [X] put_organization_conformance_pack - [ ] put_remediation_configurations - [ ] put_remediation_exceptions - [ ] put_resource_config - [ ] put_retention_configuration +- [ ] select_aggregate_resource_config - [ ] select_resource_config - [ ] start_config_rules_evaluation - [X] start_configuration_recorder @@ -1753,9 +1970,12 @@ - [X] stop_configuration_recorder - [ ] tag_resource - [ ] untag_resource +
## connect -0% implemented +
+0% implemented + - [ ] create_user - [ ] delete_user - [ ] describe_user @@ -1785,24 +2005,33 @@ - [ ] update_user_phone_config - [ ] update_user_routing_profile - [ ] update_user_security_profiles +
## connectparticipant -0% implemented +
+0% implemented + - [ ] create_participant_connection - [ ] disconnect_participant - [ ] get_transcript - [ ] send_event - [ ] send_message +
## cur -0% implemented +
+0% implemented + - [ ] delete_report_definition - [ ] describe_report_definitions - [ ] modify_report_definition - [ ] put_report_definition +
## dataexchange -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_data_set - [ ] create_job @@ -1825,9 +2054,12 @@ - [ ] update_asset - [ ] update_data_set - [ ] update_revision +
## datapipeline -42% implemented +
+42% implemented + - [X] activate_pipeline - [ ] add_tags - [X] create_pipeline @@ -1847,12 +2079,16 @@ - [ ] set_status - [ ] set_task_status - [ ] validate_pipeline_definition +
## datasync -22% implemented +
+20% implemented + - [X] cancel_task_execution - [ ] create_agent - [ ] create_location_efs +- [ ] create_location_fsx_windows - [ ] create_location_nfs - [ ] create_location_s3 - [ ] create_location_smb @@ -1862,6 +2098,7 @@ - [X] delete_task - [ ] describe_agent - [ ] describe_location_efs +- [ ] describe_location_fsx_windows - [ ] describe_location_nfs - [ ] describe_location_s3 - [ ] describe_location_smb @@ -1877,9 +2114,12 @@ - [ ] untag_resource - [ ] update_agent - [X] update_task +
## dax -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_parameter_group - [ ] create_subnet_group @@ -1901,9 +2141,12 @@ - [ ] update_cluster - [ ] update_parameter_group - [ ] update_subnet_group +
## detective -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] create_graph - [ ] create_members @@ -1915,14 +2158,20 @@ - [ ] list_invitations - [ ] list_members - [ ] reject_invitation +- [ ] start_monitoring_member +
## devicefarm -0% implemented +
+0% implemented + - [ ] create_device_pool - [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session +- [ ] create_test_grid_project +- [ ] create_test_grid_url - [ ] create_upload - [ ] create_vpce_configuration - [ ] delete_device_pool @@ -1931,6 +2180,7 @@ - [ ] delete_project - [ ] delete_remote_access_session - [ ] delete_run +- [ ] delete_test_grid_project - [ ] delete_upload - [ ] delete_vpce_configuration - [ ] get_account_settings @@ -1947,6 +2197,8 @@ - [ ] get_run - [ ] get_suite - [ ] get_test +- [ ] get_test_grid_project +- [ ] get_test_grid_session - [ ] get_upload - [ ] get_vpce_configuration - [ ] install_to_remote_access_session @@ -1966,6 +2218,10 @@ - [ ] list_samples - [ ] list_suites - [ ] list_tags_for_resource +- [ ] list_test_grid_projects +- [ ] list_test_grid_session_actions +- [ ] list_test_grid_session_artifacts +- [ ] list_test_grid_sessions - [ ] list_tests - [ ] list_unique_problems - [ ] list_uploads @@ -1983,11 +2239,15 @@ - [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project +- [ ] update_test_grid_project - [ ] update_upload - [ ] update_vpce_configuration +
## directconnect -0% implemented +
+0% implemented + - [ ] accept_direct_connect_gateway_association_proposal - [ ] allocate_connection_on_interconnect - [ ] allocate_hosted_connection @@ -2041,9 +2301,12 @@ - [ ] update_direct_connect_gateway_association - [ ] update_lag - [ ] update_virtual_interface_attributes +
## discovery -0% implemented +
+0% implemented + - [ ] associate_configuration_items_to_application - [ ] batch_delete_import_data - [ ] create_application @@ -2069,9 +2332,12 @@ - [ ] stop_continuous_export - [ ] stop_data_collection_by_agent_ids - [ ] update_application +
## dlm -0% implemented +
+0% implemented + - [ ] create_lifecycle_policy - [ ] delete_lifecycle_policy - [ ] get_lifecycle_policies @@ -2080,9 +2346,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_lifecycle_policy +
## dms -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] create_endpoint @@ -2130,9 +2399,12 @@ - [ ] start_replication_task_assessment - [ ] stop_replication_task - [ ] test_connection +
## docdb -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] copy_db_cluster_parameter_group @@ -2175,9 +2447,12 @@ - [ ] restore_db_cluster_to_point_in_time - [ ] start_db_cluster - [ ] stop_db_cluster +
## ds -0% implemented +
+0% implemented + - [ ] accept_shared_directory - [ ] add_ip_routes - [ ] add_tags_to_resource @@ -2235,11 +2510,14 @@ - [ ] update_radius - [ ] update_trust - [ ] verify_trust +
## dynamodb -17% implemented -- [ ] batch_get_item -- [ ] batch_write_item +
+53% implemented + +- [X] batch_get_item +- [X] batch_write_item - [ ] create_backup - [ ] create_global_table - [X] create_table @@ -2247,54 +2525,63 @@ - [X] delete_item - [X] delete_table - [ ] describe_backup -- [ ] describe_continuous_backups +- [X] describe_continuous_backups - [ ] describe_contributor_insights - [ ] describe_endpoints - [ ] describe_global_table - [ ] describe_global_table_settings - [ ] describe_limits -- [ ] describe_table +- [X] describe_table - [ ] describe_table_replica_auto_scaling -- [ ] describe_time_to_live +- [X] describe_time_to_live - [X] get_item - [ ] list_backups - [ ] list_contributor_insights - [ ] list_global_tables -- [ ] list_tables -- [ ] list_tags_of_resource +- [X] list_tables +- [X] list_tags_of_resource - [X] put_item - [X] query - [ ] restore_table_from_backup - [ ] restore_table_to_point_in_time - [X] scan -- [ ] tag_resource -- [ ] transact_get_items -- [ ] transact_write_items -- [ ] untag_resource -- [ ] update_continuous_backups +- [X] tag_resource +- [X] transact_get_items +- [X] transact_write_items +- [X] untag_resource +- [X] update_continuous_backups - [ ] update_contributor_insights - [ ] update_global_table - [ ] update_global_table_settings -- [ ] update_item -- [ ] update_table +- [X] update_item +- [X] update_table - [ ] update_table_replica_auto_scaling -- [ ] update_time_to_live +- [X] update_time_to_live +
## dynamodbstreams -100% implemented +
+100% implemented + - [X] describe_stream - [X] get_records - [X] get_shard_iterator - [X] list_streams +
## ebs -0% implemented +
+0% implemented + - [ ] get_snapshot_block - [ ] list_changed_blocks - [ ] list_snapshot_blocks +
## ec2 -26% implemented +
+27% implemented + - [ ] accept_reserved_instances_exchange_quote - [ ] accept_transit_gateway_peering_attachment - [ ] accept_transit_gateway_vpc_attachment @@ -2345,7 +2632,7 @@ - [X] create_dhcp_options - [ ] create_egress_only_internet_gateway - [ ] create_fleet -- [ ] create_flow_logs +- [X] create_flow_logs - [ ] create_fpga_image - [X] create_image - [ ] create_instance_export_task @@ -2382,7 +2669,7 @@ - [ ] create_transit_gateway_vpc_attachment - [X] create_volume - [X] create_vpc -- [ ] create_vpc_endpoint +- [X] create_vpc_endpoint - [ ] create_vpc_endpoint_connection_notification - [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection @@ -2395,7 +2682,7 @@ - [ ] delete_dhcp_options - [ ] delete_egress_only_internet_gateway - [ ] delete_fleets -- [ ] delete_flow_logs +- [X] delete_flow_logs - [ ] delete_fpga_image - [X] delete_internet_gateway - [X] delete_key_pair @@ -2438,6 +2725,7 @@ - [X] delete_vpn_gateway - [ ] deprovision_byoip_cidr - [X] deregister_image +- [ ] deregister_instance_event_notification_attributes - [ ] deregister_transit_gateway_multicast_group_members - [ ] deregister_transit_gateway_multicast_group_sources - [ ] describe_account_attributes @@ -2465,7 +2753,7 @@ - [ ] describe_fleet_history - [ ] describe_fleet_instances - [ ] describe_fleets -- [ ] describe_flow_logs +- [X] describe_flow_logs - [ ] describe_fpga_image_attribute - [ ] describe_fpga_images - [ ] describe_host_reservation_offerings @@ -2479,12 +2767,14 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute -- [ ] describe_instance_credit_specifications +- [X] describe_instance_credit_specifications +- [ ] describe_instance_event_notification_attributes - [ ] describe_instance_status - [ ] describe_instance_type_offerings - [ ] describe_instance_types - [ ] describe_instances - [X] describe_internet_gateways +- [ ] describe_ipv6_pools - [X] describe_key_pairs - [ ] describe_launch_template_versions - [ ] describe_launch_templates @@ -2496,7 +2786,7 @@ - [ ] describe_local_gateways - [ ] describe_moving_addresses - [ ] describe_nat_gateways -- [ ] describe_network_acls +- [X] describe_network_acls - [ ] describe_network_interface_attribute - [ ] describe_network_interface_permissions - [X] describe_network_interfaces @@ -2581,6 +2871,7 @@ - [ ] export_client_vpn_client_configuration - [ ] export_image - [ ] export_transit_gateway_routes +- [ ] get_associated_ipv6_pool_cidrs - [ ] get_capacity_reservation_usage - [ ] get_coip_pool_usage - [ ] get_console_output @@ -2602,6 +2893,7 @@ - [X] import_key_pair - [ ] import_snapshot - [ ] import_volume +- [ ] modify_availability_zone_group - [ ] modify_capacity_reservation - [ ] modify_client_vpn_endpoint - [ ] modify_default_credit_specification @@ -2647,7 +2939,8 @@ - [ ] purchase_reserved_instances_offering - [ ] purchase_scheduled_instances - [X] reboot_instances -- [ ] register_image +- [X] register_image +- [ ] register_instance_event_notification_attributes - [ ] register_transit_gateway_multicast_group_members - [ ] register_transit_gateway_multicast_group_sources - [ ] reject_transit_gateway_peering_attachment @@ -2675,13 +2968,14 @@ - [ ] revoke_client_vpn_ingress - [X] revoke_security_group_egress - [X] revoke_security_group_ingress -- [ ] run_instances +- [X] run_instances - [ ] run_scheduled_instances - [ ] search_local_gateway_routes - [ ] search_transit_gateway_multicast_groups - [ ] search_transit_gateway_routes - [ ] send_diagnostic_interrupt - [X] 
start_instances +- [ ] start_vpc_endpoint_service_private_dns_verification - [X] stop_instances - [ ] terminate_client_vpn_connections - [X] terminate_instances @@ -2691,13 +2985,19 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress - [ ] withdraw_byoip_cidr +
## ec2-instance-connect -0% implemented -- [x] send_ssh_public_key +
+100% implemented + +- [X] send_ssh_public_key +
## ecr -27% implemented +
+27% implemented + - [ ] batch_check_layer_availability - [X] batch_delete_image - [X] batch_get_image @@ -2727,18 +3027,21 @@ - [ ] tag_resource - [ ] untag_resource - [ ] upload_layer_part +
## ecs -62% implemented +
+72% implemented + - [ ] create_capacity_provider - [X] create_cluster - [X] create_service -- [ ] create_task_set +- [X] create_task_set - [ ] delete_account_setting - [X] delete_attributes - [X] delete_cluster - [X] delete_service -- [ ] delete_task_set +- [X] delete_task_set - [X] deregister_container_instance - [X] deregister_task_definition - [ ] describe_capacity_providers @@ -2746,7 +3049,7 @@ - [X] describe_container_instances - [X] describe_services - [X] describe_task_definition -- [ ] describe_task_sets +- [X] describe_task_sets - [X] describe_tasks - [ ] discover_poll_endpoint - [ ] list_account_settings @@ -2776,28 +3079,43 @@ - [ ] update_container_agent - [X] update_container_instances_state - [X] update_service -- [ ] update_service_primary_task_set -- [ ] update_task_set +- [X] update_service_primary_task_set +- [X] update_task_set +
## efs -0% implemented +
+0% implemented + +- [ ] create_access_point - [ ] create_file_system - [ ] create_mount_target - [ ] create_tags +- [ ] delete_access_point - [ ] delete_file_system +- [ ] delete_file_system_policy - [ ] delete_mount_target - [ ] delete_tags +- [ ] describe_access_points +- [ ] describe_file_system_policy - [ ] describe_file_systems - [ ] describe_lifecycle_configuration - [ ] describe_mount_target_security_groups - [ ] describe_mount_targets - [ ] describe_tags +- [ ] list_tags_for_resource - [ ] modify_mount_target_security_groups +- [ ] put_file_system_policy - [ ] put_lifecycle_configuration +- [ ] tag_resource +- [ ] untag_resource - [ ] update_file_system +
## eks -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_fargate_profile - [ ] create_nodegroup @@ -2819,15 +3137,24 @@ - [ ] update_cluster_version - [ ] update_nodegroup_config - [ ] update_nodegroup_version +
## elastic-inference -0% implemented +
+0% implemented + +- [ ] describe_accelerator_offerings +- [ ] describe_accelerator_types +- [ ] describe_accelerators - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## elasticache -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] authorize_cache_security_group_ingress - [ ] batch_apply_update_action @@ -2838,13 +3165,16 @@ - [ ] create_cache_parameter_group - [ ] create_cache_security_group - [ ] create_cache_subnet_group +- [ ] create_global_replication_group - [ ] create_replication_group - [ ] create_snapshot +- [ ] decrease_node_groups_in_global_replication_group - [ ] decrease_replica_count - [ ] delete_cache_cluster - [ ] delete_cache_parameter_group - [ ] delete_cache_security_group - [ ] delete_cache_subnet_group +- [ ] delete_global_replication_group - [ ] delete_replication_group - [ ] delete_snapshot - [ ] describe_cache_clusters @@ -2855,38 +3185,47 @@ - [ ] describe_cache_subnet_groups - [ ] describe_engine_default_parameters - [ ] describe_events +- [ ] describe_global_replication_groups - [ ] describe_replication_groups - [ ] describe_reserved_cache_nodes - [ ] describe_reserved_cache_nodes_offerings - [ ] describe_service_updates - [ ] describe_snapshots - [ ] describe_update_actions +- [ ] disassociate_global_replication_group +- [ ] failover_global_replication_group +- [ ] increase_node_groups_in_global_replication_group - [ ] increase_replica_count - [ ] list_allowed_node_type_modifications - [ ] list_tags_for_resource - [ ] modify_cache_cluster - [ ] modify_cache_parameter_group - [ ] modify_cache_subnet_group +- [ ] modify_global_replication_group - [ ] modify_replication_group - [ ] modify_replication_group_shard_configuration - [ ] purchase_reserved_cache_nodes_offering +- [ ] rebalance_slots_in_global_replication_group - [ ] reboot_cache_cluster - [ ] remove_tags_from_resource - [ ] reset_cache_parameter_group - [ ] revoke_cache_security_group_ingress - [ ] start_migration - [ ] test_failover +
## elasticbeanstalk -0% implemented +
+13% implemented + - [ ] abort_environment_update - [ ] apply_environment_managed_action - [ ] check_dns_availability - [ ] compose_environments -- [ ] create_application +- [X] create_application - [ ] create_application_version - [ ] create_configuration_template -- [ ] create_environment +- [X] create_environment - [ ] create_platform_version - [ ] create_storage_location - [ ] delete_application @@ -2903,13 +3242,14 @@ - [ ] describe_environment_managed_action_history - [ ] describe_environment_managed_actions - [ ] describe_environment_resources -- [ ] describe_environments +- [X] describe_environments - [ ] describe_events - [ ] describe_instances_health - [ ] describe_platform_version -- [ ] list_available_solution_stacks +- [X] list_available_solution_stacks +- [ ] list_platform_branches - [ ] list_platform_versions -- [ ] list_tags_for_resource +- [X] list_tags_for_resource - [ ] rebuild_environment - [ ] request_environment_info - [ ] restart_app_server @@ -2921,11 +3261,14 @@ - [ ] update_application_version - [ ] update_configuration_template - [ ] update_environment -- [ ] update_tags_for_resource +- [X] update_tags_for_resource - [ ] validate_configuration_settings +
## elastictranscoder -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] create_pipeline @@ -2943,9 +3286,12 @@ - [ ] update_pipeline - [ ] update_pipeline_notifications - [ ] update_pipeline_status +
## elb -34% implemented +
+34% implemented + - [ ] add_tags - [X] apply_security_groups_to_load_balancer - [ ] attach_load_balancer_to_subnets @@ -2975,9 +3321,12 @@ - [ ] set_load_balancer_listener_ssl_certificate - [ ] set_load_balancer_policies_for_backend_server - [X] set_load_balancer_policies_of_listener +
## elbv2 -70% implemented +
+70% implemented + - [ ] add_listener_certificates - [ ] add_tags - [X] create_listener @@ -3012,9 +3361,12 @@ - [X] set_rule_priorities - [X] set_security_groups - [X] set_subnets +
## emr -50% implemented +
+45% implemented + - [ ] add_instance_fleet - [X] add_instance_groups - [X] add_job_flow_steps @@ -3027,6 +3379,7 @@ - [ ] describe_security_configuration - [X] describe_step - [ ] get_block_public_access_configuration +- [ ] get_managed_scaling_policy - [X] list_bootstrap_actions - [X] list_clusters - [ ] list_instance_fleets @@ -3039,41 +3392,56 @@ - [X] modify_instance_groups - [ ] put_auto_scaling_policy - [ ] put_block_public_access_configuration +- [ ] put_managed_scaling_policy - [ ] remove_auto_scaling_policy +- [ ] remove_managed_scaling_policy - [X] remove_tags - [X] run_job_flow - [X] set_termination_protection - [X] set_visible_to_all_users - [X] terminate_job_flows +
## es -0% implemented +
+0% implemented + - [ ] add_tags +- [ ] associate_package - [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain +- [ ] create_package - [ ] delete_elasticsearch_domain - [ ] delete_elasticsearch_service_role +- [ ] delete_package - [ ] describe_elasticsearch_domain - [ ] describe_elasticsearch_domain_config - [ ] describe_elasticsearch_domains - [ ] describe_elasticsearch_instance_type_limits +- [ ] describe_packages - [ ] describe_reserved_elasticsearch_instance_offerings - [ ] describe_reserved_elasticsearch_instances +- [ ] dissociate_package - [ ] get_compatible_elasticsearch_versions - [ ] get_upgrade_history - [ ] get_upgrade_status - [ ] list_domain_names +- [ ] list_domains_for_package - [ ] list_elasticsearch_instance_types - [ ] list_elasticsearch_versions +- [ ] list_packages_for_domain - [ ] list_tags - [ ] purchase_reserved_elasticsearch_instance_offering - [ ] remove_tags - [ ] start_elasticsearch_service_software_update - [ ] update_elasticsearch_domain_config - [ ] upgrade_elasticsearch_domain +
## events -58% implemented +
+67% implemented + - [ ] activate_event_source - [X] create_event_bus - [ ] create_partner_event_source @@ -3093,7 +3461,7 @@ - [ ] list_partner_event_sources - [X] list_rule_names_by_target - [X] list_rules -- [ ] list_tags_for_resource +- [X] list_tags_for_resource - [X] list_targets_by_rule - [X] put_events - [ ] put_partner_events @@ -3102,12 +3470,15 @@ - [X] put_targets - [X] remove_permission - [X] remove_targets -- [ ] tag_resource +- [X] tag_resource - [X] test_event_pattern -- [ ] untag_resource +- [X] untag_resource +
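A short sketch of the newly checked tagging methods, applied to a rule (the resource type the mock is most likely to cover; exact ARN-type support is an assumption):

```python
import boto3
from moto import mock_events


@mock_events
def test_rule_tagging():
    client = boto3.client("events", region_name="us-east-1")
    arn = client.put_rule(Name="my-rule", ScheduleExpression="rate(5 minutes)")["RuleArn"]
    client.tag_resource(ResourceARN=arn, Tags=[{"Key": "env", "Value": "test"}])
    assert client.list_tags_for_resource(ResourceARN=arn)["Tags"] == [
        {"Key": "env", "Value": "test"}
    ]
    client.untag_resource(ResourceARN=arn, TagKeys=["env"])
```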
## firehose -0% implemented +
+0% implemented + - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream @@ -3120,9 +3491,12 @@ - [ ] tag_delivery_stream - [ ] untag_delivery_stream - [ ] update_destination +
## fms -0% implemented +
+0% implemented + - [ ] associate_admin_account - [ ] delete_notification_channel - [ ] delete_policy @@ -3135,52 +3509,66 @@ - [ ] list_compliance_status - [ ] list_member_accounts - [ ] list_policies +- [ ] list_tags_for_resource - [ ] put_notification_channel - [ ] put_policy +- [ ] tag_resource +- [ ] untag_resource +
## forecast -0% implemented +
+19% implemented + - [ ] create_dataset -- [ ] create_dataset_group +- [X] create_dataset_group - [ ] create_dataset_import_job - [ ] create_forecast - [ ] create_forecast_export_job - [ ] create_predictor - [ ] delete_dataset -- [ ] delete_dataset_group +- [X] delete_dataset_group - [ ] delete_dataset_import_job - [ ] delete_forecast - [ ] delete_forecast_export_job - [ ] delete_predictor - [ ] describe_dataset -- [ ] describe_dataset_group +- [X] describe_dataset_group - [ ] describe_dataset_import_job - [ ] describe_forecast - [ ] describe_forecast_export_job - [ ] describe_predictor - [ ] get_accuracy_metrics -- [ ] list_dataset_groups +- [X] list_dataset_groups - [ ] list_dataset_import_jobs - [ ] list_datasets - [ ] list_forecast_export_jobs - [ ] list_forecasts - [ ] list_predictors -- [ ] update_dataset_group +- [X] update_dataset_group +
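The five checked methods cover the dataset-group lifecycle; a sketch, assuming the decorator is exported as `mock_forecast`:

```python
import boto3
from moto import mock_forecast  # assumed export name


@mock_forecast
def test_dataset_group_lifecycle():
    client = boto3.client("forecast", region_name="us-east-1")
    client.create_dataset_group(DatasetGroupName="my_group", Domain="CUSTOM")
    groups = client.list_dataset_groups()["DatasetGroups"]
    arn = groups[0]["DatasetGroupArn"]
    client.describe_dataset_group(DatasetGroupArn=arn)
    client.update_dataset_group(DatasetGroupArn=arn, DatasetArns=[])
    client.delete_dataset_group(DatasetGroupArn=arn)
```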
## forecastquery -0% implemented +
+0% implemented + - [ ] query_forecast +
## frauddetector -0% implemented +
+0% implemented + - [ ] batch_create_variable - [ ] batch_get_variable - [ ] create_detector_version - [ ] create_model_version - [ ] create_rule - [ ] create_variable +- [ ] delete_detector - [ ] delete_detector_version - [ ] delete_event +- [ ] delete_rule_version - [ ] describe_detector - [ ] describe_model_versions - [ ] get_detector_version @@ -3203,27 +3591,38 @@ - [ ] update_rule_metadata - [ ] update_rule_version - [ ] update_variable +
## fsx -0% implemented +
+0% implemented + +- [ ] cancel_data_repository_task - [ ] create_backup +- [ ] create_data_repository_task - [ ] create_file_system - [ ] create_file_system_from_backup - [ ] delete_backup - [ ] delete_file_system - [ ] describe_backups +- [ ] describe_data_repository_tasks - [ ] describe_file_systems - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_file_system +
## gamelift -0% implemented +
+0% implemented + - [ ] accept_match +- [ ] claim_game_server - [ ] create_alias - [ ] create_build - [ ] create_fleet +- [ ] create_game_server_group - [ ] create_game_session - [ ] create_game_session_queue - [ ] create_matchmaking_configuration @@ -3236,6 +3635,7 @@ - [ ] delete_alias - [ ] delete_build - [ ] delete_fleet +- [ ] delete_game_server_group - [ ] delete_game_session_queue - [ ] delete_matchmaking_configuration - [ ] delete_matchmaking_rule_set @@ -3243,6 +3643,7 @@ - [ ] delete_script - [ ] delete_vpc_peering_authorization - [ ] delete_vpc_peering_connection +- [ ] deregister_game_server - [ ] describe_alias - [ ] describe_build - [ ] describe_ec2_instance_limits @@ -3251,6 +3652,8 @@ - [ ] describe_fleet_events - [ ] describe_fleet_port_settings - [ ] describe_fleet_utilization +- [ ] describe_game_server +- [ ] describe_game_server_group - [ ] describe_game_session_details - [ ] describe_game_session_placement - [ ] describe_game_session_queues @@ -3270,10 +3673,15 @@ - [ ] list_aliases - [ ] list_builds - [ ] list_fleets +- [ ] list_game_server_groups +- [ ] list_game_servers - [ ] list_scripts +- [ ] list_tags_for_resource - [ ] put_scaling_policy +- [ ] register_game_server - [ ] request_upload_credentials - [ ] resolve_alias +- [ ] resume_game_server_group - [ ] search_game_sessions - [ ] start_fleet_actions - [ ] start_game_session_placement @@ -3282,20 +3690,28 @@ - [ ] stop_fleet_actions - [ ] stop_game_session_placement - [ ] stop_matchmaking +- [ ] suspend_game_server_group +- [ ] tag_resource +- [ ] untag_resource - [ ] update_alias - [ ] update_build - [ ] update_fleet_attributes - [ ] update_fleet_capacity - [ ] update_fleet_port_settings +- [ ] update_game_server +- [ ] update_game_server_group - [ ] update_game_session - [ ] update_game_session_queue - [ ] update_matchmaking_configuration - [ ] update_runtime_configuration - [ ] update_script - [ ] validate_matchmaking_rule_set +
## glacier -12% implemented +
+12% implemented + - [ ] abort_multipart_upload - [ ] abort_vault_lock - [ ] add_tags_to_vault @@ -3329,29 +3745,43 @@ - [ ] set_vault_notifications - [ ] upload_archive - [ ] upload_multipart_part +
## globalaccelerator -0% implemented +
+0% implemented + +- [ ] advertise_byoip_cidr - [ ] create_accelerator - [ ] create_endpoint_group - [ ] create_listener - [ ] delete_accelerator - [ ] delete_endpoint_group - [ ] delete_listener +- [ ] deprovision_byoip_cidr - [ ] describe_accelerator - [ ] describe_accelerator_attributes - [ ] describe_endpoint_group - [ ] describe_listener - [ ] list_accelerators +- [ ] list_byoip_cidrs - [ ] list_endpoint_groups - [ ] list_listeners +- [ ] list_tags_for_resource +- [ ] provision_byoip_cidr +- [ ] tag_resource +- [ ] untag_resource - [ ] update_accelerator - [ ] update_accelerator_attributes - [ ] update_endpoint_group - [ ] update_listener +- [ ] withdraw_byoip_cidr +
## glue -4% implemented +
+5% implemented + - [ ] batch_create_partition - [ ] batch_delete_connection - [ ] batch_delete_partition @@ -3404,7 +3834,7 @@ - [ ] get_crawlers - [ ] get_data_catalog_encryption_settings - [X] get_database -- [ ] get_databases +- [X] get_databases - [ ] get_dataflow_graph - [ ] get_dev_endpoint - [ ] get_dev_endpoints @@ -3441,6 +3871,7 @@ - [ ] list_crawlers - [ ] list_dev_endpoints - [ ] list_jobs +- [ ] list_ml_transforms - [ ] list_triggers - [ ] list_workflows - [ ] put_data_catalog_encryption_settings @@ -3460,6 +3891,7 @@ - [ ] stop_crawler - [ ] stop_crawler_schedule - [ ] stop_trigger +- [ ] stop_workflow_run - [ ] tag_resource - [ ] untag_resource - [ ] update_classifier @@ -3475,9 +3907,12 @@ - [ ] update_trigger - [ ] update_user_defined_function - [ ] update_workflow +
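`get_databases` is the only new checkbox here; a minimal sketch pairing it with the already-supported `create_database`:

```python
import boto3
from moto import mock_glue


@mock_glue
def test_get_databases():
    client = boto3.client("glue", region_name="us-east-1")
    client.create_database(DatabaseInput={"Name": "db1"})
    client.create_database(DatabaseInput={"Name": "db2"})
    names = sorted(d["Name"] for d in client.get_databases()["DatabaseList"])
    assert names == ["db1", "db2"]
```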
## greengrass -0% implemented +
+0% implemented + - [ ] associate_role_to_group - [ ] associate_service_role_to_account - [ ] create_connector_definition @@ -3568,9 +4003,12 @@ - [ ] update_logger_definition - [ ] update_resource_definition - [ ] update_subscription_definition +
## groundstation -0% implemented +
+0% implemented + - [ ] cancel_contact - [ ] create_config - [ ] create_dataflow_endpoint_group @@ -3596,9 +4034,12 @@ - [ ] untag_resource - [ ] update_config - [ ] update_mission_profile +
## guardduty -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] archive_findings - [ ] create_detector @@ -3616,9 +4057,12 @@ - [ ] delete_members - [ ] delete_publishing_destination - [ ] delete_threat_intel_set +- [ ] describe_organization_configuration - [ ] describe_publishing_destination +- [ ] disable_organization_admin_account - [ ] disassociate_from_master_account - [ ] disassociate_members +- [ ] enable_organization_admin_account - [ ] get_detector - [ ] get_filter - [ ] get_findings @@ -3635,6 +4079,7 @@ - [ ] list_invitations - [ ] list_ip_sets - [ ] list_members +- [ ] list_organization_admin_accounts - [ ] list_publishing_destinations - [ ] list_tags_for_resource - [ ] list_threat_intel_sets @@ -3647,20 +4092,34 @@ - [ ] update_filter - [ ] update_findings_feedback - [ ] update_ip_set +- [ ] update_organization_configuration - [ ] update_publishing_destination - [ ] update_threat_intel_set +
## health -0% implemented +
+0% implemented + +- [ ] describe_affected_accounts_for_organization - [ ] describe_affected_entities +- [ ] describe_affected_entities_for_organization - [ ] describe_entity_aggregates - [ ] describe_event_aggregates - [ ] describe_event_details +- [ ] describe_event_details_for_organization - [ ] describe_event_types - [ ] describe_events +- [ ] describe_events_for_organization +- [ ] describe_health_service_status_for_organization +- [ ] disable_health_service_access_for_organization +- [ ] enable_health_service_access_for_organization +
## iam -67% implemented +
+70% implemented + - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -3687,8 +4146,8 @@ - [X] delete_account_alias - [X] delete_account_password_policy - [X] delete_group -- [ ] delete_group_policy -- [ ] delete_instance_profile +- [X] delete_group_policy +- [X] delete_instance_profile - [X] delete_login_profile - [X] delete_open_id_connect_provider - [X] delete_policy @@ -3776,7 +4235,7 @@ - [X] remove_user_from_group - [ ] reset_service_specific_credential - [ ] resync_mfa_device -- [ ] set_default_policy_version +- [X] set_default_policy_version - [ ] set_security_token_service_preferences - [ ] simulate_custom_policy - [ ] simulate_principal_policy @@ -3801,9 +4260,12 @@ - [X] upload_server_certificate - [X] upload_signing_certificate - [X] upload_ssh_public_key +
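Among the newly checked IAM methods, `set_default_policy_version` is the least obvious to drive; a sketch with illustrative two-version policy documents:

```python
import json

import boto3
from moto import mock_iam

# Hypothetical policy documents, for illustration only
DOC_V1 = json.dumps({"Version": "2012-10-17", "Statement": [
    {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}]})
DOC_V2 = DOC_V1.replace("ListBucket", "GetObject")


@mock_iam
def test_set_default_policy_version():
    iam = boto3.client("iam", region_name="us-east-1")
    arn = iam.create_policy(PolicyName="p", PolicyDocument=DOC_V1)["Policy"]["Arn"]
    iam.create_policy_version(PolicyArn=arn, PolicyDocument=DOC_V2)  # becomes v2
    iam.set_default_policy_version(PolicyArn=arn, VersionId="v2")
    assert iam.get_policy(PolicyArn=arn)["Policy"]["DefaultVersionId"] == "v2"
```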
## imagebuilder -0% implemented +
+0% implemented + - [ ] cancel_image_creation - [ ] create_component - [ ] create_distribution_configuration @@ -3846,18 +4308,24 @@ - [ ] update_distribution_configuration - [ ] update_image_pipeline - [ ] update_infrastructure_configuration +
## importexport -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] get_shipping_label - [ ] get_status - [ ] list_jobs - [ ] update_job +
## inspector -0% implemented +
+0% implemented + - [ ] add_attributes_to_findings - [ ] create_assessment_target - [ ] create_assessment_template @@ -3895,9 +4363,12 @@ - [ ] subscribe_to_event - [ ] unsubscribe_from_event - [ ] update_assessment_target +
## iot -20% implemented +
+28% implemented + - [ ] accept_certificate_transfer - [ ] add_thing_to_billing_group - [X] add_thing_to_thing_group @@ -3909,13 +4380,14 @@ - [ ] cancel_audit_mitigation_actions_task - [ ] cancel_audit_task - [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] cancel_job_execution +- [X] cancel_job +- [X] cancel_job_execution - [ ] clear_default_authorizer - [ ] confirm_topic_rule_destination - [ ] create_authorizer - [ ] create_billing_group - [ ] create_certificate_from_csr +- [ ] create_dimension - [ ] create_domain_configuration - [ ] create_dynamic_thing_group - [X] create_job @@ -3923,7 +4395,7 @@ - [ ] create_mitigation_action - [ ] create_ota_update - [X] create_policy -- [ ] create_policy_version +- [X] create_policy_version - [ ] create_provisioning_claim - [ ] create_provisioning_template - [ ] create_provisioning_template_version @@ -3941,14 +4413,15 @@ - [ ] delete_billing_group - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_dimension - [ ] delete_domain_configuration - [ ] delete_dynamic_thing_group -- [ ] delete_job -- [ ] delete_job_execution +- [X] delete_job +- [X] delete_job_execution - [ ] delete_mitigation_action - [ ] delete_ota_update - [X] delete_policy -- [ ] delete_policy_version +- [X] delete_policy_version - [ ] delete_provisioning_template - [ ] delete_provisioning_template_version - [ ] delete_registration_code @@ -3972,12 +4445,13 @@ - [ ] describe_ca_certificate - [X] describe_certificate - [ ] describe_default_authorizer +- [ ] describe_dimension - [ ] describe_domain_configuration -- [ ] describe_endpoint +- [X] describe_endpoint - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job -- [ ] describe_job_execution +- [X] describe_job_execution - [ ] describe_mitigation_action - [ ] describe_provisioning_template - [ ] describe_provisioning_template_version @@ -3998,19 +4472,19 @@ - [ ] get_cardinality - [ ] get_effective_policies - [ ] get_indexing_configuration -- [ ] get_job_document +- [X] get_job_document - [ ] get_logging_options - [ ] get_ota_update - [ ] get_percentiles - [X] get_policy -- [ ] get_policy_version +- [X] get_policy_version - [ ] get_registration_code - [ ] get_statistics - [ ] get_topic_rule - [ ] get_topic_rule_destination - [ ] get_v2_logging_options - [ ] list_active_violations -- [ ] list_attached_policies +- [X] list_attached_policies - [ ] list_audit_findings - [ ] list_audit_mitigation_actions_executions - [ ] list_audit_mitigation_actions_tasks @@ -4020,17 +4494,18 @@ - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_dimensions - [ ] list_domain_configurations - [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs +- [X] list_job_executions_for_job +- [X] list_job_executions_for_thing +- [X] list_jobs - [ ] list_mitigation_actions - [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals -- [ ] list_policy_versions +- [X] list_policy_versions - [X] list_principal_policies - [X] list_principal_things - [ ] list_provisioning_template_versions @@ -4058,6 +4533,7 @@ - [ ] list_violation_events - [ ] register_ca_certificate - [X] register_certificate +- [X] register_certificate_without_ca - [ ] register_thing - [ ] reject_certificate_transfer - [ ] remove_thing_from_billing_group @@ -4065,7 +4541,7 @@ - [ ] replace_topic_rule - [ ] search_index - [ ] set_default_authorizer -- [ ] set_default_policy_version +- [X] set_default_policy_version - [ ] 
set_logging_options - [ ] set_v2_logging_level - [ ] set_v2_logging_options @@ -4083,6 +4559,7 @@ - [ ] update_billing_group - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_dimension - [ ] update_domain_configuration - [ ] update_dynamic_thing_group - [ ] update_event_configurations @@ -4099,23 +4576,32 @@ - [X] update_thing_groups_for_thing - [ ] update_topic_rule_destination - [ ] validate_security_profile_behaviors +
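The IoT additions center on the job lifecycle; a minimal sketch of the newly checked job methods:

```python
import json

import boto3
from moto import mock_iot


@mock_iot
def test_job_lifecycle():
    client = boto3.client("iot", region_name="us-east-1")
    thing_arn = client.create_thing(thingName="device1")["thingArn"]
    client.create_job(
        jobId="job-1",
        targets=[thing_arn],
        document=json.dumps({"action": "reboot"}),  # illustrative job document
    )
    assert client.describe_job(jobId="job-1")["job"]["jobId"] == "job-1"
    assert "reboot" in client.get_job_document(jobId="job-1")["document"]
    client.cancel_job(jobId="job-1")
```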
## iot-data -100% implemented +
+100% implemented + - [X] delete_thing_shadow - [X] get_thing_shadow - [X] publish - [X] update_thing_shadow +
## iot-jobs-data -0% implemented +
+0% implemented + - [ ] describe_job_execution - [ ] get_pending_job_executions - [ ] start_next_pending_job_execution - [ ] update_job_execution +
## iot1click-devices -0% implemented +
+0% implemented + - [ ] claim_devices_by_claim_code - [ ] describe_device - [ ] finalize_device_claim @@ -4129,9 +4615,12 @@ - [ ] unclaim_device - [ ] untag_resource - [ ] update_device_state +
## iot1click-projects -0% implemented +
+0% implemented + - [ ] associate_device_with_placement - [ ] create_placement - [ ] create_project @@ -4148,9 +4637,12 @@ - [ ] untag_resource - [ ] update_placement - [ ] update_project +
## iotanalytics -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] cancel_pipeline_reprocessing - [ ] create_channel @@ -4185,9 +4677,12 @@ - [ ] update_dataset - [ ] update_datastore - [ ] update_pipeline +
## iotevents -0% implemented +
+0% implemented + - [ ] create_detector_model - [ ] create_input - [ ] delete_detector_model @@ -4204,16 +4699,22 @@ - [ ] untag_resource - [ ] update_detector_model - [ ] update_input +
## iotevents-data -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] batch_update_detector - [ ] describe_detector - [ ] list_detectors +
## iotsecuretunneling -0% implemented +
+0% implemented + - [ ] close_tunnel - [ ] describe_tunnel - [ ] list_tags_for_resource @@ -4221,9 +4722,72 @@ - [ ] open_tunnel - [ ] tag_resource - [ ] untag_resource +
+ +## iotsitewise +
+0% implemented + +- [ ] associate_assets +- [ ] batch_associate_project_assets +- [ ] batch_disassociate_project_assets +- [ ] batch_put_asset_property_value +- [ ] create_access_policy +- [ ] create_asset +- [ ] create_asset_model +- [ ] create_dashboard +- [ ] create_gateway +- [ ] create_portal +- [ ] create_project +- [ ] delete_access_policy +- [ ] delete_asset +- [ ] delete_asset_model +- [ ] delete_dashboard +- [ ] delete_gateway +- [ ] delete_portal +- [ ] delete_project +- [ ] describe_access_policy +- [ ] describe_asset +- [ ] describe_asset_model +- [ ] describe_asset_property +- [ ] describe_dashboard +- [ ] describe_gateway +- [ ] describe_gateway_capability_configuration +- [ ] describe_logging_options +- [ ] describe_portal +- [ ] describe_project +- [ ] disassociate_assets +- [ ] get_asset_property_aggregates +- [ ] get_asset_property_value +- [ ] get_asset_property_value_history +- [ ] list_access_policies +- [ ] list_asset_models +- [ ] list_assets +- [ ] list_associated_assets +- [ ] list_dashboards +- [ ] list_gateways +- [ ] list_portals +- [ ] list_project_assets +- [ ] list_projects +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_access_policy +- [ ] update_asset +- [ ] update_asset_model +- [ ] update_asset_property +- [ ] update_dashboard +- [ ] update_gateway +- [ ] update_gateway_capability_configuration +- [ ] update_portal +- [ ] update_project +
## iotthingsgraph -0% implemented +
+0% implemented + - [ ] associate_entity_to_thing - [ ] create_flow_template - [ ] create_system_instance @@ -4259,9 +4823,12 @@ - [ ] update_flow_template - [ ] update_system_template - [ ] upload_entity_definitions +
## kafka -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_configuration - [ ] delete_cluster @@ -4274,6 +4841,7 @@ - [ ] list_clusters - [ ] list_configuration_revisions - [ ] list_configurations +- [ ] list_kafka_versions - [ ] list_nodes - [ ] list_tags_for_resource - [ ] tag_resource @@ -4282,14 +4850,18 @@ - [ ] update_broker_storage - [ ] update_cluster_configuration - [ ] update_monitoring +
## kendra -0% implemented +
+0% implemented + - [ ] batch_delete_document - [ ] batch_put_document - [ ] create_data_source - [ ] create_faq - [ ] create_index +- [ ] delete_data_source - [ ] delete_faq - [ ] delete_index - [ ] describe_data_source @@ -4299,15 +4871,21 @@ - [ ] list_data_sources - [ ] list_faqs - [ ] list_indices +- [ ] list_tags_for_resource - [ ] query - [ ] start_data_source_sync_job - [ ] stop_data_source_sync_job - [ ] submit_feedback +- [ ] tag_resource +- [ ] untag_resource - [ ] update_data_source - [ ] update_index +
## kinesis -50% implemented +
+50% implemented + - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period @@ -4336,25 +4914,38 @@ - [ ] stop_stream_encryption - [ ] subscribe_to_shard - [ ] update_shard_count +
## kinesis-video-archived-media -0% implemented -- [ ] get_dash_streaming_session_url -- [ ] get_hls_streaming_session_url +
+60% implemented + +- [X] get_clip +- [X] get_dash_streaming_session_url +- [X] get_hls_streaming_session_url - [ ] get_media_for_fragment_list - [ ] list_fragments +
## kinesis-video-media -0% implemented +
+0% implemented + - [ ] get_media +
## kinesis-video-signaling -0% implemented +
+0% implemented + - [ ] get_ice_server_config - [ ] send_alexa_offer_to_master +
## kinesisanalytics -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4375,9 +4966,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisanalyticsv2 -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4404,19 +4998,22 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisvideo -0% implemented +
+26% implemented + - [ ] create_signaling_channel -- [ ] create_stream +- [X] create_stream - [ ] delete_signaling_channel -- [ ] delete_stream +- [X] delete_stream - [ ] describe_signaling_channel -- [ ] describe_stream -- [ ] get_data_endpoint +- [X] describe_stream +- [X] get_data_endpoint - [ ] get_signaling_channel_endpoint - [ ] list_signaling_channels -- [ ] list_streams +- [X] list_streams - [ ] list_tags_for_resource - [ ] list_tags_for_stream - [ ] tag_resource @@ -4426,9 +5023,12 @@ - [ ] update_data_retention - [ ] update_signaling_channel - [ ] update_stream +
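A sketch of the newly checked Kinesis Video stream methods, assuming the decorator is exported as `mock_kinesisvideo`:

```python
import boto3
from moto import mock_kinesisvideo  # assumed export name


@mock_kinesisvideo
def test_stream_roundtrip():
    client = boto3.client("kinesisvideo", region_name="us-east-1")
    client.create_stream(StreamName="my-stream")
    info = client.describe_stream(StreamName="my-stream")["StreamInfo"]
    assert info["StreamName"] == "my-stream"
    # get_data_endpoint should return a (mocked) endpoint URL
    assert "DataEndpoint" in client.get_data_endpoint(
        StreamName="my-stream", APIName="GET_MEDIA"
    )
```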
## kms -43% implemented +
+45% implemented + - [X] cancel_key_deletion - [ ] connect_custom_key_store - [ ] create_alias @@ -4470,14 +5070,17 @@ - [X] schedule_key_deletion - [ ] sign - [X] tag_resource -- [ ] untag_resource +- [X] untag_resource - [ ] update_alias - [ ] update_custom_key_store - [X] update_key_description - [ ] verify +
## lakeformation -0% implemented +
+0% implemented + - [ ] batch_grant_permissions - [ ] batch_revoke_permissions - [ ] deregister_resource @@ -4491,18 +5094,21 @@ - [ ] register_resource - [ ] revoke_permissions - [ ] update_resource +
## lambda -32% implemented +
+44% implemented + - [ ] add_layer_version_permission -- [ ] add_permission +- [X] add_permission - [ ] create_alias - [X] create_event_source_mapping - [X] create_function - [ ] delete_alias - [X] delete_event_source_mapping - [X] delete_function -- [ ] delete_function_concurrency +- [X] delete_function_concurrency - [ ] delete_function_event_invoke_config - [ ] delete_layer_version - [ ] delete_provisioned_concurrency_config @@ -4510,13 +5116,13 @@ - [ ] get_alias - [X] get_event_source_mapping - [X] get_function -- [ ] get_function_concurrency +- [X] get_function_concurrency - [ ] get_function_configuration - [ ] get_function_event_invoke_config - [ ] get_layer_version - [ ] get_layer_version_by_arn - [ ] get_layer_version_policy -- [ ] get_policy +- [X] get_policy - [ ] get_provisioned_concurrency_config - [X] invoke - [ ] invoke_async @@ -4531,11 +5137,11 @@ - [X] list_versions_by_function - [ ] publish_layer_version - [ ] publish_version -- [ ] put_function_concurrency +- [X] put_function_concurrency - [ ] put_function_event_invoke_config - [ ] put_provisioned_concurrency_config - [ ] remove_layer_version_permission -- [ ] remove_permission +- [X] remove_permission - [X] tag_resource - [X] untag_resource - [ ] update_alias @@ -4543,9 +5149,12 @@ - [X] update_function_code - [X] update_function_configuration - [ ] update_function_event_invoke_config +
## lex-models -0% implemented +
+0% implemented + - [ ] create_bot_version - [ ] create_intent_version - [ ] create_slot_type_version @@ -4577,22 +5186,31 @@ - [ ] get_slot_type_versions - [ ] get_slot_types - [ ] get_utterances_view +- [ ] list_tags_for_resource - [ ] put_bot - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type - [ ] start_import +- [ ] tag_resource +- [ ] untag_resource +
## lex-runtime -0% implemented +
+0% implemented + - [ ] delete_session - [ ] get_session - [ ] post_content - [ ] post_text - [ ] put_session +
## license-manager -0% implemented +
+0% implemented + - [ ] create_license_configuration - [ ] delete_license_configuration - [ ] get_license_configuration @@ -4609,9 +5227,12 @@ - [ ] update_license_configuration - [ ] update_license_specifications_for_resource - [ ] update_service_settings +
## lightsail -0% implemented +
+0% implemented + - [ ] allocate_static_ip - [ ] attach_disk - [ ] attach_instances_to_load_balancer @@ -4620,6 +5241,7 @@ - [ ] close_instance_public_ports - [ ] copy_snapshot - [ ] create_cloud_formation_stack +- [ ] create_contact_method - [ ] create_disk - [ ] create_disk_from_snapshot - [ ] create_disk_snapshot @@ -4634,7 +5256,9 @@ - [ ] create_relational_database - [ ] create_relational_database_from_snapshot - [ ] create_relational_database_snapshot +- [ ] delete_alarm - [ ] delete_auto_snapshot +- [ ] delete_contact_method - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -4655,10 +5279,12 @@ - [ ] enable_add_on - [ ] export_snapshot - [ ] get_active_names +- [ ] get_alarms - [ ] get_auto_snapshots - [ ] get_blueprints - [ ] get_bundles - [ ] get_cloud_formation_stack_records +- [ ] get_contact_methods - [ ] get_disk - [ ] get_disk_snapshot - [ ] get_disk_snapshots @@ -4702,24 +5328,30 @@ - [ ] is_vpc_peered - [ ] open_instance_public_ports - [ ] peer_vpc +- [ ] put_alarm - [ ] put_instance_public_ports - [ ] reboot_instance - [ ] reboot_relational_database - [ ] release_static_ip +- [ ] send_contact_method_verification - [ ] start_instance - [ ] start_relational_database - [ ] stop_instance - [ ] stop_relational_database - [ ] tag_resource +- [ ] test_alarm - [ ] unpeer_vpc - [ ] untag_resource - [ ] update_domain_entry - [ ] update_load_balancer_attribute - [ ] update_relational_database - [ ] update_relational_database_parameters +
## logs -35% implemented +
+40% implemented + - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -4729,17 +5361,19 @@ - [X] delete_log_group - [X] delete_log_stream - [ ] delete_metric_filter +- [ ] delete_query_definition - [ ] delete_resource_policy - [X] delete_retention_policy -- [ ] delete_subscription_filter +- [X] delete_subscription_filter - [ ] describe_destinations - [ ] describe_export_tasks - [X] describe_log_groups - [X] describe_log_streams - [ ] describe_metric_filters - [ ] describe_queries +- [ ] describe_query_definitions - [ ] describe_resource_policies -- [ ] describe_subscription_filters +- [X] describe_subscription_filters - [ ] disassociate_kms_key - [X] filter_log_events - [X] get_log_events @@ -4751,17 +5385,21 @@ - [ ] put_destination_policy - [X] put_log_events - [ ] put_metric_filter +- [ ] put_query_definition - [ ] put_resource_policy - [X] put_retention_policy -- [ ] put_subscription_filter +- [X] put_subscription_filter - [ ] start_query - [ ] stop_query - [X] tag_log_group - [ ] test_metric_filter - [X] untag_log_group +
## machinelearning -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_batch_prediction - [ ] create_data_source_from_rds @@ -4790,9 +5428,12 @@ - [ ] update_data_source - [ ] update_evaluation - [ ] update_ml_model +
## macie -0% implemented +
+0% implemented + - [ ] associate_member_account - [ ] associate_s3_resources - [ ] disassociate_member_account @@ -4800,49 +5441,127 @@ - [ ] list_member_accounts - [ ] list_s3_resources - [ ] update_s3_resources +
-## managedblockchain -0% implemented +## macie2 +
+0% implemented + +- [ ] accept_invitation +- [ ] archive_findings +- [ ] batch_get_custom_data_identifiers +- [ ] create_classification_job +- [ ] create_custom_data_identifier +- [ ] create_findings_filter +- [ ] create_invitations - [ ] create_member -- [ ] create_network -- [ ] create_node -- [ ] create_proposal +- [ ] create_sample_findings +- [ ] decline_invitations +- [ ] delete_custom_data_identifier +- [ ] delete_findings_filter +- [ ] delete_invitations - [ ] delete_member -- [ ] delete_node +- [ ] describe_buckets +- [ ] describe_classification_job +- [ ] describe_organization_configuration +- [ ] disable_macie +- [ ] disable_organization_admin_account +- [ ] disassociate_from_master_account +- [ ] disassociate_member +- [ ] enable_macie +- [ ] enable_organization_admin_account +- [ ] get_bucket_statistics +- [ ] get_classification_export_configuration +- [ ] get_custom_data_identifier +- [ ] get_finding_statistics +- [ ] get_findings +- [ ] get_findings_filter +- [ ] get_invitations_count +- [ ] get_macie_session +- [ ] get_master_account - [ ] get_member -- [ ] get_network -- [ ] get_node -- [ ] get_proposal +- [ ] get_usage_statistics +- [ ] get_usage_totals +- [ ] list_classification_jobs +- [ ] list_custom_data_identifiers +- [ ] list_findings +- [ ] list_findings_filters - [ ] list_invitations - [ ] list_members -- [ ] list_networks -- [ ] list_nodes -- [ ] list_proposal_votes -- [ ] list_proposals -- [ ] reject_invitation -- [ ] vote_on_proposal +- [ ] list_organization_admin_accounts +- [ ] list_tags_for_resource +- [ ] put_classification_export_configuration +- [ ] tag_resource +- [ ] test_custom_data_identifier +- [ ] unarchive_findings +- [ ] untag_resource +- [ ] update_classification_job +- [ ] update_findings_filter +- [ ] update_macie_session +- [ ] update_member_session +- [ ] update_organization_configuration +
+ +## managedblockchain +
+100% implemented + +- [X] create_member +- [X] create_network +- [X] create_node +- [X] create_proposal +- [X] delete_member +- [X] delete_node +- [X] get_member +- [X] get_network +- [X] get_node +- [X] get_proposal +- [X] list_invitations +- [X] list_members +- [X] list_networks +- [X] list_nodes +- [X] list_proposal_votes +- [X] list_proposals +- [X] reject_invitation +- [X] update_member +- [X] update_node +- [X] vote_on_proposal +
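Since Managed Blockchain lands fully implemented, a sketch of its busiest entry point, `create_network` (decorator name assumed; the Fabric configuration values are illustrative):

```python
import boto3
from moto import mock_managedblockchain  # assumed export name


@mock_managedblockchain
def test_create_network():
    client = boto3.client("managedblockchain", region_name="us-east-1")
    client.create_network(
        Name="testnet",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},
        VotingPolicy={"ApprovalThresholdPolicy": {
            "ThresholdPercentage": 50,
            "ProposalDurationInHours": 24,
            "ThresholdComparator": "GREATER_THAN",
        }},
        MemberConfiguration={
            "Name": "member1",
            "FrameworkConfiguration": {"Fabric": {
                "AdminUsername": "admin",
                "AdminPassword": "Passw0rd!",  # illustrative credentials
            }},
        },
    )
    assert len(client.list_networks()["Networks"]) == 1
```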
## marketplace-catalog -0% implemented +
+0% implemented + - [ ] cancel_change_set - [ ] describe_change_set - [ ] describe_entity - [ ] list_change_sets - [ ] list_entities - [ ] start_change_set +
## marketplace-entitlement -0% implemented +
+0% implemented + - [ ] get_entitlements +
## marketplacecommerceanalytics -0% implemented +
+0% implemented + - [ ] generate_data_set - [ ] start_support_data_export +
## mediaconnect -0% implemented +
+0% implemented + - [ ] add_flow_outputs +- [ ] add_flow_sources +- [ ] add_flow_vpc_interfaces - [ ] create_flow - [ ] delete_flow - [ ] describe_flow @@ -4851,17 +5570,23 @@ - [ ] list_flows - [ ] list_tags_for_resource - [ ] remove_flow_output +- [ ] remove_flow_source +- [ ] remove_flow_vpc_interface - [ ] revoke_flow_entitlement - [ ] start_flow - [ ] stop_flow - [ ] tag_resource - [ ] untag_resource +- [ ] update_flow - [ ] update_flow_entitlement - [ ] update_flow_output - [ ] update_flow_source +
## mediaconvert -0% implemented +
+0% implemented + - [ ] associate_certificate - [ ] cancel_job - [ ] create_job @@ -4887,9 +5612,12 @@ - [ ] update_job_template - [ ] update_preset - [ ] update_queue +
## medialive -0% implemented +
+0% implemented + - [ ] batch_update_schedule - [ ] create_channel - [ ] create_input @@ -4907,6 +5635,7 @@ - [ ] delete_tags - [ ] describe_channel - [ ] describe_input +- [ ] describe_input_device - [ ] describe_input_security_group - [ ] describe_multiplex - [ ] describe_multiplex_program @@ -4914,6 +5643,7 @@ - [ ] describe_reservation - [ ] describe_schedule - [ ] list_channels +- [ ] list_input_devices - [ ] list_input_security_groups - [ ] list_inputs - [ ] list_multiplex_programs @@ -4929,13 +5659,17 @@ - [ ] update_channel - [ ] update_channel_class - [ ] update_input +- [ ] update_input_device - [ ] update_input_security_group - [ ] update_multiplex - [ ] update_multiplex_program - [ ] update_reservation +
## mediapackage -0% implemented +
+0% implemented + - [ ] create_channel - [ ] create_harvest_job - [ ] create_origin_endpoint @@ -4954,9 +5688,12 @@ - [ ] untag_resource - [ ] update_channel - [ ] update_origin_endpoint +
## mediapackage-vod -0% implemented +
+0% implemented + - [ ] create_asset - [ ] create_packaging_configuration - [ ] create_packaging_group @@ -4969,38 +5706,53 @@ - [ ] list_assets - [ ] list_packaging_configurations - [ ] list_packaging_groups +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +
## mediastore -0% implemented +
+0% implemented + - [ ] create_container - [ ] delete_container - [ ] delete_container_policy - [ ] delete_cors_policy - [ ] delete_lifecycle_policy +- [ ] delete_metric_policy - [ ] describe_container - [ ] get_container_policy - [ ] get_cors_policy - [ ] get_lifecycle_policy +- [ ] get_metric_policy - [ ] list_containers - [ ] list_tags_for_resource - [ ] put_container_policy - [ ] put_cors_policy - [ ] put_lifecycle_policy +- [ ] put_metric_policy - [ ] start_access_logging - [ ] stop_access_logging - [ ] tag_resource - [ ] untag_resource +
## mediastore-data -0% implemented +
+0% implemented + - [ ] delete_object - [ ] describe_object - [ ] get_object - [ ] list_items - [ ] put_object +
## mediatailor -0% implemented +
+0% implemented + - [ ] delete_playback_configuration - [ ] get_playback_configuration - [ ] list_playback_configurations @@ -5008,16 +5760,22 @@ - [ ] put_playback_configuration - [ ] tag_resource - [ ] untag_resource +
## meteringmarketplace -0% implemented +
+0% implemented + - [ ] batch_meter_usage - [ ] meter_usage - [ ] register_usage - [ ] resolve_customer +
## mgh -0% implemented +
+0% implemented + - [ ] associate_created_artifact - [ ] associate_discovered_resource - [ ] create_progress_update_stream @@ -5027,6 +5785,7 @@ - [ ] disassociate_created_artifact - [ ] disassociate_discovered_resource - [ ] import_migration_task +- [ ] list_application_states - [ ] list_created_artifacts - [ ] list_discovered_resources - [ ] list_migration_tasks @@ -5034,15 +5793,21 @@ - [ ] notify_application_state - [ ] notify_migration_task_state - [ ] put_resource_attributes +
## migrationhub-config -0% implemented +
+0% implemented + - [ ] create_home_region_control - [ ] describe_home_region_controls - [ ] get_home_region +
## mobile -0% implemented +
+0% implemented + - [ ] create_project - [ ] delete_project - [ ] describe_bundle @@ -5052,9 +5817,12 @@ - [ ] list_bundles - [ ] list_projects - [ ] update_project +
## mq -0% implemented +
+0% implemented + - [ ] create_broker - [ ] create_configuration - [ ] create_tags @@ -5077,9 +5845,12 @@ - [ ] update_broker - [ ] update_configuration - [ ] update_user +
## mturk -0% implemented +
+0% implemented + - [ ] accept_qualification_request - [ ] approve_assignment - [ ] associate_qualification_with_worker @@ -5119,9 +5890,12 @@ - [ ] update_hit_type_of_hit - [ ] update_notification_settings - [ ] update_qualification_type +
## neptune -0% implemented +
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_source_identifier_to_subscription - [ ] add_tags_to_resource @@ -5179,9 +5953,14 @@ - [ ] reset_db_parameter_group - [ ] restore_db_cluster_from_snapshot - [ ] restore_db_cluster_to_point_in_time +- [ ] start_db_cluster +- [ ] stop_db_cluster +
## networkmanager -0% implemented +
+0% implemented + - [ ] associate_customer_gateway - [ ] associate_link - [ ] create_device @@ -5210,9 +5989,12 @@ - [ ] update_global_network - [ ] update_link - [ ] update_site +
## opsworks -12% implemented +
+12% implemented + - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip @@ -5287,9 +6069,12 @@ - [ ] update_stack - [ ] update_user_profile - [ ] update_volume +
## opsworkscm -0% implemented +
+0% implemented + - [ ] associate_node - [ ] create_backup - [ ] create_server @@ -5302,13 +6087,19 @@ - [ ] describe_servers - [ ] disassociate_node - [ ] export_server_engine_attribute +- [ ] list_tags_for_resource - [ ] restore_server - [ ] start_maintenance +- [ ] tag_resource +- [ ] untag_resource - [ ] update_server - [ ] update_server_engine_attributes +
## organizations -48% implemented +
+68% implemented + - [ ] accept_handshake - [X] attach_policy - [ ] cancel_handshake @@ -5320,7 +6111,8 @@ - [ ] decline_handshake - [ ] delete_organization - [ ] delete_organizational_unit -- [ ] delete_policy +- [X] delete_policy +- [X] deregister_delegated_administrator - [X] describe_account - [X] describe_create_account_status - [ ] describe_effective_policy @@ -5329,18 +6121,20 @@ - [X] describe_organizational_unit - [X] describe_policy - [ ] detach_policy -- [ ] disable_aws_service_access -- [ ] disable_policy_type +- [X] disable_aws_service_access +- [X] disable_policy_type - [ ] enable_all_features -- [ ] enable_aws_service_access -- [ ] enable_policy_type +- [X] enable_aws_service_access +- [X] enable_policy_type - [ ] invite_account_to_organization - [ ] leave_organization - [X] list_accounts - [X] list_accounts_for_parent -- [ ] list_aws_service_access_for_organization +- [X] list_aws_service_access_for_organization - [X] list_children - [ ] list_create_account_status +- [X] list_delegated_administrators +- [X] list_delegated_services_for_account - [ ] list_handshakes_for_account - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent @@ -5351,22 +6145,31 @@ - [X] list_tags_for_resource - [X] list_targets_for_policy - [X] move_account +- [X] register_delegated_administrator - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource -- [ ] update_organizational_unit -- [ ] update_policy +- [X] update_organizational_unit +- [X] update_policy +
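The delegated-administrator methods are the headline Organizations additions; a sketch of the expected flow (enabling service access first is an assumption carried over from the real API's requirements):

```python
import boto3
from moto import mock_organizations


@mock_organizations
def test_delegated_administrators():
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    account_id = client.create_account(
        AccountName="child", Email="child@example.com"
    )["CreateAccountStatus"]["AccountId"]
    client.enable_aws_service_access(ServicePrincipal="ssm.amazonaws.com")
    client.register_delegated_administrator(
        AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
    )
    admins = client.list_delegated_administrators()["DelegatedAdministrators"]
    assert admins[0]["Id"] == account_id
```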
## outposts -0% implemented +
+0% implemented + - [ ] create_outpost +- [ ] delete_outpost +- [ ] delete_site - [ ] get_outpost - [ ] get_outpost_instance_types - [ ] list_outposts - [ ] list_sites +
## personalize -0% implemented +
+0% implemented + - [ ] create_batch_inference_job - [ ] create_campaign - [ ] create_dataset @@ -5406,23 +6209,35 @@ - [ ] list_solution_versions - [ ] list_solutions - [ ] update_campaign +
## personalize-events -0% implemented +
+0% implemented + - [ ] put_events +
## personalize-runtime -0% implemented +
+0% implemented + - [ ] get_personalized_ranking - [ ] get_recommendations +
## pi -0% implemented +
+0% implemented + - [ ] describe_dimension_keys - [ ] get_resource_metrics +
## pinpoint -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_campaign - [ ] create_email_template @@ -5430,6 +6245,7 @@ - [ ] create_import_job - [ ] create_journey - [ ] create_push_template +- [ ] create_recommender_configuration - [ ] create_segment - [ ] create_sms_template - [ ] create_voice_template @@ -5448,6 +6264,7 @@ - [ ] delete_gcm_channel - [ ] delete_journey - [ ] delete_push_template +- [ ] delete_recommender_configuration - [ ] delete_segment - [ ] delete_sms_channel - [ ] delete_sms_template @@ -5485,6 +6302,8 @@ - [ ] get_journey_execution_activity_metrics - [ ] get_journey_execution_metrics - [ ] get_push_template +- [ ] get_recommender_configuration +- [ ] get_recommender_configurations - [ ] get_segment - [ ] get_segment_export_jobs - [ ] get_segment_import_jobs @@ -5498,6 +6317,7 @@ - [ ] get_voice_template - [ ] list_journeys - [ ] list_tags_for_resource +- [ ] list_template_versions - [ ] list_templates - [ ] phone_number_validate - [ ] put_event_stream @@ -5523,14 +6343,19 @@ - [ ] update_journey - [ ] update_journey_state - [ ] update_push_template +- [ ] update_recommender_configuration - [ ] update_segment - [ ] update_sms_channel - [ ] update_sms_template +- [ ] update_template_active_version - [ ] update_voice_channel - [ ] update_voice_template +
## pinpoint-email -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -5573,9 +6398,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## pinpoint-sms-voice -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -5583,9 +6411,12 @@ - [ ] get_configuration_set_event_destinations - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## polly -55% implemented +
+55% implemented + - [X] delete_lexicon - [X] describe_voices - [X] get_lexicon @@ -5595,37 +6426,53 @@ - [X] put_lexicon - [ ] start_speech_synthesis_task - [ ] synthesize_speech +
## pricing -0% implemented +
+0% implemented + - [ ] describe_services - [ ] get_attribute_values - [ ] get_products +
## qldb -0% implemented +
+0% implemented + +- [ ] cancel_journal_kinesis_stream - [ ] create_ledger - [ ] delete_ledger +- [ ] describe_journal_kinesis_stream - [ ] describe_journal_s3_export - [ ] describe_ledger - [ ] export_journal_to_s3 - [ ] get_block - [ ] get_digest - [ ] get_revision +- [ ] list_journal_kinesis_streams_for_ledger - [ ] list_journal_s3_exports - [ ] list_journal_s3_exports_for_ledger - [ ] list_ledgers - [ ] list_tags_for_resource +- [ ] stream_journal_to_kinesis - [ ] tag_resource - [ ] untag_resource - [ ] update_ledger +
## qldb-session -0% implemented +
+0% implemented + - [ ] send_command +
## quicksight -0% implemented +
+0% implemented + - [ ] cancel_ingestion - [ ] create_dashboard - [ ] create_data_set @@ -5676,6 +6523,7 @@ - [ ] list_user_groups - [ ] list_users - [ ] register_user +- [ ] search_dashboards - [ ] tag_resource - [ ] untag_resource - [ ] update_dashboard @@ -5691,35 +6539,42 @@ - [ ] update_template_alias - [ ] update_template_permissions - [ ] update_user +
## ram -0% implemented +
+20% implemented + - [ ] accept_resource_share_invitation - [ ] associate_resource_share - [ ] associate_resource_share_permission -- [ ] create_resource_share -- [ ] delete_resource_share +- [X] create_resource_share +- [X] delete_resource_share - [ ] disassociate_resource_share - [ ] disassociate_resource_share_permission -- [ ] enable_sharing_with_aws_organization +- [X] enable_sharing_with_aws_organization - [ ] get_permission - [ ] get_resource_policies - [ ] get_resource_share_associations - [ ] get_resource_share_invitations -- [ ] get_resource_shares +- [X] get_resource_shares - [ ] list_pending_invitation_resources - [ ] list_permissions - [ ] list_principals - [ ] list_resource_share_permissions +- [ ] list_resource_types - [ ] list_resources - [ ] promote_resource_share_created_from_policy - [ ] reject_resource_share_invitation - [ ] tag_resource - [ ] untag_resource -- [ ] update_resource_share +- [X] update_resource_share +
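A sketch of the checked RAM resource-share methods (decorator name assumed):

```python
import boto3
from moto import mock_ram  # assumed export name


@mock_ram
def test_resource_share_lifecycle():
    client = boto3.client("ram", region_name="us-east-1")
    share = client.create_resource_share(name="my-share")["resourceShare"]
    client.update_resource_share(
        resourceShareArn=share["resourceShareArn"], name="renamed-share"
    )
    shares = client.get_resource_shares(resourceOwner="SELF")["resourceShares"]
    assert shares[0]["name"] == "renamed-share"
    client.delete_resource_share(resourceShareArn=share["resourceShareArn"])
```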
## rds -0% implemented +
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_role_to_db_instance - [ ] add_source_identifier_to_subscription @@ -5727,6 +6582,7 @@ - [ ] apply_pending_maintenance_action - [ ] authorize_db_security_group_ingress - [ ] backtrack_db_cluster +- [ ] cancel_export_task - [ ] copy_db_cluster_parameter_group - [ ] copy_db_cluster_snapshot - [ ] copy_db_parameter_group @@ -5792,6 +6648,7 @@ - [ ] describe_event_categories - [ ] describe_event_subscriptions - [ ] describe_events +- [ ] describe_export_tasks - [ ] describe_global_clusters - [ ] describe_installation_media - [ ] describe_option_group_options @@ -5806,6 +6663,7 @@ - [ ] failover_db_cluster - [ ] import_installation_media - [ ] list_tags_for_resource +- [ ] modify_certificates - [ ] modify_current_db_cluster_capacity - [ ] modify_db_cluster - [ ] modify_db_cluster_endpoint @@ -5843,21 +6701,28 @@ - [ ] start_activity_stream - [ ] start_db_cluster - [ ] start_db_instance +- [ ] start_export_task - [ ] stop_activity_stream - [ ] stop_db_cluster - [ ] stop_db_instance +
## rds-data -0% implemented +
+0% implemented + - [ ] batch_execute_statement - [ ] begin_transaction - [ ] commit_transaction - [ ] execute_sql - [ ] execute_statement - [ ] rollback_transaction +
## redshift -30% implemented +
+28% implemented + - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access @@ -5877,6 +6742,7 @@ - [X] create_snapshot_copy_grant - [ ] create_snapshot_schedule - [X] create_tags +- [ ] create_usage_limit - [X] delete_cluster - [X] delete_cluster_parameter_group - [X] delete_cluster_security_group @@ -5889,6 +6755,7 @@ - [X] delete_snapshot_copy_grant - [ ] delete_snapshot_schedule - [X] delete_tags +- [ ] delete_usage_limit - [ ] describe_account_attributes - [ ] describe_cluster_db_revisions - [X] describe_cluster_parameter_groups @@ -5917,6 +6784,7 @@ - [ ] describe_storage - [ ] describe_table_restore_status - [X] describe_tags +- [ ] describe_usage_limits - [ ] disable_logging - [X] disable_snapshot_copy - [ ] enable_logging @@ -5935,18 +6803,24 @@ - [ ] modify_scheduled_action - [X] modify_snapshot_copy_retention_period - [ ] modify_snapshot_schedule +- [ ] modify_usage_limit +- [ ] pause_cluster - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group - [ ] resize_cluster - [X] restore_from_cluster_snapshot - [ ] restore_table_from_cluster_snapshot +- [ ] resume_cluster - [ ] revoke_cluster_security_group_ingress - [ ] revoke_snapshot_access - [ ] rotate_encryption_key +
## rekognition -0% implemented +
+0% implemented + - [ ] compare_faces - [ ] create_collection - [ ] create_project @@ -5954,6 +6828,8 @@ - [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_project +- [ ] delete_project_version - [ ] delete_stream_processor - [ ] describe_collection - [ ] describe_project_versions @@ -5971,6 +6847,7 @@ - [ ] get_face_search - [ ] get_label_detection - [ ] get_person_tracking +- [ ] get_text_detection - [ ] index_faces - [ ] list_collections - [ ] list_faces @@ -5986,11 +6863,15 @@ - [ ] start_person_tracking - [ ] start_project_version - [ ] start_stream_processor +- [ ] start_text_detection - [ ] stop_project_version - [ ] stop_stream_processor +
## resource-groups -75% implemented +
+75% implemented + - [X] create_group - [X] delete_group - [X] get_group @@ -6003,9 +6884,12 @@ - [X] untag - [X] update_group - [X] update_group_query +
## resourcegroupstaggingapi -37% implemented +
+37% implemented + - [ ] describe_report_creation - [ ] get_compliance_summary - [X] get_resources @@ -6014,12 +6898,16 @@ - [ ] start_report_creation - [ ] tag_resources - [ ] untag_resources +
## robomaker -0% implemented +
+0% implemented + - [ ] batch_describe_simulation_job - [ ] cancel_deployment_job - [ ] cancel_simulation_job +- [ ] cancel_simulation_job_batch - [ ] create_deployment_job - [ ] create_fleet - [ ] create_robot @@ -6039,23 +6927,29 @@ - [ ] describe_robot_application - [ ] describe_simulation_application - [ ] describe_simulation_job +- [ ] describe_simulation_job_batch - [ ] list_deployment_jobs - [ ] list_fleets - [ ] list_robot_applications - [ ] list_robots - [ ] list_simulation_applications +- [ ] list_simulation_job_batches - [ ] list_simulation_jobs - [ ] list_tags_for_resource - [ ] register_robot - [ ] restart_simulation_job +- [ ] start_simulation_job_batch - [ ] sync_deployment_job - [ ] tag_resource - [ ] untag_resource - [ ] update_robot_application - [ ] update_simulation_application +
## route53 -12% implemented +
+12% implemented + - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets - [X] change_tags_for_resource @@ -6112,9 +7006,14 @@ - [ ] update_hosted_zone_comment - [ ] update_traffic_policy_comment - [ ] update_traffic_policy_instance +
## route53domains -0% implemented +
+0% implemented + +- [ ] accept_domain_transfer_from_another_aws_account +- [ ] cancel_domain_transfer_to_another_aws_account - [ ] check_domain_availability - [ ] check_domain_transferability - [ ] delete_tags_for_domain @@ -6130,18 +7029,23 @@ - [ ] list_operations - [ ] list_tags_for_domain - [ ] register_domain +- [ ] reject_domain_transfer_from_another_aws_account - [ ] renew_domain - [ ] resend_contact_reachability_email - [ ] retrieve_domain_auth_code - [ ] transfer_domain +- [ ] transfer_domain_to_another_aws_account - [ ] update_domain_contact - [ ] update_domain_contact_privacy - [ ] update_domain_nameservers - [ ] update_tags_for_domain - [ ] view_billing +
## route53resolver -0% implemented +
+0% implemented + - [ ] associate_resolver_endpoint_ip_address - [ ] associate_resolver_rule - [ ] create_resolver_endpoint @@ -6164,9 +7068,12 @@ - [ ] untag_resource - [ ] update_resolver_endpoint - [ ] update_resolver_rule +
## s3 -14% implemented +
+26% implemented + - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -6175,65 +7082,65 @@ - [X] delete_bucket - [ ] delete_bucket_analytics_configuration - [X] delete_bucket_cors -- [ ] delete_bucket_encryption +- [X] delete_bucket_encryption - [ ] delete_bucket_inventory_configuration -- [X] delete_bucket_lifecycle +- [ ] delete_bucket_lifecycle - [ ] delete_bucket_metrics_configuration - [X] delete_bucket_policy - [ ] delete_bucket_replication - [X] delete_bucket_tagging - [ ] delete_bucket_website - [X] delete_object -- [ ] delete_object_tagging -- [X] delete_objects -- [X] delete_public_access_block +- [X] delete_object_tagging +- [ ] delete_objects +- [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration - [X] get_bucket_cors -- [ ] get_bucket_encryption +- [X] get_bucket_encryption - [ ] get_bucket_inventory_configuration -- [X] get_bucket_lifecycle -- [X] get_bucket_lifecycle_configuration -- [X] get_bucket_location +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location - [X] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration +- [X] get_bucket_notification_configuration - [X] get_bucket_policy -- [X] get_bucket_policy_status +- [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment - [X] get_bucket_tagging - [X] get_bucket_versioning - [ ] get_bucket_website - [X] get_object -- [X] get_object_acl +- [ ] get_object_acl - [ ] get_object_legal_hold - [ ] get_object_lock_configuration - [ ] get_object_retention - [ ] get_object_tagging - [ ] get_object_torrent -- [X] get_public_access_block +- [ ] get_public_access_block - [ ] head_bucket - [ ] head_object - [ ] list_bucket_analytics_configurations - [ ] list_bucket_inventory_configurations - [ ] list_bucket_metrics_configurations -- [X] list_buckets -- [X] list_multipart_uploads +- [ ] list_buckets +- [ ] list_multipart_uploads - [ ] list_object_versions -- [X] list_objects -- [X] list_objects_v2 +- [ ] list_objects +- [ ] list_objects_v2 - [ ] list_parts - [X] put_bucket_accelerate_configuration -- [X] put_bucket_acl +- [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors -- [ ] put_bucket_encryption +- [X] put_bucket_encryption - [ ] put_bucket_inventory_configuration -- [X] put_bucket_lifecycle -- [X] put_bucket_lifecycle_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration - [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] put_bucket_notification @@ -6242,41 +7149,50 @@ - [ ] put_bucket_replication - [ ] put_bucket_request_payment - [X] put_bucket_tagging -- [X] put_bucket_versioning +- [ ] put_bucket_versioning - [ ] put_bucket_website -- [X] put_object +- [ ] put_object - [ ] put_object_acl - [ ] put_object_legal_hold - [ ] put_object_lock_configuration - [ ] put_object_retention - [ ] put_object_tagging -- [X] put_public_access_block +- [ ] put_public_access_block - [ ] restore_object - [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +
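The S3 changes add default-encryption support; a minimal sketch of the put/get/delete cycle:

```python
import boto3
from moto import mock_s3


@mock_s3
def test_bucket_encryption():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="mybucket")
    s3.put_bucket_encryption(
        Bucket="mybucket",
        ServerSideEncryptionConfiguration={"Rules": [
            {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
        ]},
    )
    rules = s3.get_bucket_encryption(Bucket="mybucket")[
        "ServerSideEncryptionConfiguration"]["Rules"]
    assert rules[0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "AES256"
    s3.delete_bucket_encryption(Bucket="mybucket")
```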
## s3control -0% implemented +
+0% implemented + - [ ] create_access_point - [ ] create_job - [ ] delete_access_point - [ ] delete_access_point_policy +- [ ] delete_job_tagging - [ ] delete_public_access_block - [ ] describe_job - [ ] get_access_point - [ ] get_access_point_policy - [ ] get_access_point_policy_status +- [ ] get_job_tagging - [ ] get_public_access_block - [ ] list_access_points - [ ] list_jobs - [ ] put_access_point_policy +- [ ] put_job_tagging - [ ] put_public_access_block - [ ] update_job_priority - [ ] update_job_status +
## sagemaker -0% implemented +
+12% implemented + - [ ] add_tags - [ ] associate_trial_component - [ ] create_algorithm @@ -6285,22 +7201,22 @@ - [ ] create_code_repository - [ ] create_compilation_job - [ ] create_domain -- [ ] create_endpoint -- [ ] create_endpoint_config +- [X] create_endpoint +- [X] create_endpoint_config - [ ] create_experiment - [ ] create_flow_definition - [ ] create_human_task_ui - [ ] create_hyper_parameter_tuning_job - [ ] create_labeling_job -- [ ] create_model +- [X] create_model - [ ] create_model_package - [ ] create_monitoring_schedule -- [ ] create_notebook_instance +- [X] create_notebook_instance - [ ] create_notebook_instance_lifecycle_config - [ ] create_presigned_domain_url - [ ] create_presigned_notebook_instance_url - [ ] create_processing_job -- [ ] create_training_job +- [X] create_training_job - [ ] create_transform_job - [ ] create_trial - [ ] create_trial_component @@ -6310,14 +7226,14 @@ - [ ] delete_app - [ ] delete_code_repository - [ ] delete_domain -- [ ] delete_endpoint -- [ ] delete_endpoint_config +- [X] delete_endpoint +- [X] delete_endpoint_config - [ ] delete_experiment - [ ] delete_flow_definition -- [ ] delete_model +- [X] delete_model - [ ] delete_model_package - [ ] delete_monitoring_schedule -- [ ] delete_notebook_instance +- [X] delete_notebook_instance - [ ] delete_notebook_instance_lifecycle_config - [ ] delete_tags - [ ] delete_trial @@ -6330,25 +7246,26 @@ - [ ] describe_code_repository - [ ] describe_compilation_job - [ ] describe_domain -- [ ] describe_endpoint -- [ ] describe_endpoint_config +- [X] describe_endpoint +- [X] describe_endpoint_config - [ ] describe_experiment - [ ] describe_flow_definition - [ ] describe_human_task_ui - [ ] describe_hyper_parameter_tuning_job - [ ] describe_labeling_job -- [ ] describe_model +- [X] describe_model - [ ] describe_model_package - [ ] describe_monitoring_schedule - [ ] describe_notebook_instance - [ ] describe_notebook_instance_lifecycle_config - [ ] describe_processing_job - [ ] describe_subscribed_workteam -- [ ] describe_training_job +- [X] describe_training_job - [ ] describe_transform_job - [ ] describe_trial - [ ] describe_trial_component - [ ] describe_user_profile +- [ ] describe_workforce - [ ] describe_workteam - [ ] disassociate_trial_component - [ ] get_search_suggestions @@ -6368,7 +7285,7 @@ - [ ] list_labeling_jobs - [ ] list_labeling_jobs_for_workteam - [ ] list_model_packages -- [ ] list_models +- [X] list_models - [ ] list_monitoring_executions - [ ] list_monitoring_schedules - [ ] list_notebook_instance_lifecycle_configs @@ -6386,13 +7303,13 @@ - [ ] render_ui_template - [ ] search - [ ] start_monitoring_schedule -- [ ] start_notebook_instance +- [X] start_notebook_instance - [ ] stop_auto_ml_job - [ ] stop_compilation_job - [ ] stop_hyper_parameter_tuning_job - [ ] stop_labeling_job - [ ] stop_monitoring_schedule -- [ ] stop_notebook_instance +- [X] stop_notebook_instance - [ ] stop_processing_job - [ ] stop_training_job - [ ] stop_transform_job @@ -6407,22 +7324,32 @@ - [ ] update_trial - [ ] update_trial_component - [ ] update_user_profile +- [ ] update_workforce - [ ] update_workteam +
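A sketch of the checked SageMaker model methods (decorator name assumed; the role and image ARNs are placeholders):

```python
import boto3
from moto import mock_sagemaker  # assumed export name


@mock_sagemaker
def test_model_roundtrip():
    client = boto3.client("sagemaker", region_name="us-east-1")
    client.create_model(
        ModelName="my-model",
        ExecutionRoleArn="arn:aws:iam::123456789012:role/sagemaker-role",  # placeholder
        PrimaryContainer={
            "Image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/img:latest"
        },
    )
    assert client.describe_model(ModelName="my-model")["ModelName"] == "my-model"
    assert len(client.list_models()["Models"]) == 1
    client.delete_model(ModelName="my-model")
```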
## sagemaker-a2i-runtime -0% implemented +
+0% implemented + - [ ] delete_human_loop - [ ] describe_human_loop - [ ] list_human_loops - [ ] start_human_loop - [ ] stop_human_loop +
## sagemaker-runtime -0% implemented +
+0% implemented + - [ ] invoke_endpoint +
## savingsplans -0% implemented +
+0% implemented + - [ ] create_savings_plan - [ ] describe_savings_plan_rates - [ ] describe_savings_plans @@ -6431,14 +7358,18 @@ - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## schemas -0% implemented +
+0% implemented + - [ ] create_discoverer - [ ] create_registry - [ ] create_schema - [ ] delete_discoverer - [ ] delete_registry +- [ ] delete_resource_policy - [ ] delete_schema - [ ] delete_schema_version - [ ] describe_code_binding @@ -6447,25 +7378,28 @@ - [ ] describe_schema - [ ] get_code_binding_source - [ ] get_discovered_schema +- [ ] get_resource_policy - [ ] list_discoverers - [ ] list_registries - [ ] list_schema_versions - [ ] list_schemas - [ ] list_tags_for_resource -- [ ] lock_service_linked_role - [ ] put_code_binding +- [ ] put_resource_policy - [ ] search_schemas - [ ] start_discoverer - [ ] stop_discoverer - [ ] tag_resource -- [ ] unlock_service_linked_role - [ ] untag_resource - [ ] update_discoverer - [ ] update_registry - [ ] update_schema +
## sdb -0% implemented +
+0% implemented + - [ ] batch_delete_attributes - [ ] batch_put_attributes - [ ] create_domain @@ -6476,9 +7410,12 @@ - [ ] list_domains - [ ] put_attributes - [ ] select +
## secretsmanager -61% implemented +
+66% implemented + - [ ] cancel_rotate_secret - [X] create_secret - [ ] delete_resource_policy @@ -6495,15 +7432,19 @@ - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource -- [ ] update_secret +- [X] update_secret - [ ] update_secret_version_stage +
## securityhub -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] batch_disable_standards - [ ] batch_enable_standards - [ ] batch_import_findings +- [ ] batch_update_findings - [ ] create_action_target - [ ] create_insight - [ ] create_members @@ -6515,6 +7456,8 @@ - [ ] describe_action_targets - [ ] describe_hub - [ ] describe_products +- [ ] describe_standards +- [ ] describe_standards_controls - [ ] disable_import_findings_for_product - [ ] disable_security_hub - [ ] disassociate_from_master_account @@ -6538,9 +7481,13 @@ - [ ] update_action_target - [ ] update_findings - [ ] update_insight +- [ ] update_standards_control +
## serverlessrepo -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set @@ -6553,10 +7500,14 @@ - [ ] list_application_versions - [ ] list_applications - [ ] put_application_policy +- [ ] unshare_application - [ ] update_application +
## service-quotas -0% implemented +
+0% implemented + - [ ] associate_service_quota_template - [ ] delete_service_quota_increase_request_from_template - [ ] disassociate_service_quota_template @@ -6573,9 +7524,12 @@ - [ ] list_services - [ ] put_service_quota_increase_request_into_template - [ ] request_service_quota_increase +
## servicecatalog -0% implemented +
+0% implemented + - [ ] accept_portfolio_share - [ ] associate_budget_with_resource - [ ] associate_principal_with_portfolio @@ -6659,9 +7613,12 @@ - [ ] update_provisioning_artifact - [ ] update_service_action - [ ] update_tag_option +
## servicediscovery -0% implemented +
+0% implemented + - [ ] create_http_namespace - [ ] create_private_dns_namespace - [ ] create_public_dns_namespace @@ -6682,17 +7639,20 @@ - [ ] register_instance - [ ] update_instance_custom_health_status - [ ] update_service +
## ses -14% implemented +
+23% implemented + - [ ] clone_receipt_rule_set -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination +- [X] create_configuration_set +- [X] create_configuration_set_event_destination - [ ] create_configuration_set_tracking_options - [ ] create_custom_verification_email_template - [ ] create_receipt_filter -- [ ] create_receipt_rule -- [ ] create_receipt_rule_set +- [X] create_receipt_rule +- [X] create_receipt_rule_set - [ ] create_template - [ ] delete_configuration_set - [ ] delete_configuration_set_event_destination @@ -6717,15 +7677,15 @@ - [ ] get_identity_policies - [ ] get_identity_verification_attributes - [X] get_send_quota -- [ ] get_send_statistics -- [ ] get_template +- [X] get_send_statistics +- [X] get_template - [ ] list_configuration_sets - [ ] list_custom_verification_email_templates - [X] list_identities - [ ] list_identity_policies - [ ] list_receipt_filters - [ ] list_receipt_rule_sets -- [ ] list_templates +- [X] list_templates - [X] list_verified_email_addresses - [ ] put_configuration_set_delivery_options - [ ] put_identity_policy @@ -6756,9 +7716,12 @@ - [ ] verify_domain_identity - [X] verify_email_address - [X] verify_email_identity +
## sesv2 -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -6808,11 +7771,15 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## shield -0% implemented +
+0% implemented + - [ ] associate_drt_log_bucket - [ ] associate_drt_role +- [ ] associate_health_check - [ ] create_protection - [ ] create_subscription - [ ] delete_protection @@ -6824,14 +7791,18 @@ - [ ] describe_subscription - [ ] disassociate_drt_log_bucket - [ ] disassociate_drt_role +- [ ] disassociate_health_check - [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections - [ ] update_emergency_contact_settings - [ ] update_subscription +
## signer -0% implemented +
+0% implemented + - [ ] cancel_signing_profile - [ ] describe_signing_job - [ ] get_signing_platform @@ -6844,9 +7815,12 @@ - [ ] start_signing_job - [ ] tag_resource - [ ] untag_resource +
## sms -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_replication_job - [ ] delete_app @@ -6875,9 +7849,12 @@ - [ ] terminate_app - [ ] update_app - [ ] update_replication_job +
## sms-voice -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -6886,9 +7863,12 @@ - [ ] list_configuration_sets - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## snowball -0% implemented +
+0% implemented + - [ ] cancel_cluster - [ ] cancel_job - [ ] create_address @@ -6908,9 +7888,12 @@ - [ ] list_jobs - [ ] update_cluster - [ ] update_job +
## sns -63% implemented +
+63% implemented + - [X] add_permission - [ ] check_if_phone_number_is_opted_out - [ ] confirm_subscription @@ -6944,9 +7927,12 @@ - [X] tag_resource - [X] unsubscribe - [X] untag_resource +
## sqs -85% implemented +
+85% implemented + - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -6967,23 +7953,26 @@ - [X] set_queue_attributes - [X] tag_queue - [X] untag_queue +
## ssm -11% implemented +
+18% implemented + - [X] add_tags_to_resource - [ ] cancel_command - [ ] cancel_maintenance_window_execution - [ ] create_activation - [ ] create_association - [ ] create_association_batch -- [ ] create_document +- [X] create_document - [ ] create_maintenance_window - [ ] create_ops_item - [ ] create_patch_baseline - [ ] create_resource_data_sync - [ ] delete_activation - [ ] delete_association -- [ ] delete_document +- [X] delete_document - [ ] delete_inventory - [ ] delete_maintenance_window - [X] delete_parameter @@ -7001,7 +7990,7 @@ - [ ] describe_automation_executions - [ ] describe_automation_step_executions - [ ] describe_available_patches -- [ ] describe_document +- [X] describe_document - [ ] describe_document_permission - [ ] describe_effective_instance_associations - [ ] describe_effective_patches_for_patch_baseline @@ -7032,7 +8021,7 @@ - [ ] get_connection_status - [ ] get_default_patch_baseline - [ ] get_deployable_patch_snapshot_for_instance -- [ ] get_document +- [X] get_document - [ ] get_inventory - [ ] get_inventory_schema - [ ] get_maintenance_window @@ -7049,7 +8038,7 @@ - [ ] get_patch_baseline - [ ] get_patch_baseline_for_patch_group - [ ] get_service_setting -- [ ] label_parameter_version +- [X] label_parameter_version - [ ] list_association_versions - [ ] list_associations - [ ] list_command_invocations @@ -7057,7 +8046,7 @@ - [ ] list_compliance_items - [ ] list_compliance_summaries - [ ] list_document_versions -- [ ] list_documents +- [X] list_documents - [ ] list_inventory_entries - [ ] list_resource_compliance_summaries - [ ] list_resource_data_sync @@ -7082,8 +8071,8 @@ - [ ] terminate_session - [ ] update_association - [ ] update_association_status -- [ ] update_document -- [ ] update_document_default_version +- [X] update_document +- [X] update_document_default_version - [ ] update_maintenance_window - [ ] update_maintenance_window_target - [ ] update_maintenance_window_task @@ -7092,22 +8081,31 @@ - [ ] update_patch_baseline - [ ] update_resource_data_sync - [ ] update_service_setting +
## sso -0% implemented +
+0% implemented + - [ ] get_role_credentials - [ ] list_account_roles - [ ] list_accounts - [ ] logout +
## sso-oidc -0% implemented +
+0% implemented + - [ ] create_token - [ ] register_client - [ ] start_device_authorization +
## stepfunctions -36% implemented +
+36% implemented + - [ ] create_activity - [X] create_state_machine - [ ] delete_activity @@ -7127,12 +8125,15 @@ - [ ] send_task_success - [X] start_execution - [X] stop_execution -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_state_machine +- [X] tag_resource +- [X] untag_resource +- [X] update_state_machine +
## storagegateway -0% implemented +
+0% implemented + - [ ] activate_gateway - [ ] add_cache - [ ] add_tags_to_resource @@ -7150,6 +8151,7 @@ - [ ] create_stored_iscsi_volume - [ ] create_tape_with_barcode - [ ] create_tapes +- [ ] delete_automatic_tape_creation_policy - [ ] delete_bandwidth_rate_limit - [ ] delete_chap_credentials - [ ] delete_file_share @@ -7179,6 +8181,7 @@ - [ ] detach_volume - [ ] disable_gateway - [ ] join_domain +- [ ] list_automatic_tape_creation_policies - [ ] list_file_shares - [ ] list_gateways - [ ] list_local_disks @@ -7198,6 +8201,7 @@ - [ ] shutdown_gateway - [ ] start_availability_monitor_test - [ ] start_gateway +- [ ] update_automatic_tape_creation_policy - [ ] update_bandwidth_rate_limit - [ ] update_chap_credentials - [ ] update_gateway_information @@ -7208,20 +8212,26 @@ - [ ] update_smb_security_strategy - [ ] update_snapshot_schedule - [ ] update_vtl_device_type +
## sts -50% implemented +
+62% implemented + - [X] assume_role -- [ ] assume_role_with_saml +- [X] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info -- [ ] get_caller_identity +- [X] get_caller_identity - [X] get_federation_token - [X] get_session_token +
## support -0% implemented +
+0% implemented + - [ ] add_attachments_to_set - [ ] add_communication_to_case - [ ] create_case @@ -7236,9 +8246,12 @@ - [ ] describe_trusted_advisor_checks - [ ] refresh_trusted_advisor_check - [ ] resolve_case +
## swf -48% implemented +
+51% implemented + - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -7273,33 +8286,75 @@ - [ ] tag_resource - [X] terminate_workflow_execution - [ ] undeprecate_activity_type -- [ ] undeprecate_domain +- [X] undeprecate_domain - [ ] undeprecate_workflow_type - [ ] untag_resource +
+ +## synthetics +
+0% implemented + +- [ ] create_canary +- [ ] delete_canary +- [ ] describe_canaries +- [ ] describe_canaries_last_run +- [ ] describe_runtime_versions +- [ ] get_canary +- [ ] get_canary_runs +- [ ] list_tags_for_resource +- [ ] start_canary +- [ ] stop_canary +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_canary +
## textract -0% implemented +
+0% implemented + - [ ] analyze_document - [ ] detect_document_text - [ ] get_document_analysis - [ ] get_document_text_detection - [ ] start_document_analysis - [ ] start_document_text_detection +
## transcribe -0% implemented +
+0% implemented + +- [ ] create_medical_vocabulary - [ ] create_vocabulary +- [ ] create_vocabulary_filter +- [ ] delete_medical_transcription_job +- [ ] delete_medical_vocabulary - [ ] delete_transcription_job - [ ] delete_vocabulary +- [ ] delete_vocabulary_filter +- [ ] get_medical_transcription_job +- [ ] get_medical_vocabulary - [ ] get_transcription_job - [ ] get_vocabulary +- [ ] get_vocabulary_filter +- [ ] list_medical_transcription_jobs +- [ ] list_medical_vocabularies - [ ] list_transcription_jobs - [ ] list_vocabularies +- [ ] list_vocabulary_filters +- [ ] start_medical_transcription_job - [ ] start_transcription_job +- [ ] update_medical_vocabulary - [ ] update_vocabulary +- [ ] update_vocabulary_filter +
## transfer -0% implemented +
+0% implemented + - [ ] create_server - [ ] create_user - [ ] delete_server @@ -7318,17 +8373,27 @@ - [ ] untag_resource - [ ] update_server - [ ] update_user +
## translate -0% implemented +
+0% implemented + - [ ] delete_terminology +- [ ] describe_text_translation_job - [ ] get_terminology - [ ] import_terminology - [ ] list_terminologies +- [ ] list_text_translation_jobs +- [ ] start_text_translation_job +- [ ] stop_text_translation_job - [ ] translate_text +
## waf -0% implemented +
+0% implemented + - [ ] create_byte_match_set - [ ] create_geo_match_set - [ ] create_ip_set @@ -7340,6 +8405,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -7405,9 +8471,12 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## waf-regional -0% implemented +
+0% implemented + - [ ] associate_web_acl - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -7420,6 +8489,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -7488,17 +8558,22 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## wafv2 -0% implemented +
+0% implemented + - [ ] associate_web_acl - [ ] check_capacity - [ ] create_ip_set - [ ] create_regex_pattern_set - [ ] create_rule_group - [ ] create_web_acl +- [ ] delete_firewall_manager_rule_groups - [ ] delete_ip_set - [ ] delete_logging_configuration +- [ ] delete_permission_policy - [ ] delete_regex_pattern_set - [ ] delete_rule_group - [ ] delete_web_acl @@ -7506,6 +8581,7 @@ - [ ] disassociate_web_acl - [ ] get_ip_set - [ ] get_logging_configuration +- [ ] get_permission_policy - [ ] get_rate_based_statement_managed_keys - [ ] get_regex_pattern_set - [ ] get_rule_group @@ -7521,15 +8597,19 @@ - [ ] list_tags_for_resource - [ ] list_web_acls - [ ] put_logging_configuration +- [ ] put_permission_policy - [ ] tag_resource - [ ] untag_resource - [ ] update_ip_set - [ ] update_regex_pattern_set - [ ] update_rule_group - [ ] update_web_acl +
## workdocs -0% implemented +
+0% implemented + - [ ] abort_document_version_upload - [ ] activate_user - [ ] add_resource_permissions @@ -7571,9 +8651,12 @@ - [ ] update_document_version - [ ] update_folder - [ ] update_user +
## worklink -0% implemented +
+0% implemented + - [ ] associate_domain - [ ] associate_website_authorization_provider - [ ] associate_website_certificate_authority @@ -7604,15 +8687,19 @@ - [ ] update_domain_metadata - [ ] update_fleet_metadata - [ ] update_identity_provider_configuration +
## workmail -0% implemented +
+0% implemented + - [ ] associate_delegate_to_resource - [ ] associate_member_to_group - [ ] create_alias - [ ] create_group - [ ] create_resource - [ ] create_user +- [ ] delete_access_control_rule - [ ] delete_alias - [ ] delete_group - [ ] delete_mailbox_permissions @@ -7625,7 +8712,9 @@ - [ ] describe_user - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group +- [ ] get_access_control_effect - [ ] get_mailbox_details +- [ ] list_access_control_rules - [ ] list_aliases - [ ] list_group_members - [ ] list_groups @@ -7633,20 +8722,30 @@ - [ ] list_organizations - [ ] list_resource_delegates - [ ] list_resources +- [ ] list_tags_for_resource - [ ] list_users +- [ ] put_access_control_rule - [ ] put_mailbox_permissions - [ ] register_to_work_mail - [ ] reset_password +- [ ] tag_resource +- [ ] untag_resource - [ ] update_mailbox_quota - [ ] update_primary_email_address - [ ] update_resource +
## workmailmessageflow -0% implemented +
+0% implemented + - [ ] get_raw_message_content +
## workspaces -0% implemented +
+0% implemented + - [ ] associate_ip_groups - [ ] authorize_ip_rules - [ ] copy_workspace_image @@ -7671,6 +8770,7 @@ - [ ] disassociate_ip_groups - [ ] import_workspace_image - [ ] list_available_management_cidr_ranges +- [ ] migrate_workspace - [ ] modify_account - [ ] modify_client_properties - [ ] modify_selfservice_permissions @@ -7687,9 +8787,12 @@ - [ ] stop_workspaces - [ ] terminate_workspaces - [ ] update_rules_of_ip_group +
## xray -0% implemented +
+0% implemented + - [ ] batch_get_traces - [ ] create_group - [ ] create_sampling_rule @@ -7710,3 +8813,4 @@ - [ ] put_trace_segments - [ ] update_group - [ ] update_sampling_rule +
diff --git a/MANIFEST.in b/MANIFEST.in index bd7eb968a..51d1b223c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,6 @@ include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json include moto/ec2/resources/amis.json include moto/cognitoidp/resources/*.json +include moto/dynamodb2/parsing/reserved_keywords.txt recursive-include moto/templates * recursive-include tests * diff --git a/Makefile b/Makefile index e84d036b7..391a8efa0 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,11 @@ SHELL := /bin/bash ifeq ($(TEST_SERVER_MODE), true) # exclude test_iot and test_iotdata for now # because authentication of iot is very complicated - TEST_EXCLUDE := --exclude='test_iot.*' + + # exclude test_kinesisvideoarchivedmedia + # because testing with moto_server is difficult with data-endpoint + + TEST_EXCLUDE := -k 'not (test_iot or test_kinesisvideoarchivedmedia)' else TEST_EXCLUDE := endif @@ -19,13 +23,13 @@ lint: test-only: rm -f .coverage rm -rf cover - @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) + @pytest -sv --cov=moto --cov-report html ./tests/ $(TEST_EXCLUDE) test: lint test-only test_server: - @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ + @TEST_SERVER_MODE=true pytest -sv --cov=moto --cov-report html ./tests/ aws_managed_policies: scripts/update_managed_policies.py @@ -35,7 +39,7 @@ upload_pypi_artifact: twine upload dist/* push_dockerhub_image: - docker build -t motoserver/moto . + docker build -t motoserver/moto . --tag moto:`python setup.py --version` docker push motoserver/moto tag_github_release: @@ -53,3 +57,6 @@ implementation_coverage: scaffold: @pip install -r requirements-dev.txt > /dev/null exec python scripts/scaffold.py + +int_test: + @./scripts/int_test.sh diff --git a/README.md b/README.md index f5c45a6b6..250294ef9 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,25 @@ ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/moto.svg) ![PyPI - Downloads](https://img.shields.io/pypi/dw/moto.svg) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +## Install + +To install moto for a specific service: +```console +$ pip install moto[ec2,s3] +``` +This will install Moto, and the dependencies required for that specific service. +If you don't care about the number of dependencies, or if you want to mock many AWS services: +```console +$ pip install moto[all] +``` +Not all services might be covered, in which case you might see a warning: +`moto 1.3.16 does not provide the extra 'service'`. +You can ignore the warning, or simply install moto as is: +```console +$ pip install moto +``` + ## In a nutshell Moto is a library that allows your tests to easily mock out AWS Services. @@ -57,98 +76,58 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. 
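As a quick illustration of the decorator flow described above (a minimal sketch only — the bucket name, key, and test name are made up for this example, mirroring the pattern in the getting-started docs), a test might look like:

```python
import boto3
from moto import mock_s3


@mock_s3
def test_put_and_read_object():
    # Inside the decorated test, boto3 talks to Moto's in-memory backend,
    # never to real AWS, so the mocked account starts out empty.
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="mybucket")
    s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome")

    body = s3.get_object(Bucket="mybucket", Key="steve")["Body"].read()
    assert body == b"is awesome"
```

The same pattern works for every decorator in the table below.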
Here's the status of the other AWS services implemented: -```gherkin -|-------------------------------------------------------------------------------------| -| Service Name | Decorator | Development Status | -|-------------------------------------------------------------------------------------| -| ACM | @mock_acm | all endpoints done | -|-------------------------------------------------------------------------------------| -| API Gateway | @mock_apigateway | core endpoints done | -|-------------------------------------------------------------------------------------| -| Autoscaling | @mock_autoscaling | core endpoints done | -|-------------------------------------------------------------------------------------| -| Cloudformation | @mock_cloudformation | core endpoints done | -|-------------------------------------------------------------------------------------| -| Cloudwatch | @mock_cloudwatch | basic endpoints done | -|-------------------------------------------------------------------------------------| -| CloudwatchEvents | @mock_events | all endpoints done | -|-------------------------------------------------------------------------------------| -| Cognito Identity | @mock_cognitoidentity | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Config | @mock_config | basic endpoints done | -| | | core endpoints done | -|-------------------------------------------------------------------------------------| -| Data Pipeline | @mock_datapipeline | basic endpoints done | -|-------------------------------------------------------------------------------------| -| DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | -|-------------------------------------------------------------------------------------| -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | -|-------------------------------------------------------------------------------------| -| ECR | @mock_ecr | basic endpoints done | -|-------------------------------------------------------------------------------------| -| ECS | @mock_ecs | basic endpoints done | -|-------------------------------------------------------------------------------------| -| ELB | @mock_elb | core endpoints done | -|-------------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | all endpoints done | -|-------------------------------------------------------------------------------------| -| EMR | @mock_emr | core endpoints done | -|-------------------------------------------------------------------------------------| -| Glacier | @mock_glacier | core endpoints done | -|-------------------------------------------------------------------------------------| -| IAM | @mock_iam | core endpoints done | -|-------------------------------------------------------------------------------------| -| IoT | @mock_iot | core endpoints done | -| | @mock_iotdata | core endpoints done | -|-------------------------------------------------------------------------------------| -| Kinesis | @mock_kinesis | core endpoints done | 
-|-------------------------------------------------------------------------------------| -| KMS | @mock_kms | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Lambda | @mock_lambda | basic endpoints done, requires | -| | | docker | -|-------------------------------------------------------------------------------------| -| Logs | @mock_logs | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Organizations | @mock_organizations | some core endpoints done | -|-------------------------------------------------------------------------------------| -| Polly | @mock_polly | all endpoints done | -|-------------------------------------------------------------------------------------| -| RDS | @mock_rds | core endpoints done | -|-------------------------------------------------------------------------------------| -| RDS2 | @mock_rds2 | core endpoints done | -|-------------------------------------------------------------------------------------| -| Redshift | @mock_redshift | core endpoints done | -|-------------------------------------------------------------------------------------| -| Route53 | @mock_route53 | core endpoints done | -|-------------------------------------------------------------------------------------| -| S3 | @mock_s3 | core endpoints done | -|-------------------------------------------------------------------------------------| -| SecretsManager | @mock_secretsmanager | basic endpoints done | -|-------------------------------------------------------------------------------------| -| SES | @mock_ses | all endpoints done | -|-------------------------------------------------------------------------------------| -| SNS | @mock_sns | all endpoints done | -|-------------------------------------------------------------------------------------| -| SQS | @mock_sqs | core endpoints done | -|-------------------------------------------------------------------------------------| -| SSM | @mock_ssm | core endpoints done | -|-------------------------------------------------------------------------------------| -| STS | @mock_sts | core endpoints done | -|-------------------------------------------------------------------------------------| -| SWF | @mock_swf | basic endpoints done | -|-------------------------------------------------------------------------------------| -| X-Ray | @mock_xray | all endpoints done | -|-------------------------------------------------------------------------------------| -``` +| Service Name | Decorator | Development Status | Comment | +|---------------------------|-----------------------|---------------------------------|-----------------------------| +| ACM | @mock_acm | all endpoints done | | +| API Gateway | @mock_apigateway | core endpoints done | | +| Application Autoscaling | @mock_applicationautoscaling | basic endpoints done | | +| Athena | @mock_athena | core endpoints done | | +| Autoscaling | @mock_autoscaling | core endpoints done | | +| Cloudformation | @mock_cloudformation | core endpoints done | | +| Cloudwatch | @mock_cloudwatch | basic endpoints done | | +| CloudwatchEvents | @mock_events | all endpoints done | | +| Cognito Identity | @mock_cognitoidentity | basic endpoints done | | +| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | | +| Config | @mock_config | basic + core endpoints done | | +| Data Pipeline | @mock_datapipeline | basic endpoints done | | +| DynamoDB | @mock_dynamodb | core 
endpoints done | API 20111205. Deprecated. | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | API 20120810 (Latest) | +| EC2 | @mock_ec2 | core endpoints done | | +| - AMI | | core endpoints done | | +| - EBS | | core endpoints done | | +| - Instances | | all endpoints done | | +| - Security Groups | | core endpoints done | | +| - Tags | | all endpoints done | | +| ECR | @mock_ecr | basic endpoints done | | +| ECS | @mock_ecs | basic endpoints done | | +| ELB | @mock_elb | core endpoints done | | +| ELBv2 | @mock_elbv2 | all endpoints done | | +| EMR | @mock_emr | core endpoints done | | +| Forecast | @mock_forecast | some core endpoints done | | +| Glacier | @mock_glacier | core endpoints done | | +| Glue | @mock_glue | core endpoints done | | +| IAM | @mock_iam | core endpoints done | | +| IoT | @mock_iot | core endpoints done | | +| IoT data | @mock_iotdata | core endpoints done | | +| Kinesis | @mock_kinesis | core endpoints done | | +| KMS | @mock_kms | basic endpoints done | | +| Lambda | @mock_lambda | basic endpoints done, requires docker | | +| Logs | @mock_logs | basic endpoints done | | +| Organizations | @mock_organizations | some core endpoints done | | +| Polly | @mock_polly | all endpoints done | | +| RDS | @mock_rds | core endpoints done | | +| RDS2 | @mock_rds2 | core endpoints done | | +| Redshift | @mock_redshift | core endpoints done | | +| Route53 | @mock_route53 | core endpoints done | | +| S3 | @mock_s3 | core endpoints done | | +| SecretsManager | @mock_secretsmanager | basic endpoints done | | +| SES | @mock_ses | all endpoints done | | +| SNS | @mock_sns | all endpoints done | | +| SQS | @mock_sqs | core endpoints done | | +| SSM | @mock_ssm | core endpoints done | | +| STS | @mock_sts | core endpoints done | | +| SWF | @mock_swf | basic endpoints done | | +| X-Ray | @mock_xray | all endpoints done | | For a full list of endpoint [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) @@ -450,12 +429,14 @@ boto3.resource( ) ``` -## Install +### Caveats +The standalone server has a few caveats with certain services. The following services +require that you update your hosts file for your code to work properly: +1. `s3-control` -```console -$ pip install moto -``` +For the above services, this is required because the hostname is in the form of `AWS_ACCOUNT_ID.localhost`. +As a result, you need to add that entry to your hosts file for your tests to function properly. ## Releases diff --git a/docs/conf.py b/docs/conf.py index 28a4b4e6b..7bba967b2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,12 +20,12 @@ import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -33,32 +33,34 @@ import shlex extensions = [] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Moto' -copyright = '2015, Steve Pulec' -author = 'Steve Pulec' +project = "Moto" +copyright = "2015, Steve Pulec" +author = "Steve Pulec" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '0.4.10' +import moto + +version = moto.__version__ # The full version, including alpha/beta/rc tags. -release = '0.4.10' +release = moto.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -69,37 +71,37 @@ language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -109,156 +111,149 @@ todo_include_todos = False # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'Motodoc' +htmlhelp_basename = "Motodoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. 
-#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'Moto.tex', 'Moto Documentation', - 'Steve Pulec', 'manual'), + (master_doc, "Moto.tex", "Moto Documentation", "Steve Pulec", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'moto', 'Moto Documentation', - [author], 1) -] +man_pages = [(master_doc, "moto", "Moto Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -267,19 +262,25 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'Moto', 'Moto Documentation', - author, 'Moto', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "Moto", + "Moto Documentation", + author, + "Moto", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst index d52e76235..ffe37f3a0 100644 --- a/docs/docs/getting_started.rst +++ b/docs/docs/getting_started.rst @@ -24,8 +24,7 @@ For example, we have the following code we want to test: .. 
sourcecode:: python - import boto - from boto.s3.key import Key + import boto3 class MyModel(object): def __init__(self, name, value): @@ -33,11 +32,8 @@ For example, we have the following code we want to test: self.value = value def save(self): - conn = boto.connect_s3() - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) + s3 = boto3.client('s3', region_name='us-east-1') + s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. @@ -48,20 +44,23 @@ With a decorator wrapping, all the calls to S3 are automatically mocked out. .. sourcecode:: python - import boto + import boto3 from moto import mock_s3 from mymodule import MyModel @mock_s3 def test_my_model_save(): - conn = boto.connect_s3() + conn = boto3.resource('s3', region_name='us-east-1') # We need to create the bucket since this is all in Moto's 'virtual' AWS account - conn.create_bucket('mybucket') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Context manager ~~~~~~~~~~~~~~~ @@ -72,13 +71,16 @@ Same as the Decorator, every call inside the ``with`` statement is mocked out. def test_my_model_save(): with mock_s3(): - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Raw ~~~ @@ -91,13 +93,16 @@ You can also start and stop the mocking manually. 
mock = mock_s3() mock.start() - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' mock.stop() diff --git a/docs/index.rst b/docs/index.rst index 22ac97228..4f2d7e090 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -60,6 +60,8 @@ Currently implemented Services: +---------------------------+-----------------------+------------------------------------+ | EMR | @mock_emr | core endpoints done | +---------------------------+-----------------------+------------------------------------+ +| Forecast | @mock_forecast | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ | Glacier | @mock_glacier | core endpoints done | +---------------------------+-----------------------+------------------------------------+ | IAM | @mock_iam | core endpoints done | diff --git a/moto/__init__.py b/moto/__init__.py index 44b25f41e..fd467cbf8 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,67 +1,130 @@ from __future__ import unicode_literals -from .acm import mock_acm # noqa -from .apigateway import mock_apigateway, mock_apigateway_deprecated # noqa -from .athena import mock_athena # noqa -from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # noqa -from .awslambda import mock_lambda, mock_lambda_deprecated # noqa -from .batch import mock_batch # noqa -from .cloudformation import mock_cloudformation # noqa -from .cloudformation import mock_cloudformation_deprecated # noqa -from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # noqa -from .codecommit import mock_codecommit # noqa -from .codepipeline import mock_codepipeline # noqa -from .cognitoidentity import mock_cognitoidentity # noqa -from .cognitoidentity import mock_cognitoidentity_deprecated # noqa -from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated # noqa -from .config import mock_config # noqa -from .datapipeline import mock_datapipeline # noqa -from .datapipeline import mock_datapipeline_deprecated # noqa -from .datasync import mock_datasync # noqa -from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # noqa -from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # noqa -from .dynamodbstreams import mock_dynamodbstreams # noqa -from .ec2 import mock_ec2, mock_ec2_deprecated # noqa -from .ec2_instance_connect import mock_ec2_instance_connect # noqa -from .ecr import mock_ecr, mock_ecr_deprecated # noqa -from .ecs import mock_ecs, mock_ecs_deprecated # noqa -from .elb import mock_elb, mock_elb_deprecated # noqa -from .elbv2 import mock_elbv2 # noqa -from .emr import mock_emr, mock_emr_deprecated # noqa -from .events import mock_events # noqa -from .glacier import mock_glacier, mock_glacier_deprecated # noqa -from .glue import mock_glue # noqa -from .iam import mock_iam, mock_iam_deprecated # noqa -from .iot import mock_iot # noqa -from .iotdata import mock_iotdata # noqa -from .kinesis import mock_kinesis, mock_kinesis_deprecated # noqa -from .kms import mock_kms, mock_kms_deprecated # noqa -from .logs import mock_logs, mock_logs_deprecated # noqa -from .opsworks import mock_opsworks, mock_opsworks_deprecated # noqa -from .organizations 
import mock_organizations # noqa -from .polly import mock_polly # noqa -from .rds import mock_rds, mock_rds_deprecated # noqa -from .rds2 import mock_rds2, mock_rds2_deprecated # noqa -from .redshift import mock_redshift, mock_redshift_deprecated # noqa -from .resourcegroups import mock_resourcegroups # noqa -from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # noqa -from .route53 import mock_route53, mock_route53_deprecated # noqa -from .s3 import mock_s3, mock_s3_deprecated # noqa -from .secretsmanager import mock_secretsmanager # noqa -from .ses import mock_ses, mock_ses_deprecated # noqa -from .sns import mock_sns, mock_sns_deprecated # noqa -from .sqs import mock_sqs, mock_sqs_deprecated # noqa -from .ssm import mock_ssm # noqa -from .stepfunctions import mock_stepfunctions # noqa -from .sts import mock_sts, mock_sts_deprecated # noqa -from .swf import mock_swf, mock_swf_deprecated # noqa -from .xray import XRaySegment, mock_xray, mock_xray_client # noqa +import importlib + + +def lazy_load(module_name, element): + def f(*args, **kwargs): + module = importlib.import_module(module_name, "moto") + return getattr(module, element)(*args, **kwargs) + + return f + + +mock_acm = lazy_load(".acm", "mock_acm") +mock_apigateway = lazy_load(".apigateway", "mock_apigateway") +mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated") +mock_athena = lazy_load(".athena", "mock_athena") +mock_applicationautoscaling = lazy_load( + ".applicationautoscaling", "mock_applicationautoscaling" +) +mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling") +mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated") +mock_lambda = lazy_load(".awslambda", "mock_lambda") +mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated") +mock_batch = lazy_load(".batch", "mock_batch") +mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation") +mock_cloudformation_deprecated = lazy_load( + ".cloudformation", "mock_cloudformation_deprecated" +) +mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch") +mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated") +mock_codecommit = lazy_load(".codecommit", "mock_codecommit") +mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline") +mock_cognitoidentity = lazy_load(".cognitoidentity", "mock_cognitoidentity") +mock_cognitoidentity_deprecated = lazy_load( + ".cognitoidentity", "mock_cognitoidentity_deprecated" +) +mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp") +mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated") +mock_config = lazy_load(".config", "mock_config") +mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline") +mock_datapipeline_deprecated = lazy_load( + ".datapipeline", "mock_datapipeline_deprecated" +) +mock_datasync = lazy_load(".datasync", "mock_datasync") +mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb") +mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated") +mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2") +mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated") +mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams") +mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk") +mock_ec2 = lazy_load(".ec2", "mock_ec2") +mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated") +mock_ec2instanceconnect
= lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect") +mock_ecr = lazy_load(".ecr", "mock_ecr") +mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated") +mock_ecs = lazy_load(".ecs", "mock_ecs") +mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated") +mock_elb = lazy_load(".elb", "mock_elb") +mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated") +mock_elbv2 = lazy_load(".elbv2", "mock_elbv2") +mock_emr = lazy_load(".emr", "mock_emr") +mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated") +mock_events = lazy_load(".events", "mock_events") +mock_forecast = lazy_load(".forecast", "mock_forecast") +mock_glacier = lazy_load(".glacier", "mock_glacier") +mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated") +mock_glue = lazy_load(".glue", "mock_glue") +mock_iam = lazy_load(".iam", "mock_iam") +mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated") +mock_iot = lazy_load(".iot", "mock_iot") +mock_iotdata = lazy_load(".iotdata", "mock_iotdata") +mock_kinesis = lazy_load(".kinesis", "mock_kinesis") +mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated") +mock_kms = lazy_load(".kms", "mock_kms") +mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated") +mock_logs = lazy_load(".logs", "mock_logs") +mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated") +mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain") +mock_opsworks = lazy_load(".opsworks", "mock_opsworks") +mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated") +mock_organizations = lazy_load(".organizations", "mock_organizations") +mock_polly = lazy_load(".polly", "mock_polly") +mock_ram = lazy_load(".ram", "mock_ram") +mock_rds = lazy_load(".rds", "mock_rds") +mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated") +mock_rds2 = lazy_load(".rds2", "mock_rds2") +mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated") +mock_redshift = lazy_load(".redshift", "mock_redshift") +mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated") +mock_resourcegroups = lazy_load(".resourcegroups", "mock_resourcegroups") +mock_resourcegroupstaggingapi = lazy_load( + ".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi" +) +mock_route53 = lazy_load(".route53", "mock_route53") +mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated") +mock_s3 = lazy_load(".s3", "mock_s3") +mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated") +mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker") +mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager") +mock_ses = lazy_load(".ses", "mock_ses") +mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated") +mock_sns = lazy_load(".sns", "mock_sns") +mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated") +mock_sqs = lazy_load(".sqs", "mock_sqs") +mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated") +mock_ssm = lazy_load(".ssm", "mock_ssm") +mock_stepfunctions = lazy_load(".stepfunctions", "mock_stepfunctions") +mock_sts = lazy_load(".sts", "mock_sts") +mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated") +mock_swf = lazy_load(".swf", "mock_swf") +mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated") +mock_transcribe = lazy_load(".transcribe", "mock_transcribe") +XRaySegment = lazy_load(".xray", "XRaySegment") +mock_xray = lazy_load(".xray", "mock_xray") +mock_xray_client = lazy_load(".xray", "mock_xray_client") +mock_kinesisvideo = 
lazy_load(".kinesisvideo", "mock_kinesisvideo") +mock_kinesisvideoarchivedmedia = lazy_load( + ".kinesisvideoarchivedmedia", "mock_kinesisvideoarchivedmedia" +) # import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = "moto" -__version__ = "1.3.15.dev" +__version__ = "1.3.16.dev" try: diff --git a/moto/acm/models.py b/moto/acm/models.py index 3df541982..3963b88c2 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -1,9 +1,9 @@ from __future__ import unicode_literals import re -import json import datetime from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import AWSError from moto.ec2 import ec2_backends from .utils import make_arn_for_certificate @@ -50,18 +50,6 @@ def datetime_to_epoch(date): return int((date - datetime.datetime(1970, 1, 1)).total_seconds()) -class AWSError(Exception): - TYPE = None - STATUS = 400 - - def __init__(self, message): - self.message = message - - def response(self): - resp = {"__type": self.TYPE, "message": self.message} - return json.dumps(resp), dict(status=self.STATUS) - - class AWSValidationException(AWSError): TYPE = "ValidationException" @@ -70,6 +58,68 @@ class AWSResourceNotFoundException(AWSError): TYPE = "ResourceNotFoundException" +class AWSTooManyTagsException(AWSError): + TYPE = "TooManyTagsException" + + +class TagHolder(dict): + MAX_TAG_COUNT = 50 + MAX_KEY_LENGTH = 128 + MAX_VALUE_LENGTH = 256 + + def _validate_kv(self, key, value, index): + if len(key) > self.MAX_KEY_LENGTH: + raise AWSValidationException( + "Value '%s' at 'tags.%d.member.key' failed to satisfy constraint: Member must have length less than or equal to %s" + % (key, index, self.MAX_KEY_LENGTH) + ) + if value and len(value) > self.MAX_VALUE_LENGTH: + raise AWSValidationException( + "Value '%s' at 'tags.%d.member.value' failed to satisfy constraint: Member must have length less than or equal to %s" + % (value, index, self.MAX_VALUE_LENGTH) + ) + if key.startswith("aws:"): + raise AWSValidationException( + 'Invalid Tag Key: "%s". 
AWS internal tags cannot be changed with this API' + % key + ) + + def add(self, tags): + tags_copy = self.copy() + for i, tag in enumerate(tags): + key = tag["Key"] + value = tag.get("Value", None) + self._validate_kv(key, value, i + 1) + + tags_copy[key] = value + if len(tags_copy) > self.MAX_TAG_COUNT: + raise AWSTooManyTagsException( + "the TagSet: '{%s}' contains too many Tags" + % ", ".join(k + "=" + str(v or "") for k, v in tags_copy.items()) + ) + + self.update(tags_copy) + + def remove(self, tags): + for i, tag in enumerate(tags): + key = tag["Key"] + value = tag.get("Value", None) + self._validate_kv(key, value, i + 1) + try: + # If value isnt provided, just delete key + if value is None: + del self[key] + # If value is provided, only delete if it matches what already exists + elif self[key] == value: + del self[key] + except KeyError: + pass + + def equals(self, tags): + tags = {t["Key"]: t.get("Value", None) for t in tags} if tags else {} + return self == tags + + class CertBundle(BaseModel): def __init__( self, @@ -88,7 +138,7 @@ class CertBundle(BaseModel): self.key = private_key self._key = None self.chain = chain - self.tags = {} + self.tags = TagHolder() self._chain = None self.type = cert_type # Should really be an enum self.status = cert_status # Should really be an enum @@ -293,9 +343,12 @@ class CertBundle(BaseModel): key_algo = "EC_prime256v1" # Look for SANs - san_obj = self._cert.extensions.get_extension_for_oid( - cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME - ) + try: + san_obj = self._cert.extensions.get_extension_for_oid( + cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME + ) + except cryptography.x509.ExtensionNotFound: + san_obj = None sans = [] if san_obj is not None: sans = [item.value for item in san_obj.value] @@ -385,7 +438,7 @@ class AWSCertificateManagerBackend(BaseBackend): "expires": datetime.datetime.now() + datetime.timedelta(hours=1), } - def import_cert(self, certificate, private_key, chain=None, arn=None): + def import_cert(self, certificate, private_key, chain=None, arn=None, tags=None): if arn is not None: if arn not in self._certificates: raise self._arn_not_found(arn) @@ -400,6 +453,9 @@ class AWSCertificateManagerBackend(BaseBackend): self._certificates[bundle.arn] = bundle + if tags: + self.add_tags_to_certificate(bundle.arn, tags) + return bundle.arn def get_certificates_list(self, statuses): @@ -434,10 +490,11 @@ class AWSCertificateManagerBackend(BaseBackend): domain_validation_options, idempotency_token, subject_alt_names, + tags=None, ): if idempotency_token is not None: arn = self._get_arn_from_idempotency_token(idempotency_token) - if arn is not None: + if arn and self._certificates[arn].tags.equals(tags): return arn cert = CertBundle.generate_cert( @@ -447,34 +504,20 @@ class AWSCertificateManagerBackend(BaseBackend): self._set_idempotency_token_arn(idempotency_token, cert.arn) self._certificates[cert.arn] = cert + if tags: + cert.tags.add(tags) + return cert.arn def add_tags_to_certificate(self, arn, tags): # get_cert does arn check cert_bundle = self.get_certificate(arn) - - for tag in tags: - key = tag["Key"] - value = tag.get("Value", None) - cert_bundle.tags[key] = value + cert_bundle.tags.add(tags) def remove_tags_from_certificate(self, arn, tags): # get_cert does arn check cert_bundle = self.get_certificate(arn) - - for tag in tags: - key = tag["Key"] - value = tag.get("Value", None) - - try: - # If value isnt provided, just delete key - if value is None: - del cert_bundle.tags[key] - # If value is provided, only delete if it 
diff --git a/moto/acm/responses.py b/moto/acm/responses.py
index 13b22fa95..0908c6ff7 100644
--- a/moto/acm/responses.py
+++ b/moto/acm/responses.py
@@ -117,6 +117,7 @@ class AWSCertificateManagerResponse(BaseResponse):
         private_key = self._get_param("PrivateKey")
         chain = self._get_param("CertificateChain")  # Optional
         current_arn = self._get_param("CertificateArn")  # Optional
+        tags = self._get_param("Tags")  # Optional
 
         # Simple parameter decoding. Rather do it here as its a data transport decision not part of the
         # actual data
@@ -142,7 +143,7 @@ class AWSCertificateManagerResponse(BaseResponse):
 
         try:
             arn = self.acm_backend.import_cert(
-                certificate, private_key, chain=chain, arn=current_arn
+                certificate, private_key, chain=chain, arn=current_arn, tags=tags
             )
         except AWSError as err:
             return err.response()
@@ -210,6 +211,7 @@ class AWSCertificateManagerResponse(BaseResponse):
         )  # is ignored atm
         idempotency_token = self._get_param("IdempotencyToken")
         subject_alt_names = self._get_param("SubjectAlternativeNames")
+        tags = self._get_param("Tags")  # Optional
 
         if subject_alt_names is not None and len(subject_alt_names) > 10:
             # There is initial AWS limit of 10
@@ -227,6 +229,7 @@ class AWSCertificateManagerResponse(BaseResponse):
                 domain_validation_options,
                 idempotency_token,
                 subject_alt_names,
+                tags,
             )
         except AWSError as err:
             return err.response()
diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py
index 2a306ab99..4d3475d0e 100644
--- a/moto/apigateway/exceptions.py
+++ b/moto/apigateway/exceptions.py
@@ -85,6 +85,15 @@ class NoMethodDefined(BadRequestException):
         )
 
 
+class AuthorizerNotFoundException(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(AuthorizerNotFoundException, self).__init__(
+            "NotFoundException", "Invalid Authorizer identifier specified"
+        )
+
+
 class StageNotFoundException(RESTError):
     code = 404
 
@@ -103,6 +112,15 @@ class ApiKeyNotFoundException(RESTError):
         )
 
 
+class UsagePlanNotFoundException(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(UsagePlanNotFoundException, self).__init__(
+            "NotFoundException", "Invalid Usage Plan ID specified"
+        )
+
+
 class ApiKeyAlreadyExists(RESTError):
     code = 409
 
@@ -110,3 +128,57 @@ class ApiKeyAlreadyExists(RESTError):
         super(ApiKeyAlreadyExists, self).__init__(
             "ConflictException", "API Key already exists"
         )
+
+
+class InvalidDomainName(BadRequestException):
+    code = 404
+
+    def __init__(self):
+        super(InvalidDomainName, self).__init__(
+            "BadRequestException", "No Domain Name specified"
+        )
+
+
+class DomainNameNotFound(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(DomainNameNotFound, self).__init__(
+            "NotFoundException", "Invalid Domain Name specified"
+        )
+
+
+class InvalidRestApiId(BadRequestException):
+    code = 404
+
+    def __init__(self):
+        super(InvalidRestApiId, self).__init__(
+            "BadRequestException", "No Rest API Id specified"
+        )
+
+
+class InvalidModelName(BadRequestException):
+    code = 404
+
+    def __init__(self):
+        super(InvalidModelName, self).__init__(
+            "BadRequestException", "No Model Name specified"
+        )
+
+
+class RestAPINotFound(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(RestAPINotFound, self).__init__(
+            "NotFoundException", "Invalid Rest API Id specified"
+        )
+
+
+class ModelNotFound(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(ModelNotFound, self).__init__(
+            "NotFoundException", "Invalid Model Name specified"
+        )
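# Hedged illustration (assumed behavior, not in the diff): these RESTError
# subclasses surface to a boto3 client as a ClientError carrying the error
# type and message defined above, e.g. for an unknown usage plan:
import boto3
from botocore.exceptions import ClientError
from moto import mock_apigateway


@mock_apigateway
def unknown_usage_plan_raises():
    client = boto3.client("apigateway", region_name="us-east-1")
    try:
        client.get_usage_plan(usagePlanId="not-a-plan")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "NotFoundException"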
"NotFoundException", "Invalid Model Name specified" + ) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index ae7bdfac3..4a44404a2 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -14,12 +14,12 @@ try: except ImportError: from urllib.parse import urlparse import responses -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from .utils import create_id from moto.core.utils import path_url -from moto.sts.models import ACCOUNT_ID from .exceptions import ( ApiKeyNotFoundException, + UsagePlanNotFoundException, AwsProxyNotAllowed, CrossAccountNotAllowed, IntegrationMethodNotDefined, @@ -28,11 +28,18 @@ from .exceptions import ( InvalidHttpEndpoint, InvalidResourcePathException, InvalidRequestInput, + AuthorizerNotFoundException, StageNotFoundException, RoleNotSpecified, NoIntegrationDefined, NoMethodDefined, ApiKeyAlreadyExists, + DomainNameNotFound, + InvalidDomainName, + InvalidRestApiId, + InvalidModelName, + RestAPINotFound, + ModelNotFound, ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -48,11 +55,21 @@ class Deployment(BaseModel, dict): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None): - self["responseTemplates"] = {"application/json": None} + def __init__( + self, + status_code, + selection_pattern=None, + response_templates=None, + content_handling=None, + ): + if response_templates is None: + response_templates = {"application/json": None} + self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern + if content_handling: + self["contentHandling"] = content_handling class Integration(BaseModel, dict): @@ -64,8 +81,14 @@ class Integration(BaseModel, dict): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern): - integration_response = IntegrationResponse(status_code, selection_pattern) + def create_integration_response( + self, status_code, selection_pattern, response_templates, content_handling + ): + if response_templates == {}: + response_templates = None + integration_response = IntegrationResponse( + status_code, selection_pattern, response_templates, content_handling + ) self["integrationResponses"][status_code] = integration_response return integration_response @@ -83,14 +106,14 @@ class MethodResponse(BaseModel, dict): class Method(BaseModel, dict): - def __init__(self, method_type, authorization_type): + def __init__(self, method_type, authorization_type, **kwargs): super(Method, self).__init__() self.update( dict( httpMethod=method_type, authorizationType=authorization_type, authorizerId=None, - apiKeyRequired=None, + apiKeyRequired=kwargs.get("api_key_required") or False, requestParameters=None, requestModels=None, methodIntegration=None, @@ -117,14 +140,15 @@ class Resource(BaseModel): self.api_id = api_id self.path_part = path_part self.parent_id = parent_id - self.resource_methods = {"GET": {}} + self.resource_methods = {} def to_dict(self): response = { "path": self.get_path(), "id": self.id, - "resourceMethods": self.resource_methods, } + if self.resource_methods: + response["resourceMethods"] = self.resource_methods if self.parent_id: response["parentId"] = self.parent_id response["pathPart"] = self.path_part @@ -158,8 +182,12 @@ class Resource(BaseModel): ) return 
@@ -323,10 +399,10 @@ class ApiKey(BaseModel, dict):
         self,
         name=None,
         description=None,
-        enabled=True,
+        enabled=False,
         generateDistinctId=False,
         value=None,
-        stageKeys=None,
+        stageKeys=[],
         tags=None,
         customerId=None,
     ):
@@ -401,15 +477,17 @@ class RestAPI(BaseModel):
         self.description = description
         self.create_date = int(time.time())
         self.api_key_source = kwargs.get("api_key_source") or "HEADER"
+        self.policy = kwargs.get("policy") or None
         self.endpoint_configuration = kwargs.get("endpoint_configuration") or {
             "types": ["EDGE"]
         }
         self.tags = kwargs.get("tags") or {}
 
         self.deployments = {}
+        self.authorizers = {}
         self.stages = {}
-
         self.resources = {}
+        self.models = {}
         self.add_child("/")  # Add default child
 
     def __repr__(self):
@@ -424,6 +502,7 @@ class RestAPI(BaseModel):
             "apiKeySource": self.api_key_source,
             "endpointConfiguration": self.endpoint_configuration,
             "tags": self.tags,
+            "policy": self.policy,
         }
 
     def add_child(self, path, parent_id=None):
@@ -438,6 +517,29 @@ class RestAPI(BaseModel):
         self.resources[child_id] = child
         return child
 
+    def add_model(
+        self,
+        name,
+        description=None,
+        schema=None,
+        content_type=None,
+        cli_input_json=None,
+        generate_cli_skeleton=None,
+    ):
+        model_id = create_id()
+        new_model = Model(
+            id=model_id,
+            name=name,
+            description=description,
+            schema=schema,
+            content_type=content_type,
+            cli_input_json=cli_input_json,
+            generate_cli_skeleton=generate_cli_skeleton,
+        )
+
+        self.models[name] = new_model
+        return new_model
+
     def get_resource_for_path(self, path_after_stage_name):
         for resource in self.resources.values():
             if resource.get_path() == path_after_stage_name:
@@ -474,6 +576,34 @@ class RestAPI(BaseModel):
             ),
         )
 
+    def create_authorizer(
+        self,
+        id,
+        name,
+        authorizer_type,
+        provider_arns=None,
+        auth_type=None,
+        authorizer_uri=None,
+        authorizer_credentials=None,
+        identity_source=None,
+        identity_validation_expression=None,
+        authorizer_result_ttl=None,
+    ):
+        authorizer = Authorizer(
+            id=id,
+            name=name,
+            authorizer_type=authorizer_type,
+            provider_arns=provider_arns,
+            auth_type=auth_type,
+            authorizer_uri=authorizer_uri,
+            authorizer_credentials=authorizer_credentials,
+            identity_source=identity_source,
+            identity_validation_expression=identity_validation_expression,
+            authorizer_result_ttl=authorizer_result_ttl,
+        )
+        self.authorizers[id] = authorizer
+        return authorizer
+
     def create_stage(
         self,
         name,
@@ -513,6 +643,9 @@ class RestAPI(BaseModel):
     def get_deployment(self, deployment_id):
         return self.deployments[deployment_id]
 
+    def get_authorizers(self):
+        return list(self.authorizers.values())
+
     def get_stages(self):
         return list(self.stages.values())
 
@@ -523,6 +656,58 @@ class RestAPI(BaseModel):
         return self.deployments.pop(deployment_id)
 
 
+class DomainName(BaseModel, dict):
+    def __init__(self, domain_name, **kwargs):
+        super(DomainName, self).__init__()
+        self["domainName"] = domain_name
+        self["regionalDomainName"] = domain_name
+        self["distributionDomainName"] = domain_name
+        self["domainNameStatus"] = "AVAILABLE"
+        self["domainNameStatusMessage"] = "Domain Name Available"
+        self["regionalHostedZoneId"] = "Z2FDTNDATAQYW2"
+        self["distributionHostedZoneId"] = "Z2FDTNDATAQYW2"
+        self["certificateUploadDate"] = int(time.time())
+        if kwargs.get("certificate_name"):
+            self["certificateName"] = kwargs.get("certificate_name")
+        if kwargs.get("certificate_arn"):
+            self["certificateArn"] = kwargs.get("certificate_arn")
+        if kwargs.get("certificate_body"):
+            self["certificateBody"] = kwargs.get("certificate_body")
+        if kwargs.get("tags"):
+            self["tags"] = kwargs.get("tags")
+        if kwargs.get("security_policy"):
+            self["securityPolicy"] = kwargs.get("security_policy")
+        if kwargs.get("certificate_chain"):
+            self["certificateChain"] = kwargs.get("certificate_chain")
+        if kwargs.get("regional_certificate_name"):
+            self["regionalCertificateName"] = kwargs.get("regional_certificate_name")
+        if kwargs.get("certificate_private_key"):
+            self["certificatePrivateKey"] = kwargs.get("certificate_private_key")
+        if kwargs.get("regional_certificate_arn"):
+            self["regionalCertificateArn"] = kwargs.get("regional_certificate_arn")
+        if kwargs.get("endpoint_configuration"):
+            self["endpointConfiguration"] = kwargs.get("endpoint_configuration")
+        if kwargs.get("generate_cli_skeleton"):
+            self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton")
+
+
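# Hedged sketch (assumed usage): custom domain names can now be created and
# fetched through the mock; the domain and certificate name are placeholders.
import boto3
from moto import mock_apigateway


@mock_apigateway
def domain_name_roundtrip():
    client = boto3.client("apigateway", region_name="us-east-1")
    client.create_domain_name(domainName="dev.example.com", certificateName="my-cert")
    resp = client.get_domain_name(domainName="dev.example.com")
    assert resp["domainNameStatus"] == "AVAILABLE"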
+class Model(BaseModel, dict):
+    def __init__(self, id, name, **kwargs):
+        super(Model, self).__init__()
+        self["id"] = id
+        self["name"] = name
+        if kwargs.get("description"):
+            self["description"] = kwargs.get("description")
+        if kwargs.get("schema"):
+            self["schema"] = kwargs.get("schema")
+        if kwargs.get("content_type"):
+            self["contentType"] = kwargs.get("content_type")
+        if kwargs.get("cli_input_json"):
+            self["cliInputJson"] = kwargs.get("cli_input_json")
+        if kwargs.get("generate_cli_skeleton"):
+            self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton")
+
+
 class APIGatewayBackend(BaseBackend):
     def __init__(self, region_name):
         super(APIGatewayBackend, self).__init__()
@@ -530,6 +715,8 @@ class APIGatewayBackend(BaseBackend):
         self.keys = {}
         self.usage_plans = {}
         self.usage_plan_keys = {}
+        self.domain_names = {}
+        self.models = {}
         self.region_name = region_name
 
     def reset(self):
@@ -544,6 +731,7 @@ class APIGatewayBackend(BaseBackend):
         api_key_source=None,
         endpoint_configuration=None,
         tags=None,
+        policy=None,
     ):
         api_id = create_id()
         rest_api = RestAPI(
@@ -554,12 +742,15 @@ class APIGatewayBackend(BaseBackend):
             api_key_source=api_key_source,
             endpoint_configuration=endpoint_configuration,
             tags=tags,
+            policy=policy,
         )
         self.apis[api_id] = rest_api
         return rest_api
 
     def get_rest_api(self, function_id):
-        rest_api = self.apis[function_id]
+        rest_api = self.apis.get(function_id)
+        if rest_api is None:
+            raise RestAPINotFound()
         return rest_api
 
     def list_apis(self):
@@ -594,11 +785,60 @@ class APIGatewayBackend(BaseBackend):
         resource = self.get_resource(function_id, resource_id)
         return resource.get_method(method_type)
 
-    def create_method(self, function_id, resource_id, method_type, authorization_type):
+    def create_method(
+        self,
+        function_id,
+        resource_id,
+        method_type,
+        authorization_type,
+        api_key_required=None,
+    ):
         resource = self.get_resource(function_id, resource_id)
-        method = resource.add_method(method_type, authorization_type)
+        method = resource.add_method(
+            method_type, authorization_type, api_key_required=api_key_required
+        )
         return method
 
+    def get_authorizer(self, restapi_id, authorizer_id):
+        api = self.get_rest_api(restapi_id)
+        authorizer = api.authorizers.get(authorizer_id)
+        if authorizer is None:
+            raise AuthorizerNotFoundException()
+        else:
+            return authorizer
+
+    def get_authorizers(self, restapi_id):
+        api = self.get_rest_api(restapi_id)
+        return api.get_authorizers()
+
+    def create_authorizer(self, restapi_id, name, authorizer_type, **kwargs):
+        api = self.get_rest_api(restapi_id)
+        authorizer_id = create_id()
+        authorizer = api.create_authorizer(
+            authorizer_id,
+            name,
+            authorizer_type,
+            provider_arns=kwargs.get("provider_arns"),
+            auth_type=kwargs.get("auth_type"),
+            authorizer_uri=kwargs.get("authorizer_uri"),
+            authorizer_credentials=kwargs.get("authorizer_credentials"),
+            identity_source=kwargs.get("identity_source"),
+            identity_validation_expression=kwargs.get(
+                "identity_validation_expression"
+            ),
+            authorizer_result_ttl=kwargs.get("authorizer_result_ttl"),
+        )
+        return api.authorizers.get(authorizer["id"])
+
+    def update_authorizer(self, restapi_id, authorizer_id, patch_operations):
+        authorizer = self.get_authorizer(restapi_id, authorizer_id)
+        if not authorizer:
+            api = self.get_rest_api(restapi_id)
+            authorizer = api.authorizers[authorizer_id] = Authorizer()
+        return authorizer.apply_operations(patch_operations)
+
+    def delete_authorizer(self, restapi_id, authorizer_id):
+        api = self.get_rest_api(restapi_id)
+        del api.authorizers[authorizer_id]
+
     def get_stage(self, function_id, stage_name):
         api = self.get_rest_api(function_id)
         stage = api.stages.get(stage_name)
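# Hedged sketch (assumed usage): the new `policy` attribute round-trips
# through create_rest_api / get_rest_api; the policy document is illustrative.
import boto3
from moto import mock_apigateway


@mock_apigateway
def rest_api_policy_roundtrip():
    client = boto3.client("apigateway", region_name="us-west-2")
    policy = '{"Version": "2012-10-17", "Statement": []}'
    api_id = client.create_rest_api(name="my_api", policy=policy)["id"]
    assert client.get_rest_api(restApiId=api_id)["policy"] == policy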
kwargs.get("schema") + if kwargs.get("content_type"): + self["contentType"] = kwargs.get("content_type") + if kwargs.get("cli_input_json"): + self["cliInputJson"] = kwargs.get("cli_input_json") + if kwargs.get("generate_cli_skeleton"): + self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") + + class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() @@ -530,6 +715,8 @@ class APIGatewayBackend(BaseBackend): self.keys = {} self.usage_plans = {} self.usage_plan_keys = {} + self.domain_names = {} + self.models = {} self.region_name = region_name def reset(self): @@ -544,6 +731,7 @@ class APIGatewayBackend(BaseBackend): api_key_source=None, endpoint_configuration=None, tags=None, + policy=None, ): api_id = create_id() rest_api = RestAPI( @@ -554,12 +742,15 @@ class APIGatewayBackend(BaseBackend): api_key_source=api_key_source, endpoint_configuration=endpoint_configuration, tags=tags, + policy=policy, ) self.apis[api_id] = rest_api return rest_api def get_rest_api(self, function_id): - rest_api = self.apis[function_id] + rest_api = self.apis.get(function_id) + if rest_api is None: + raise RestAPINotFound() return rest_api def list_apis(self): @@ -594,11 +785,60 @@ class APIGatewayBackend(BaseBackend): resource = self.get_resource(function_id, resource_id) return resource.get_method(method_type) - def create_method(self, function_id, resource_id, method_type, authorization_type): + def create_method( + self, + function_id, + resource_id, + method_type, + authorization_type, + api_key_required=None, + ): resource = self.get_resource(function_id, resource_id) - method = resource.add_method(method_type, authorization_type) + method = resource.add_method( + method_type, authorization_type, api_key_required=api_key_required + ) return method + def get_authorizer(self, restapi_id, authorizer_id): + api = self.get_rest_api(restapi_id) + authorizer = api.authorizers.get(authorizer_id) + if authorizer is None: + raise AuthorizerNotFoundException() + else: + return authorizer + + def get_authorizers(self, restapi_id): + api = self.get_rest_api(restapi_id) + return api.get_authorizers() + + def create_authorizer(self, restapi_id, name, authorizer_type, **kwargs): + api = self.get_rest_api(restapi_id) + authorizer_id = create_id() + authorizer = api.create_authorizer( + authorizer_id, + name, + authorizer_type, + provider_arns=kwargs.get("provider_arns"), + auth_type=kwargs.get("auth_type"), + authorizer_uri=kwargs.get("authorizer_uri"), + authorizer_credentials=kwargs.get("authorizer_credentials"), + identity_source=kwargs.get("identity_source"), + identiy_validation_expression=kwargs.get("identiy_validation_expression"), + authorizer_result_ttl=kwargs.get("authorizer_result_ttl"), + ) + return api.authorizers.get(authorizer["id"]) + + def update_authorizer(self, restapi_id, authorizer_id, patch_operations): + authorizer = self.get_authorizer(restapi_id, authorizer_id) + if not authorizer: + api = self.get_rest_api(restapi_id) + authorizer = api.authorizers[authorizer_id] = Authorizer() + return authorizer.apply_operations(patch_operations) + + def delete_authorizer(self, restapi_id, authorizer_id): + api = self.get_rest_api(restapi_id) + del api.authorizers[authorizer_id] + def get_stage(self, function_id, stage_name): api = self.get_rest_api(function_id) stage = api.stages.get(stage_name) @@ -726,12 +966,13 @@ class APIGatewayBackend(BaseBackend): status_code, selection_pattern, response_templates, + content_handling, ): if 
@@ -821,6 +1062,9 @@ class APIGatewayBackend(BaseBackend):
         return plans
 
     def get_usage_plan(self, usage_plan_id):
+        if usage_plan_id not in self.usage_plans:
+            raise UsagePlanNotFoundException()
+
         return self.usage_plans[usage_plan_id]
 
     def delete_usage_plan(self, usage_plan_id):
@@ -853,6 +1097,17 @@ class APIGatewayBackend(BaseBackend):
         return list(self.usage_plan_keys[usage_plan_id].values())
 
     def get_usage_plan_key(self, usage_plan_id, key_id):
+        # First, check that this is a valid API key
+        if key_id not in self.keys:
+            raise ApiKeyNotFoundException()
+
+        # Then check that the usage plan exists and that the key is attached to it
+        if (
+            usage_plan_id not in self.usage_plan_keys
+            or key_id not in self.usage_plan_keys[usage_plan_id]
+        ):
+            raise UsagePlanNotFoundException()
+
         return self.usage_plan_keys[usage_plan_id][key_id]
 
     def delete_usage_plan_key(self, usage_plan_id, key_id):
@@ -866,6 +1121,98 @@ class APIGatewayBackend(BaseBackend):
         except Exception:
             return False
 
+    def create_domain_name(
+        self,
+        domain_name,
+        certificate_name=None,
+        tags=None,
+        certificate_arn=None,
+        certificate_body=None,
+        certificate_private_key=None,
+        certificate_chain=None,
+        regional_certificate_name=None,
+        regional_certificate_arn=None,
+        endpoint_configuration=None,
+        security_policy=None,
+        generate_cli_skeleton=None,
+    ):
+
+        if not domain_name:
+            raise InvalidDomainName()
+
+        new_domain_name = DomainName(
+            domain_name=domain_name,
+            certificate_name=certificate_name,
+            certificate_private_key=certificate_private_key,
+            certificate_arn=certificate_arn,
+            certificate_body=certificate_body,
+            certificate_chain=certificate_chain,
+            regional_certificate_name=regional_certificate_name,
+            regional_certificate_arn=regional_certificate_arn,
+            endpoint_configuration=endpoint_configuration,
+            tags=tags,
+            security_policy=security_policy,
+            generate_cli_skeleton=generate_cli_skeleton,
+        )
+
+        self.domain_names[domain_name] = new_domain_name
+        return new_domain_name
+
+    def get_domain_names(self):
+        return list(self.domain_names.values())
+
+    def get_domain_name(self, domain_name):
+        domain_info = self.domain_names.get(domain_name)
+        if domain_info is None:
+            raise DomainNameNotFound
+        else:
+            return self.domain_names[domain_name]
+
+    def create_model(
+        self,
+        rest_api_id,
+        name,
+        content_type,
+        description=None,
+        schema=None,
+        cli_input_json=None,
+        generate_cli_skeleton=None,
+    ):
+
+        if not rest_api_id:
+            raise InvalidRestApiId
+        if not name:
+            raise InvalidModelName
+
+        api = self.get_rest_api(rest_api_id)
+        new_model = api.add_model(
+            name=name,
+            description=description,
+            schema=schema,
+            content_type=content_type,
+            cli_input_json=cli_input_json,
+            generate_cli_skeleton=generate_cli_skeleton,
+        )
+
+        return new_model
+
+    def get_models(self, rest_api_id):
+        if not rest_api_id:
+            raise InvalidRestApiId
+        api = self.get_rest_api(rest_api_id)
+        models = api.models.values()
+        return list(models)
+
+    def get_model(self, rest_api_id, model_name):
+        if not rest_api_id:
+            raise InvalidRestApiId
+        api = self.get_rest_api(rest_api_id)
+        model = api.models.get(model_name)
+        if model is None:
+            raise ModelNotFound
+        else:
+            return model
+
 
 apigateway_backends = {}
 for region_name in Session().get_available_regions("apigateway"):
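# Hedged sketch (assumed usage): fetching a usage-plan key now validates both
# the API key and the plan, surfacing NotFoundException instead of a KeyError.
import boto3
from botocore.exceptions import ClientError
from moto import mock_apigateway


@mock_apigateway
def unknown_usage_plan_key_raises():
    client = boto3.client("apigateway", region_name="us-east-1")
    key_id = client.create_api_key(name="my-key")["id"]
    try:
        client.get_usage_plan_key(usagePlanId="not-a-plan", keyId=key_id)
    except ClientError as err:
        assert err.response["Error"]["Code"] == "NotFoundException"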
diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py
index e10d670c5..0454ae58e 100644
--- a/moto/apigateway/responses.py
+++ b/moto/apigateway/responses.py
@@ -6,13 +6,22 @@ from moto.core.responses import BaseResponse
 from .models import apigateway_backends
 from .exceptions import (
     ApiKeyNotFoundException,
+    UsagePlanNotFoundException,
     BadRequestException,
     CrossAccountNotAllowed,
+    AuthorizerNotFoundException,
     StageNotFoundException,
     ApiKeyAlreadyExists,
+    DomainNameNotFound,
+    InvalidDomainName,
+    InvalidRestApiId,
+    InvalidModelName,
+    RestAPINotFound,
+    ModelNotFound,
 )
 
 API_KEY_SOURCES = ["AUTHORIZER", "HEADER"]
+AUTHORIZER_TYPES = ["TOKEN", "REQUEST", "COGNITO_USER_POOLS"]
 ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"]
 
@@ -51,6 +60,7 @@ class APIGatewayResponse(BaseResponse):
         api_key_source = self._get_param("apiKeySource")
         endpoint_configuration = self._get_param("endpointConfiguration")
         tags = self._get_param("tags")
+        policy = self._get_param("policy")
 
         # Param validation
         if api_key_source and api_key_source not in API_KEY_SOURCES:
@@ -86,6 +96,7 @@ class APIGatewayResponse(BaseResponse):
             api_key_source=api_key_source,
             endpoint_configuration=endpoint_configuration,
             tags=tags,
+            policy=policy,
         )
         return 200, {}, json.dumps(rest_api.to_dict())
 
@@ -145,8 +156,13 @@ class APIGatewayResponse(BaseResponse):
             return 200, {}, json.dumps(method)
         elif self.method == "PUT":
             authorization_type = self._get_param("authorizationType")
+            api_key_required = self._get_param("apiKeyRequired")
             method = self.backend.create_method(
-                function_id, resource_id, method_type, authorization_type
+                function_id,
+                resource_id,
+                method_type,
+                authorization_type,
+                api_key_required,
             )
             return 200, {}, json.dumps(method)
 
@@ -172,6 +188,88 @@ class APIGatewayResponse(BaseResponse):
         )
         return 200, {}, json.dumps(method_response)
 
+    def restapis_authorizers(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        url_path_parts = self.path.split("/")
+        restapi_id = url_path_parts[2]
+
+        if self.method == "POST":
+            name = self._get_param("name")
+            authorizer_type = self._get_param("type")
+
+            provider_arns = self._get_param_with_default_value("providerARNs", None)
+            auth_type = self._get_param_with_default_value("authType", None)
+            authorizer_uri = self._get_param_with_default_value("authorizerUri", None)
+            authorizer_credentials = self._get_param_with_default_value(
+                "authorizerCredentials", None
+            )
+            identity_source = self._get_param_with_default_value(
+                "identitySource", None
+            )
+            identity_validation_expression = self._get_param_with_default_value(
+                "identityValidationExpression", None
+            )
+            authorizer_result_ttl = self._get_param_with_default_value(
+                "authorizerResultTtlInSeconds", 300
+            )
+
+            # Param validation
+            if authorizer_type and authorizer_type not in AUTHORIZER_TYPES:
+                return self.error(
+                    "ValidationException",
+                    (
+                        "1 validation error detected: "
+                        "Value '{authorizer_type}' at 'createAuthorizerInput.type' failed "
+                        "to satisfy constraint: Member must satisfy enum value set: "
+                        "[TOKEN, REQUEST, COGNITO_USER_POOLS]"
+                    ).format(authorizer_type=authorizer_type),
+                )
+
+            authorizer_response = self.backend.create_authorizer(
+                restapi_id,
+                name,
+                authorizer_type,
+                provider_arns=provider_arns,
+                auth_type=auth_type,
+                authorizer_uri=authorizer_uri,
+                authorizer_credentials=authorizer_credentials,
+                identity_source=identity_source,
+                identity_validation_expression=identity_validation_expression,
+                authorizer_result_ttl=authorizer_result_ttl,
+            )
+        elif self.method == "GET":
+            authorizers = self.backend.get_authorizers(restapi_id)
+            return 200, {}, json.dumps({"item": authorizers})
+
+        return 200, {}, json.dumps(authorizer_response)
+
+    def authorizers(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        url_path_parts = self.path.split("/")
+        restapi_id = url_path_parts[2]
+        authorizer_id = url_path_parts[4]
+
+        if self.method == "GET":
+            try:
+                authorizer_response = self.backend.get_authorizer(
+                    restapi_id, authorizer_id
+                )
+            except AuthorizerNotFoundException as error:
+                return (
+                    error.code,
+                    {},
+                    '{{"message":"{0}","code":"{1}"}}'.format(
+                        error.message, error.error_type
+                    ),
+                )
+        elif self.method == "PATCH":
+            patch_operations = self._get_param("patchOperations")
+            authorizer_response = self.backend.update_authorizer(
+                restapi_id, authorizer_id, patch_operations
+            )
+        elif self.method == "DELETE":
+            self.backend.delete_authorizer(restapi_id, authorizer_id)
+            return 202, {}, "{}"
+        return 200, {}, json.dumps(authorizer_response)
 
     def restapis_stages(self, request, full_url, headers):
         self.setup_class(request, full_url, headers)
         url_path_parts = self.path.split("/")
@@ -289,6 +387,7 @@ class APIGatewayResponse(BaseResponse):
         elif self.method == "PUT":
             selection_pattern = self._get_param("selectionPattern")
             response_templates = self._get_param("responseTemplates")
+            content_handling = self._get_param("contentHandling")
             integration_response = self.backend.create_integration_response(
                 function_id,
                 resource_id,
@@ -296,6 +395,7 @@ class APIGatewayResponse(BaseResponse):
                 status_code,
                 selection_pattern,
                 response_templates,
+                content_handling,
             )
         elif self.method == "DELETE":
             integration_response = self.backend.delete_integration_response(
@@ -349,16 +449,15 @@ class APIGatewayResponse(BaseResponse):
             except ApiKeyAlreadyExists as error:
                 return (
                     error.code,
-                    self.headers,
+                    {},
                     '{{"message":"{0}","code":"{1}"}}'.format(
                         error.message, error.error_type
                     ),
                 )
-
+            return 201, {}, json.dumps(apikey_response)
         elif self.method == "GET":
             apikeys_response = self.backend.get_apikeys()
             return 200, {}, json.dumps({"item": apikeys_response})
-        return 200, {}, json.dumps(apikey_response)
 
     def apikey_individual(self, request, full_url, headers):
         self.setup_class(request, full_url, headers)
@@ -366,6 +465,7 @@ class APIGatewayResponse(BaseResponse):
         url_path_parts = self.path.split("/")
         apikey = url_path_parts[2]
 
+        status_code = 200
         if self.method == "GET":
             apikey_response = self.backend.get_apikey(apikey)
         elif self.method == "PATCH":
@@ -373,7 +473,9 @@ class APIGatewayResponse(BaseResponse):
             apikey_response = self.backend.update_apikey(apikey, patch_operations)
         elif self.method == "DELETE":
             apikey_response = self.backend.delete_apikey(apikey)
-        return 200, {}, json.dumps(apikey_response)
+            status_code = 202
+
+        return status_code, {}, json.dumps(apikey_response)
 
     def usage_plans(self, request, full_url, headers):
         self.setup_class(request, full_url, headers)
@@ -393,7 +495,16 @@ class APIGatewayResponse(BaseResponse):
         usage_plan = url_path_parts[2]
 
         if self.method == "GET":
-            usage_plan_response = self.backend.get_usage_plan(usage_plan)
+            try:
+                usage_plan_response = self.backend.get_usage_plan(usage_plan)
+            except (UsagePlanNotFoundException) as error:
+                return (
+                    error.code,
+                    {},
+                    '{{"message":"{0}","code":"{1}"}}'.format(
+                        error.message, error.error_type
+                    ),
+                )
         elif self.method == "DELETE":
             usage_plan_response = self.backend.delete_usage_plan(usage_plan)
 
         return 200, {}, json.dumps(usage_plan_response)
@@ -417,13 +528,11 @@ class APIGatewayResponse(BaseResponse):
                         error.message, error.error_type
                     ),
                 )
-
+            return 201, {}, json.dumps(usage_plan_response)
         elif self.method == "GET":
             usage_plans_response = self.backend.get_usage_plan_keys(usage_plan_id)
             return 200, {}, json.dumps({"item": usage_plans_response})
 
-        return 200, {}, json.dumps(usage_plan_response)
-
     def usage_plan_key_individual(self, request, full_url, headers):
         self.setup_class(request, full_url, headers)
 
@@ -432,9 +541,147 @@ class APIGatewayResponse(BaseResponse):
         key_id = url_path_parts[4]
 
         if self.method == "GET":
-            usage_plan_response = self.backend.get_usage_plan_key(usage_plan_id, key_id)
+            try:
+                usage_plan_response = self.backend.get_usage_plan_key(
+                    usage_plan_id, key_id
+                )
+            except (UsagePlanNotFoundException, ApiKeyNotFoundException) as error:
+                return (
+                    error.code,
+                    {},
+                    '{{"message":"{0}","code":"{1}"}}'.format(
+                        error.message, error.error_type
+                    ),
+                )
         elif self.method == "DELETE":
             usage_plan_response = self.backend.delete_usage_plan_key(
                 usage_plan_id, key_id
             )
         return 200, {}, json.dumps(usage_plan_response)
+
+    def domain_names(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        try:
+            if self.method == "GET":
+                domain_names = self.backend.get_domain_names()
+                return 200, {}, json.dumps({"item": domain_names})
+
+            elif self.method == "POST":
+                domain_name = self._get_param("domainName")
+                certificate_name = self._get_param("certificateName")
+                tags = self._get_param("tags")
+                certificate_arn = self._get_param("certificateArn")
+                certificate_body = self._get_param("certificateBody")
+                certificate_private_key = self._get_param("certificatePrivateKey")
+                certificate_chain = self._get_param("certificateChain")
+                regional_certificate_name = self._get_param("regionalCertificateName")
+                regional_certificate_arn = self._get_param("regionalCertificateArn")
+                endpoint_configuration = self._get_param("endpointConfiguration")
+                security_policy = self._get_param("securityPolicy")
+                generate_cli_skeleton = self._get_param("generateCliSkeleton")
+                domain_name_resp = self.backend.create_domain_name(
+                    domain_name,
+                    certificate_name,
+                    tags,
+                    certificate_arn,
+                    certificate_body,
+                    certificate_private_key,
+                    certificate_chain,
+                    regional_certificate_name,
+                    regional_certificate_arn,
+                    endpoint_configuration,
+                    security_policy,
+                    generate_cli_skeleton,
+                )
+                return 200, {}, json.dumps(domain_name_resp)
+
+        except InvalidDomainName as error:
+            return (
+                error.code,
+                {},
+                '{{"message":"{0}","code":"{1}"}}'.format(
+                    error.message, error.error_type
+                ),
+            )
+
+    def domain_name_individual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        domain_name = url_path_parts[2]
+        domain_names = {}
+        try:
+            if self.method == "GET":
+                if domain_name is not None:
+                    domain_names = self.backend.get_domain_name(domain_name)
+            return 200, {}, json.dumps(domain_names)
+        except DomainNameNotFound as error:
+            return (
+                error.code,
+                {},
+                '{{"message":"{0}","code":"{1}"}}'.format(
+                    error.message, error.error_type
+                ),
+            )
+
+    def models(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        rest_api_id = self.path.replace("/restapis/", "", 1).split("/")[0]
+
+        try:
+            if self.method == "GET":
+                models = self.backend.get_models(rest_api_id)
+                return 200, {}, json.dumps({"item": models})
+
+            elif self.method == "POST":
+                name = self._get_param("name")
+                description = self._get_param("description")
+                schema = self._get_param("schema")
+                content_type = self._get_param("contentType")
+                cli_input_json = self._get_param("cliInputJson")
+                generate_cli_skeleton = self._get_param("generateCliSkeleton")
+                model = self.backend.create_model(
+                    rest_api_id,
+                    name,
+                    content_type,
+                    description,
+                    schema,
+                    cli_input_json,
+                    generate_cli_skeleton,
+                )
+
+                return 200, {}, json.dumps(model)
+
+        except (InvalidRestApiId, InvalidModelName, RestAPINotFound) as error:
+            return (
+                error.code,
+                {},
+                '{{"message":"{0}","code":"{1}"}}'.format(
+                    error.message, error.error_type
+                ),
+            )
+
+    def model_individual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        url_path_parts = self.path.split("/")
+        rest_api_id = url_path_parts[2]
+        model_name = url_path_parts[4]
+        model_info = {}
+        try:
+            if self.method == "GET":
+                model_info = self.backend.get_model(rest_api_id, model_name)
+            return 200, {}, json.dumps(model_info)
+        except (
+            ModelNotFound,
+            RestAPINotFound,
+            InvalidRestApiId,
+            InvalidModelName,
+        ) as error:
+            return (
+                error.code,
+                {},
+                '{{"message":"{0}","code":"{1}"}}'.format(
+                    error.message, error.error_type
+                ),
+            )
diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py
index bb2b2d216..7e8de1398 100644
--- a/moto/apigateway/urls.py
+++ b/moto/apigateway/urls.py
@@ -7,18 +7,24 @@ url_paths = {
     "{0}/restapis$": APIGatewayResponse().restapis,
     "{0}/restapis/(?P<function_id>[^/]+)/?$": APIGatewayResponse().restapis_individual,
     "{0}/restapis/(?P<function_id>[^/]+)/resources$": APIGatewayResponse().resources,
+    "{0}/restapis/(?P<function_id>[^/]+)/authorizers$": APIGatewayResponse().restapis_authorizers,
+    "{0}/restapis/(?P<function_id>[^/]+)/authorizers/(?P<authorizer_id>[^/]+)/?$": APIGatewayResponse().authorizers,
     "{0}/restapis/(?P<function_id>[^/]+)/stages$": APIGatewayResponse().restapis_stages,
     "{0}/restapis/(?P<function_id>[^/]+)/stages/(?P<stage_name>[^/]+)/?$": APIGatewayResponse().stages,
     "{0}/restapis/(?P<function_id>[^/]+)/deployments$": APIGatewayResponse().deployments,
     "{0}/restapis/(?P<function_id>[^/]+)/deployments/(?P<deployment_id>[^/]+)/?$": APIGatewayResponse().individual_deployment,
     "{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/?$": APIGatewayResponse().resource_individual,
     "{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/?$": APIGatewayResponse().resource_methods,
-    "{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/responses/(?P<status_code>\d+)$": APIGatewayResponse().resource_method_responses,
+    r"{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/responses/(?P<status_code>\d+)$": APIGatewayResponse().resource_method_responses,
     "{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/?$": APIGatewayResponse().integrations,
-    "{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/responses/(?P<status_code>\d+)/?$": APIGatewayResponse().integration_responses,
+    r"{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/responses/(?P<status_code>\d+)/?$": APIGatewayResponse().integration_responses,
     "{0}/apikeys$": APIGatewayResponse().apikeys,
     "{0}/apikeys/(?P<apikey>[^/]+)": APIGatewayResponse().apikey_individual,
     "{0}/usageplans$": APIGatewayResponse().usage_plans,
+    "{0}/domainnames$": APIGatewayResponse().domain_names,
+    "{0}/restapis/(?P<function_id>[^/]+)/models$": APIGatewayResponse().models,
+    "{0}/restapis/(?P<function_id>[^/]+)/models/(?P<model_name>[^/]+)/?$": APIGatewayResponse().model_individual,
+    "{0}/domainnames/(?P<domain_name>[^/]+)/?$": APIGatewayResponse().domain_name_individual,
     "{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$": APIGatewayResponse().usage_plan_individual,
     "{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys$": APIGatewayResponse().usage_plan_keys,
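# Hedged sketch (assumed usage): REST API models can now be created and
# fetched through the new /models routes; the schema below is a placeholder.
import boto3
from moto import mock_apigateway


@mock_apigateway
def model_roundtrip():
    client = boto3.client("apigateway", region_name="us-west-2")
    api_id = client.create_rest_api(name="my_api")["id"]
    client.create_model(
        restApiId=api_id,
        name="petModel",
        description="Pet schema",
        schema='{"type": "object"}',
        contentType="application/json",
    )
    model = client.get_model(restApiId=api_id, modelName="petModel")
    assert model["name"] == "petModel"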
"{0}/usageplans/(?P[^/]+)/keys/(?P[^/]+)/?$": APIGatewayResponse().usage_plan_key_individual, diff --git a/moto/applicationautoscaling/__init__.py b/moto/applicationautoscaling/__init__.py new file mode 100644 index 000000000..6e3db1ccf --- /dev/null +++ b/moto/applicationautoscaling/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import applicationautoscaling_backends +from ..core.models import base_decorator + +applicationautoscaling_backend = applicationautoscaling_backends["us-east-1"] +mock_applicationautoscaling = base_decorator(applicationautoscaling_backends) diff --git a/moto/applicationautoscaling/exceptions.py b/moto/applicationautoscaling/exceptions.py new file mode 100644 index 000000000..e409da4e7 --- /dev/null +++ b/moto/applicationautoscaling/exceptions.py @@ -0,0 +1,9 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class AWSValidationException(JsonRESTError): + def __init__(self, message, **kwargs): + super(AWSValidationException, self).__init__( + "ValidationException", message, **kwargs + ) diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py new file mode 100644 index 000000000..40d1094fc --- /dev/null +++ b/moto/applicationautoscaling/models.py @@ -0,0 +1,348 @@ +from __future__ import unicode_literals +from moto.core import BaseBackend, BaseModel +from moto.ecs import ecs_backends +from .exceptions import AWSValidationException +from collections import OrderedDict +from enum import Enum, unique +import time +import uuid + + +@unique +class ServiceNamespaceValueSet(Enum): + APPSTREAM = "appstream" + RDS = "rds" + LAMBDA = "lambda" + CASSANDRA = "cassandra" + DYNAMODB = "dynamodb" + CUSTOM_RESOURCE = "custom-resource" + ELASTICMAPREDUCE = "elasticmapreduce" + EC2 = "ec2" + COMPREHEND = "comprehend" + ECS = "ecs" + SAGEMAKER = "sagemaker" + + +@unique +class ScalableDimensionValueSet(Enum): + CASSANDRA_TABLE_READ_CAPACITY_UNITS = "cassandra:table:ReadCapacityUnits" + CASSANDRA_TABLE_WRITE_CAPACITY_UNITS = "cassandra:table:WriteCapacityUnits" + DYNAMODB_INDEX_READ_CAPACITY_UNITS = "dynamodb:index:ReadCapacityUnits" + DYNAMODB_INDEX_WRITE_CAPACITY_UNITS = "dynamodb:index:WriteCapacityUnits" + DYNAMODB_TABLE_READ_CAPACITY_UNITS = "dynamodb:table:ReadCapacityUnits" + DYNAMODB_TABLE_WRITE_CAPACITY_UNITS = "dynamodb:table:WriteCapacityUnits" + RDS_CLUSTER_READ_REPLICA_COUNT = "rds:cluster:ReadReplicaCount" + RDS_CLUSTER_CAPACITY = "rds:cluster:Capacity" + COMPREHEND_DOCUMENT_CLASSIFIER_ENDPOINT_DESIRED_INFERENCE_UNITS = ( + "comprehend:document-classifier-endpoint:DesiredInferenceUnits" + ) + ELASTICMAPREDUCE_INSTANCE_FLEET_ON_DEMAND_CAPACITY = ( + "elasticmapreduce:instancefleet:OnDemandCapacity" + ) + ELASTICMAPREDUCE_INSTANCE_FLEET_SPOT_CAPACITY = ( + "elasticmapreduce:instancefleet:SpotCapacity" + ) + ELASTICMAPREDUCE_INSTANCE_GROUP_INSTANCE_COUNT = ( + "elasticmapreduce:instancegroup:InstanceCount" + ) + LAMBDA_FUNCTION_PROVISIONED_CONCURRENCY = "lambda:function:ProvisionedConcurrency" + APPSTREAM_FLEET_DESIRED_CAPACITY = "appstream:fleet:DesiredCapacity" + CUSTOM_RESOURCE_RESOURCE_TYPE_PROPERTY = "custom-resource:ResourceType:Property" + SAGEMAKER_VARIANT_DESIRED_INSTANCE_COUNT = "sagemaker:variant:DesiredInstanceCount" + EC2_SPOT_FLEET_REQUEST_TARGET_CAPACITY = "ec2:spot-fleet-request:TargetCapacity" + ECS_SERVICE_DESIRED_COUNT = "ecs:service:DesiredCount" + + +class ApplicationAutoscalingBackend(BaseBackend): + def __init__(self, region, ecs): + 
+class ApplicationAutoscalingBackend(BaseBackend):
+    def __init__(self, region, ecs):
+        super(ApplicationAutoscalingBackend, self).__init__()
+        self.region = region
+        self.ecs_backend = ecs
+        self.targets = OrderedDict()
+        self.policies = {}
+
+    def reset(self):
+        region = self.region
+        ecs = self.ecs_backend
+        self.__dict__ = {}
+        self.__init__(region, ecs)
+
+    @property
+    def applicationautoscaling_backend(self):
+        return applicationautoscaling_backends[self.region]
+
+    def describe_scalable_targets(
+        self, namespace, r_ids=None, dimension=None,
+    ):
+        """ Describe scalable targets. """
+        if r_ids is None:
+            r_ids = []
+        targets = self._flatten_scalable_targets(namespace)
+        if dimension is not None:
+            targets = [t for t in targets if t.scalable_dimension == dimension]
+        if len(r_ids) > 0:
+            targets = [t for t in targets if t.resource_id in r_ids]
+        return targets
+
+    def _flatten_scalable_targets(self, namespace):
+        """ Flatten scalable targets for a given service namespace down to a list. """
+        targets = []
+        for dimension in self.targets.keys():
+            for resource_id in self.targets[dimension].keys():
+                targets.append(self.targets[dimension][resource_id])
+        targets = [t for t in targets if t.service_namespace == namespace]
+        return targets
+
+    def register_scalable_target(self, namespace, r_id, dimension, **kwargs):
+        """ Registers or updates a scalable target. """
+        _ = _target_params_are_valid(namespace, r_id, dimension)
+        if namespace == ServiceNamespaceValueSet.ECS.value:
+            _ = self._ecs_service_exists_for_target(r_id)
+        if self._scalable_target_exists(r_id, dimension):
+            target = self.targets[dimension][r_id]
+            target.update(**kwargs)
+        else:
+            target = FakeScalableTarget(self, namespace, r_id, dimension, **kwargs)
+            self._add_scalable_target(target)
+        return target
+
+    def _scalable_target_exists(self, r_id, dimension):
+        return r_id in self.targets.get(dimension, [])
+
+    def _ecs_service_exists_for_target(self, r_id):
+        """Raises a ValidationException if an ECS service does not exist
+        for the specified resource ID.
+        """
+        resource_type, cluster, service = r_id.split("/")
+        result = self.ecs_backend.describe_services(cluster, [service])
+        if len(result) != 1:
+            raise AWSValidationException("ECS service doesn't exist: {}".format(r_id))
+        return True
+
+    def _add_scalable_target(self, target):
+        if target.scalable_dimension not in self.targets:
+            self.targets[target.scalable_dimension] = OrderedDict()
+        if target.resource_id not in self.targets[target.scalable_dimension]:
+            self.targets[target.scalable_dimension][target.resource_id] = target
+        return target
+
+    def deregister_scalable_target(self, namespace, r_id, dimension):
+        """ Deregisters a scalable target. """
+        if self._scalable_target_exists(r_id, dimension):
+            del self.targets[dimension][r_id]
+        else:
+            raise AWSValidationException(
+                "No scalable target found for service namespace: {}, resource ID: {}, scalable dimension: {}".format(
+                    namespace, r_id, dimension
+                )
+            )
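# Hedged usage sketch (assumed, not in the diff): registering and describing a
# scalable target. A dynamodb resource is used here because, per the code
# above, only the ecs namespace triggers a backend-existence check.
import boto3
from moto import mock_applicationautoscaling


@mock_applicationautoscaling
def register_and_describe():
    client = boto3.client("application-autoscaling", region_name="us-east-1")
    client.register_scalable_target(
        ServiceNamespace="dynamodb",
        ResourceId="table/my-table",
        ScalableDimension="dynamodb:table:ReadCapacityUnits",
        MinCapacity=1,
        MaxCapacity=8,
    )
    targets = client.describe_scalable_targets(ServiceNamespace="dynamodb")
    assert len(targets["ScalableTargets"]) == 1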
""" + if self._scalable_target_exists(r_id, dimension): + del self.targets[dimension][r_id] + else: + raise AWSValidationException( + "No scalable target found for service namespace: {}, resource ID: {}, scalable dimension: {}".format( + namespace, r_id, dimension + ) + ) + + def put_scaling_policy( + self, + policy_name, + service_namespace, + resource_id, + scalable_dimension, + policy_body, + policy_type=None, + ): + policy_key = FakeApplicationAutoscalingPolicy.formulate_key( + service_namespace, resource_id, scalable_dimension, policy_name + ) + if policy_key in self.policies: + old_policy = self.policies[policy_name] + policy = FakeApplicationAutoscalingPolicy( + region_name=self.region, + policy_name=policy_name, + service_namespace=service_namespace, + resource_id=resource_id, + scalable_dimension=scalable_dimension, + policy_type=policy_type if policy_type else old_policy.policy_type, + policy_body=policy_body if policy_body else old_policy._policy_body, + ) + else: + policy = FakeApplicationAutoscalingPolicy( + region_name=self.region, + policy_name=policy_name, + service_namespace=service_namespace, + resource_id=resource_id, + scalable_dimension=scalable_dimension, + policy_type=policy_type, + policy_body=policy_body, + ) + self.policies[policy_key] = policy + return policy + + def describe_scaling_policies(self, service_namespace, **kwargs): + policy_names = kwargs.get("policy_names") + resource_id = kwargs.get("resource_id") + scalable_dimension = kwargs.get("scalable_dimension") + max_results = kwargs.get("max_results") or 100 + next_token = kwargs.get("next_token") + policies = [ + policy + for policy in self.policies.values() + if policy.service_namespace == service_namespace + ] + if policy_names: + policies = [ + policy for policy in policies if policy.policy_name in policy_names + ] + if resource_id: + policies = [ + policy for policy in policies if policy.resource_id in resource_id + ] + if scalable_dimension: + policies = [ + policy + for policy in policies + if policy.scalable_dimension in scalable_dimension + ] + starting_point = int(next_token) if next_token else 0 + ending_point = starting_point + max_results + policies_page = policies[starting_point:ending_point] + new_next_token = str(ending_point) if ending_point < len(policies) else None + return new_next_token, policies_page + + def delete_scaling_policy( + self, policy_name, service_namespace, resource_id, scalable_dimension + ): + policy_key = FakeApplicationAutoscalingPolicy.formulate_key( + service_namespace, resource_id, scalable_dimension, policy_name + ) + if policy_key in self.policies: + del self.policies[policy_key] + return {} + else: + raise AWSValidationException( + "No scaling policy found for service namespace: {}, resource ID: {}, scalable dimension: {}, policy name: {}".format( + service_namespace, resource_id, scalable_dimension, policy_name + ) + ) + + +def _target_params_are_valid(namespace, r_id, dimension): + """ Check whether namespace, resource_id and dimension are valid and consistent with each other. 
""" + is_valid = True + valid_namespaces = [n.value for n in ServiceNamespaceValueSet] + if namespace not in valid_namespaces: + is_valid = False + if dimension is not None: + try: + valid_dimensions = [d.value for d in ScalableDimensionValueSet] + d_namespace, d_resource_type, scaling_property = dimension.split(":") + resource_type = _get_resource_type_from_resource_id(r_id) + if ( + dimension not in valid_dimensions + or d_namespace != namespace + or resource_type != d_resource_type + ): + is_valid = False + except ValueError: + is_valid = False + if not is_valid: + raise AWSValidationException( + "Unsupported service namespace, resource type or scalable dimension" + ) + return is_valid + + +def _get_resource_type_from_resource_id(resource_id): + # AWS Application Autoscaling resource_ids are multi-component (path-like) identifiers that vary in format, + # depending on the type of resource it identifies. resource_type is one of its components. + # resource_id format variations are described in + # https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html + # In a nutshell: + # - Most use slash separators, but some use colon separators. + # - The resource type is usually the first component of the resource_id... + # - ...except for sagemaker endpoints, dynamodb GSIs and keyspaces tables, where it's the third. + # - Comprehend uses an arn, with the resource type being the last element. + + if resource_id.startswith("arn:aws:comprehend"): + resource_id = resource_id.split(":")[-1] + resource_split = ( + resource_id.split("/") if "/" in resource_id else resource_id.split(":") + ) + if ( + resource_split[0] == "endpoint" + or (resource_split[0] == "table" and len(resource_split) > 2) + or (resource_split[0] == "keyspace") + ): + resource_type = resource_split[2] + else: + resource_type = resource_split[0] + return resource_type + + +class FakeScalableTarget(BaseModel): + def __init__( + self, backend, service_namespace, resource_id, scalable_dimension, **kwargs + ): + self.applicationautoscaling_backend = backend + self.service_namespace = service_namespace + self.resource_id = resource_id + self.scalable_dimension = scalable_dimension + self.min_capacity = kwargs["min_capacity"] + self.max_capacity = kwargs["max_capacity"] + self.role_arn = kwargs["role_arn"] + self.suspended_state = kwargs["suspended_state"] + self.creation_time = time.time() + + def update(self, **kwargs): + if kwargs["min_capacity"] is not None: + self.min_capacity = kwargs["min_capacity"] + if kwargs["max_capacity"] is not None: + self.max_capacity = kwargs["max_capacity"] + if kwargs["suspended_state"] is not None: + self.suspended_state = kwargs["suspended_state"] + + +class FakeApplicationAutoscalingPolicy(BaseModel): + def __init__( + self, + region_name, + policy_name, + service_namespace, + resource_id, + scalable_dimension, + policy_type, + policy_body, + ): + self.step_scaling_policy_configuration = None + self.target_tracking_scaling_policy_configuration = None + + if "policy_type" == "StepScaling": + self.step_scaling_policy_configuration = policy_body + self.target_tracking_scaling_policy_configuration = None + elif policy_type == "TargetTrackingScaling": + self.step_scaling_policy_configuration = None + self.target_tracking_scaling_policy_configuration = policy_body + else: + raise AWSValidationException( + "Unknown policy type {} specified.".format(policy_type) + ) + + self._policy_body = policy_body + self.service_namespace = service_namespace + self.resource_id = 
+
+
+class FakeScalableTarget(BaseModel):
+    def __init__(
+        self, backend, service_namespace, resource_id, scalable_dimension, **kwargs
+    ):
+        self.applicationautoscaling_backend = backend
+        self.service_namespace = service_namespace
+        self.resource_id = resource_id
+        self.scalable_dimension = scalable_dimension
+        self.min_capacity = kwargs["min_capacity"]
+        self.max_capacity = kwargs["max_capacity"]
+        self.role_arn = kwargs["role_arn"]
+        self.suspended_state = kwargs["suspended_state"]
+        self.creation_time = time.time()
+
+    def update(self, **kwargs):
+        if kwargs["min_capacity"] is not None:
+            self.min_capacity = kwargs["min_capacity"]
+        if kwargs["max_capacity"] is not None:
+            self.max_capacity = kwargs["max_capacity"]
+        if kwargs["suspended_state"] is not None:
+            self.suspended_state = kwargs["suspended_state"]
+
+
+class FakeApplicationAutoscalingPolicy(BaseModel):
+    def __init__(
+        self,
+        region_name,
+        policy_name,
+        service_namespace,
+        resource_id,
+        scalable_dimension,
+        policy_type,
+        policy_body,
+    ):
+        self.step_scaling_policy_configuration = None
+        self.target_tracking_scaling_policy_configuration = None
+
+        if policy_type == "StepScaling":
+            self.step_scaling_policy_configuration = policy_body
+            self.target_tracking_scaling_policy_configuration = None
+        elif policy_type == "TargetTrackingScaling":
+            self.step_scaling_policy_configuration = None
+            self.target_tracking_scaling_policy_configuration = policy_body
+        else:
+            raise AWSValidationException(
+                "Unknown policy type {} specified.".format(policy_type)
+            )
+
+        self._policy_body = policy_body
+        self.service_namespace = service_namespace
+        self.resource_id = resource_id
+        self.scalable_dimension = scalable_dimension
+        self.policy_name = policy_name
+        self.policy_type = policy_type
+        self._guid = uuid.uuid4()
+        self.policy_arn = "arn:aws:autoscaling:{}:scalingPolicy:{}:resource/sagemaker/{}:policyName/{}".format(
+            region_name, self._guid, self.resource_id, self.policy_name
+        )
+        self.creation_time = time.time()
+
+    @staticmethod
+    def formulate_key(service_namespace, resource_id, scalable_dimension, policy_name):
+        return "{}\t{}\t{}\t{}".format(
+            service_namespace, resource_id, scalable_dimension, policy_name
+        )
+
+
+applicationautoscaling_backends = {}
+for region_name, ecs_backend in ecs_backends.items():
+    applicationautoscaling_backends[region_name] = ApplicationAutoscalingBackend(
+        region_name, ecs_backend
+    )
""" + self._validate_params() + self.applicationautoscaling_backend.deregister_scalable_target( + self._get_param("ServiceNamespace"), + self._get_param("ResourceId"), + self._get_param("ScalableDimension"), + ) + return json.dumps({}) + + def put_scaling_policy(self): + policy = self.applicationautoscaling_backend.put_scaling_policy( + policy_name=self._get_param("PolicyName"), + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + policy_type=self._get_param("PolicyType"), + policy_body=self._get_param( + "StepScalingPolicyConfiguration", + self._get_param("TargetTrackingScalingPolicyConfiguration"), + ), + ) + return json.dumps({"PolicyARN": policy.policy_arn, "Alarms": []}) # ToDo + + def describe_scaling_policies(self): + ( + next_token, + policy_page, + ) = self.applicationautoscaling_backend.describe_scaling_policies( + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + max_results=self._get_param("MaxResults"), + next_token=self._get_param("NextToken"), + ) + response_obj = {"ScalingPolicies": [_build_policy(p) for p in policy_page]} + if next_token: + response_obj["NextToken"] = next_token + return json.dumps(response_obj) + + def delete_scaling_policy(self): + self.applicationautoscaling_backend.delete_scaling_policy( + policy_name=self._get_param("PolicyName"), + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + ) + return json.dumps({}) + + def _validate_params(self): + """Validate parameters. + TODO Integrate this validation with the validation in models.py + """ + namespace = self._get_param("ServiceNamespace") + dimension = self._get_param("ScalableDimension") + messages = [] + dimensions = [d.value for d in ScalableDimensionValueSet] + message = None + if dimension is not None and dimension not in dimensions: + messages.append( + "Value '{}' at 'scalableDimension' " + "failed to satisfy constraint: Member must satisfy enum value set: " + "{}".format(dimension, dimensions) + ) + namespaces = [n.value for n in ServiceNamespaceValueSet] + if namespace is not None and namespace not in namespaces: + messages.append( + "Value '{}' at 'serviceNamespace' " + "failed to satisfy constraint: Member must satisfy enum value set: " + "{}".format(namespace, namespaces) + ) + if len(messages) == 1: + message = "1 validation error detected: {}".format(messages[0]) + elif len(messages) > 1: + message = "{} validation errors detected: {}".format( + len(messages), "; ".join(messages) + ) + if message: + raise AWSValidationException(message) + + +def _build_target(t): + return { + "CreationTime": t.creation_time, + "ServiceNamespace": t.service_namespace, + "ResourceId": t.resource_id, + "RoleARN": t.role_arn, + "ScalableDimension": t.scalable_dimension, + "MaxCapacity": t.max_capacity, + "MinCapacity": t.min_capacity, + "SuspendedState": t.suspended_state, + } + + +def _build_policy(p): + response = { + "PolicyARN": p.policy_arn, + "PolicyName": p.policy_name, + "ServiceNamespace": p.service_namespace, + "ResourceId": p.resource_id, + "ScalableDimension": p.scalable_dimension, + "PolicyType": p.policy_type, + "CreationTime": p.creation_time, + } + if p.policy_type == "StepScaling": + response["StepScalingPolicyConfiguration"] = p.step_scaling_policy_configuration + elif 
p.policy_type == "TargetTrackingScaling": + response[ + "TargetTrackingScalingPolicyConfiguration" + ] = p.target_tracking_scaling_policy_configuration + return response diff --git a/moto/applicationautoscaling/urls.py b/moto/applicationautoscaling/urls.py new file mode 100644 index 000000000..8a608f954 --- /dev/null +++ b/moto/applicationautoscaling/urls.py @@ -0,0 +1,8 @@ +from __future__ import unicode_literals +from .responses import ApplicationAutoScalingResponse + +url_bases = ["https?://application-autoscaling.(.+).amazonaws.com"] + +url_paths = { + "{0}/$": ApplicationAutoScalingResponse.dispatch, +} diff --git a/moto/applicationautoscaling/utils.py b/moto/applicationautoscaling/utils.py new file mode 100644 index 000000000..72330c508 --- /dev/null +++ b/moto/applicationautoscaling/utils.py @@ -0,0 +1,10 @@ +from six.moves.urllib.parse import urlparse + + +def region_from_applicationautoscaling_url(url): + domain = urlparse(url).netloc + + if "." in domain: + return domain.split(".")[1] + else: + return "us-east-1" diff --git a/moto/athena/models.py b/moto/athena/models.py index 6aeca0ffa..24ad73ab9 100644 --- a/moto/athena/models.py +++ b/moto/athena/models.py @@ -2,10 +2,9 @@ from __future__ import unicode_literals import time from boto3 import Session +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID -from moto.core import BaseBackend, BaseModel - -from moto.core import ACCOUNT_ID +from uuid import uuid4 class TaggableResourceMixin(object): @@ -50,6 +49,27 @@ class WorkGroup(TaggableResourceMixin, BaseModel): self.configuration = configuration +class Execution(BaseModel): + def __init__(self, query, context, config, workgroup): + self.id = str(uuid4()) + self.query = query + self.context = context + self.config = config + self.workgroup = workgroup + self.start_time = time.time() + self.status = "QUEUED" + + +class NamedQuery(BaseModel): + def __init__(self, name, description, database, query_string, workgroup): + self.id = str(uuid4()) + self.name = name + self.description = description + self.database = database + self.query_string = query_string + self.workgroup = workgroup + + class AthenaBackend(BaseBackend): region_name = None @@ -57,6 +77,8 @@ class AthenaBackend(BaseBackend): if region_name is not None: self.region_name = region_name self.work_groups = {} + self.executions = {} + self.named_queries = {} def create_work_group(self, name, configuration, description, tags): if name in self.work_groups: @@ -76,6 +98,46 @@ class AthenaBackend(BaseBackend): for wg in self.work_groups.values() ] + def get_work_group(self, name): + if name not in self.work_groups: + return None + wg = self.work_groups[name] + return { + "Name": wg.name, + "State": wg.state, + "Configuration": wg.configuration, + "Description": wg.description, + "CreationTime": time.time(), + } + + def start_query_execution(self, query, context, config, workgroup): + execution = Execution( + query=query, context=context, config=config, workgroup=workgroup + ) + self.executions[execution.id] = execution + return execution.id + + def get_execution(self, exec_id): + return self.executions[exec_id] + + def stop_query_execution(self, exec_id): + execution = self.executions[exec_id] + execution.status = "CANCELLED" + + def create_named_query(self, name, description, database, query_string, workgroup): + nq = NamedQuery( + name=name, + description=description, + database=database, + query_string=query_string, + workgroup=workgroup, + ) + self.named_queries[nq.id] = nq + return nq.id + + def get_named_query(self, 
query_id):
+        return self.named_queries[query_id] if query_id in self.named_queries else None
+

 athena_backends = {}
 for region in Session().get_available_regions("athena"):
diff --git a/moto/athena/responses.py b/moto/athena/responses.py
index 80cac5d62..b5e6d6a95 100644
--- a/moto/athena/responses.py
+++ b/moto/athena/responses.py
@@ -18,15 +18,7 @@ class AthenaResponse(BaseResponse):
             name, configuration, description, tags
         )
         if not work_group:
-            return (
-                json.dumps(
-                    {
-                        "__type": "InvalidRequestException",
-                        "Message": "WorkGroup already exists",
-                    }
-                ),
-                dict(status=400),
-            )
+            return self.error("WorkGroup already exists", 400)
         return json.dumps(
             {
                 "CreateWorkGroupResponse": {
@@ -39,3 +31,86 @@ class AthenaResponse(BaseResponse):

     def list_work_groups(self):
         return json.dumps({"WorkGroups": self.athena_backend.list_work_groups()})
+
+    def get_work_group(self):
+        name = self._get_param("WorkGroup")
+        return json.dumps({"WorkGroup": self.athena_backend.get_work_group(name)})
+
+    def start_query_execution(self):
+        query = self._get_param("QueryString")
+        context = self._get_param("QueryExecutionContext")
+        config = self._get_param("ResultConfiguration")
+        workgroup = self._get_param("WorkGroup")
+        if workgroup and not self.athena_backend.get_work_group(workgroup):
+            return self.error("WorkGroup does not exist", 400)
+        id = self.athena_backend.start_query_execution(
+            query=query, context=context, config=config, workgroup=workgroup
+        )
+        return json.dumps({"QueryExecutionId": id})
+
+    def get_query_execution(self):
+        exec_id = self._get_param("QueryExecutionId")
+        execution = self.athena_backend.get_execution(exec_id)
+        result = {
+            "QueryExecution": {
+                "QueryExecutionId": exec_id,
+                "Query": execution.query,
+                "StatementType": "DDL",
+                "ResultConfiguration": execution.config,
+                "QueryExecutionContext": execution.context,
+                "Status": {
+                    "State": execution.status,
+                    "SubmissionDateTime": execution.start_time,
+                },
+                "Statistics": {
+                    "EngineExecutionTimeInMillis": 0,
+                    "DataScannedInBytes": 0,
+                    "TotalExecutionTimeInMillis": 0,
+                    "QueryQueueTimeInMillis": 0,
+                    "QueryPlanningTimeInMillis": 0,
+                    "ServiceProcessingTimeInMillis": 0,
+                },
+                "WorkGroup": execution.workgroup,
+            }
+        }
+        return json.dumps(result)
+
+    def stop_query_execution(self):
+        exec_id = self._get_param("QueryExecutionId")
+        self.athena_backend.stop_query_execution(exec_id)
+        return json.dumps({})
+
+    def error(self, msg, status):
+        return (
+            json.dumps({"__type": "InvalidRequestException", "Message": msg}),
+            dict(status=status),
+        )
+
+    def create_named_query(self):
+        name = self._get_param("Name")
+        description = self._get_param("Description")
+        database = self._get_param("Database")
+        query_string = self._get_param("QueryString")
+        workgroup = self._get_param("WorkGroup")
+        if workgroup and not self.athena_backend.get_work_group(workgroup):
+            return self.error("WorkGroup does not exist", 400)
+        query_id = self.athena_backend.create_named_query(
+            name, description, database, query_string, workgroup
+        )
+        return json.dumps({"NamedQueryId": query_id})
+
+    def get_named_query(self):
+        query_id = self._get_param("NamedQueryId")
+        nq = self.athena_backend.get_named_query(query_id)
+        return json.dumps(
+            {
+                "NamedQuery": {
+                    "Name": nq.name,
+                    "Description": nq.description,
+                    "Database": nq.database,
+                    "QueryString": nq.query_string,
+                    "NamedQueryId": nq.id,
+                    "WorkGroup": nq.workgroup,
+                }
+            }
+        )
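The Athena backend above keeps executions and named queries purely in memory. A minimal sketch of how the new endpoints could be exercised with boto3 (the bucket and names are illustrative):

```python
import boto3
from moto import mock_athena


@mock_athena
def test_query_lifecycle():
    client = boto3.client("athena", region_name="us-east-1")

    exec_id = client.start_query_execution(
        QueryString="SELECT 1",
        QueryExecutionContext={"Database": "default"},
        ResultConfiguration={"OutputLocation": "s3://results-bucket/"},
    )["QueryExecutionId"]

    # The backend reports the stored state verbatim: QUEUED until stopped.
    status = client.get_query_execution(QueryExecutionId=exec_id)
    assert status["QueryExecution"]["Status"]["State"] == "QUEUED"

    client.stop_query_execution(QueryExecutionId=exec_id)
    status = client.get_query_execution(QueryExecutionId=exec_id)
    assert status["QueryExecution"]["Status"]["State"] == "CANCELLED"
```

Because `start_query_execution` and `create_named_query` both check `get_work_group` first, passing a `WorkGroup` that was never created yields the 400 `InvalidRequestException` built by the new `error()` helper.

diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py
index 6f73eff8f..2fddd18ec 100644
---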
a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -21,3 +21,8 @@ class InvalidInstanceError(AutoscalingClientError): super(InvalidInstanceError, self).__init__( "ValidationError", "Instance [{0}] is invalid.".format(instance_id) ) + + +class ValidationError(AutoscalingClientError): + def __init__(self, message): + super(ValidationError, self).__init__("ValidationError", message) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 45ee7d192..f4afd51be 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -2,11 +2,15 @@ from __future__ import unicode_literals import random -from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping +from moto.packages.boto.ec2.blockdevicemapping import ( + BlockDeviceType, + BlockDeviceMapping, +) from moto.ec2.exceptions import InvalidInstanceIdError from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel +from moto.core.utils import camelcase_to_underscores from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elbv2 import elbv2_backends @@ -15,6 +19,7 @@ from .exceptions import ( AutoscalingClientError, ResourceContentionError, InvalidInstanceError, + ValidationError, ) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown @@ -74,7 +79,7 @@ class FakeScalingPolicy(BaseModel): ) -class FakeLaunchConfiguration(BaseModel): +class FakeLaunchConfiguration(CloudFormationModel): def __init__( self, name, @@ -127,6 +132,15 @@ class FakeLaunchConfiguration(BaseModel): ) return config + @staticmethod + def cloudformation_name_type(): + return "LaunchConfigurationName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-launchconfiguration.html + return "AWS::AutoScaling::LaunchConfiguration" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -215,7 +229,7 @@ class FakeLaunchConfiguration(BaseModel): return block_device_map -class FakeAutoScalingGroup(BaseModel): +class FakeAutoScalingGroup(CloudFormationModel): def __init__( self, name, @@ -224,6 +238,7 @@ class FakeAutoScalingGroup(BaseModel): max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -233,10 +248,12 @@ class FakeAutoScalingGroup(BaseModel): placement_group, termination_policies, autoscaling_backend, + ec2_backend, tags, new_instances_protected_from_scale_in=False, ): self.autoscaling_backend = autoscaling_backend + self.ec2_backend = ec2_backend self.name = name self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) @@ -244,10 +261,10 @@ class FakeAutoScalingGroup(BaseModel): self.max_size = max_size self.min_size = min_size - self.launch_config = self.autoscaling_backend.launch_configurations[ - launch_config_name - ] - self.launch_config_name = launch_config_name + self.launch_template = None + self.launch_config = None + + self._set_launch_configuration(launch_config_name, launch_template) self.default_cooldown = ( default_cooldown if default_cooldown else DEFAULT_COOLDOWN @@ -267,6 +284,9 @@ class FakeAutoScalingGroup(BaseModel): self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) + def active_instances(self): + return [x for x in self.instance_states if x.lifecycle_state == "InService"] + def _set_azs_and_vpcs(self, 
availability_zones, vpc_zone_identifier, update=False): # for updates, if only AZs are provided, they must not clash with # the AZs of existing VPCs @@ -298,6 +318,51 @@ class FakeAutoScalingGroup(BaseModel): self.availability_zones = availability_zones self.vpc_zone_identifier = vpc_zone_identifier + def _set_launch_configuration(self, launch_config_name, launch_template): + if launch_config_name: + self.launch_config = self.autoscaling_backend.launch_configurations[ + launch_config_name + ] + self.launch_config_name = launch_config_name + + if launch_template: + launch_template_id = launch_template.get("launch_template_id") + launch_template_name = launch_template.get("launch_template_name") + + if not (launch_template_id or launch_template_name) or ( + launch_template_id and launch_template_name + ): + raise ValidationError( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + if launch_template_id: + self.launch_template = self.ec2_backend.get_launch_template( + launch_template_id + ) + elif launch_template_name: + self.launch_template = self.ec2_backend.get_launch_template_by_name( + launch_template_name + ) + self.launch_template_version = int(launch_template["version"]) + + @staticmethod + def __set_string_propagate_at_launch_booleans_on_tags(tags): + bool_to_string = {True: "true", False: "false"} + for tag in tags: + if "PropagateAtLaunch" in tag: + tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]] + return tags + + @staticmethod + def cloudformation_name_type(): + return "AutoScalingGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-autoscalinggroup.html + return "AWS::AutoScaling::AutoScalingGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -305,6 +370,10 @@ class FakeAutoScalingGroup(BaseModel): properties = cloudformation_json["Properties"] launch_config_name = properties.get("LaunchConfigurationName") + launch_template = { + camelcase_to_underscores(k): v + for k, v in properties.get("LaunchTemplate", {}).items() + } load_balancer_names = properties.get("LoadBalancerNames", []) target_group_arns = properties.get("TargetGroupARNs", []) @@ -316,6 +385,7 @@ class FakeAutoScalingGroup(BaseModel): max_size=properties.get("MaxSize"), min_size=properties.get("MinSize"), launch_config_name=launch_config_name, + launch_template=launch_template, vpc_zone_identifier=( ",".join(properties.get("VPCZoneIdentifier", [])) or None ), @@ -326,7 +396,9 @@ class FakeAutoScalingGroup(BaseModel): target_group_arns=target_group_arns, placement_group=None, termination_policies=properties.get("TerminationPolicies", []), - tags=properties.get("Tags", []), + tags=cls.__set_string_propagate_at_launch_booleans_on_tags( + properties.get("Tags", []) + ), new_instances_protected_from_scale_in=properties.get( "NewInstancesProtectedFromScaleIn", False ), @@ -362,6 +434,38 @@ class FakeAutoScalingGroup(BaseModel): def physical_resource_id(self): return self.name + @property + def image_id(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.image_id + + return self.launch_config.image_id + + @property + def instance_type(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.instance_type + + return self.launch_config.instance_type + + @property + def 
user_data(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.user_data + + return self.launch_config.user_data + + @property + def security_groups(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.security_groups + + return self.launch_config.security_groups + def update( self, availability_zones, @@ -369,6 +473,7 @@ class FakeAutoScalingGroup(BaseModel): max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -390,11 +495,8 @@ class FakeAutoScalingGroup(BaseModel): if max_size is not None and max_size < len(self.instance_states): desired_capacity = max_size - if launch_config_name: - self.launch_config = self.autoscaling_backend.launch_configurations[ - launch_config_name - ] - self.launch_config_name = launch_config_name + self._set_launch_configuration(launch_config_name, launch_template) + if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: @@ -413,12 +515,11 @@ class FakeAutoScalingGroup(BaseModel): else: self.desired_capacity = new_capacity - curr_instance_count = len(self.instance_states) + curr_instance_count = len(self.active_instances()) if self.desired_capacity == curr_instance_count: - return - - if self.desired_capacity > curr_instance_count: + pass # Nothing to do here + elif self.desired_capacity > curr_instance_count: # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) @@ -442,6 +543,9 @@ class FakeAutoScalingGroup(BaseModel): self.instance_states = list( set(self.instance_states) - set(instances_to_remove) ) + if self.name in self.autoscaling_backend.autoscaling_groups: + self.autoscaling_backend.update_attached_elbs(self.name) + self.autoscaling_backend.update_attached_target_groups(self.name) def get_propagated_tags(self): propagated_tags = {} @@ -450,18 +554,19 @@ class FakeAutoScalingGroup(BaseModel): # boto3 and cloudformation use PropagateAtLaunch if "propagate_at_launch" in tag and tag["propagate_at_launch"] == "true": propagated_tags[tag["key"]] = tag["value"] - if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"]: + if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"] == "true": propagated_tags[tag["Key"]] = tag["Value"] return propagated_tags def replace_autoscaling_group_instances(self, count_needed, propagated_tags): propagated_tags[ASG_NAME_TAG] = self.name + reservation = self.autoscaling_backend.ec2_backend.add_instances( - self.launch_config.image_id, + self.image_id, count_needed, - self.launch_config.user_data, - self.launch_config.security_groups, - instance_type=self.launch_config.instance_type, + self.user_data, + self.security_groups, + instance_type=self.instance_type, tags={"instance": propagated_tags}, placement=random.choice(self.availability_zones), ) @@ -553,6 +658,7 @@ class AutoScalingBackend(BaseBackend): max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -576,7 +682,19 @@ class AutoScalingBackend(BaseBackend): health_check_period = 300 else: health_check_period = make_int(health_check_period) - if launch_config_name is None and instance_id is not None: + + # TODO: Add MixedInstancesPolicy once implemented. + # Verify only a single launch config-like parameter is provided. 
+ params = [launch_config_name, launch_template, instance_id] + num_params = sum([1 for param in params if param]) + + if num_params != 1: + raise ValidationError( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." + ) + + if instance_id: try: instance = self.ec2_backend.get_instance(instance_id) launch_config_name = name @@ -593,6 +711,7 @@ class AutoScalingBackend(BaseBackend): max_size=max_size, min_size=min_size, launch_config_name=launch_config_name, + launch_template=launch_template, vpc_zone_identifier=vpc_zone_identifier, default_cooldown=default_cooldown, health_check_period=health_check_period, @@ -602,6 +721,7 @@ class AutoScalingBackend(BaseBackend): placement_group=placement_group, termination_policies=termination_policies, autoscaling_backend=self, + ec2_backend=self.ec2_backend, tags=tags, new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) @@ -619,6 +739,7 @@ class AutoScalingBackend(BaseBackend): max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -627,19 +748,28 @@ class AutoScalingBackend(BaseBackend): termination_policies, new_instances_protected_from_scale_in=None, ): + # TODO: Add MixedInstancesPolicy once implemented. + # Verify only a single launch config-like parameter is provided. + if launch_config_name and launch_template: + raise ValidationError( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName " + "or MixedInstancesPolicy parameter." + ) + group = self.autoscaling_groups[name] group.update( - availability_zones, - desired_capacity, - max_size, - min_size, - launch_config_name, - vpc_zone_identifier, - default_cooldown, - health_check_period, - health_check_type, - placement_group, - termination_policies, + availability_zones=availability_zones, + desired_capacity=desired_capacity, + max_size=max_size, + min_size=min_size, + launch_config_name=launch_config_name, + launch_template=launch_template, + vpc_zone_identifier=vpc_zone_identifier, + default_cooldown=default_cooldown, + health_check_period=health_check_period, + health_check_type=health_check_type, + placement_group=placement_group, + termination_policies=termination_policies, new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) return group @@ -655,10 +785,16 @@ class AutoScalingBackend(BaseBackend): self.set_desired_capacity(group_name, 0) self.autoscaling_groups.pop(group_name, None) - def describe_auto_scaling_instances(self): + def describe_auto_scaling_instances(self, instance_ids): instance_states = [] for group in self.autoscaling_groups.values(): - instance_states.extend(group.instance_states) + instance_states.extend( + [ + x + for x in group.instance_states + if not instance_ids or x.instance.id in instance_ids + ] + ) return instance_states def attach_instances(self, group_name, instance_ids): @@ -682,6 +818,7 @@ class AutoScalingBackend(BaseBackend): ) group.instance_states.extend(new_instances) self.update_attached_elbs(group.name) + self.update_attached_target_groups(group.name) def set_instance_health( self, instance_id, health_status, should_respect_grace_period @@ -697,7 +834,7 @@ class AutoScalingBackend(BaseBackend): def detach_instances(self, group_name, instance_ids, should_decrement): group = self.autoscaling_groups[group_name] - original_size = len(group.instance_states) + original_size = group.desired_capacity detached_instances = [ x for x in 
group.instance_states if x.instance.id in instance_ids @@ -714,13 +851,8 @@ class AutoScalingBackend(BaseBackend): if should_decrement: group.desired_capacity = original_size - len(instance_ids) - else: - count_needed = len(instance_ids) - group.replace_autoscaling_group_instances( - count_needed, group.get_propagated_tags() - ) - self.update_attached_elbs(group_name) + group.set_desired_capacity(group.desired_capacity) return detached_instances def set_desired_capacity(self, group_name, desired_capacity): @@ -734,7 +866,7 @@ class AutoScalingBackend(BaseBackend): self.set_desired_capacity(group_name, desired_capacity) def change_capacity_percent(self, group_name, scaling_adjustment): - """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + """http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, Auto Scaling will round it off to 1. If the PercentChangeInCapacity returns a value greater than 1, Auto Scaling will round it off to the @@ -785,7 +917,9 @@ class AutoScalingBackend(BaseBackend): def update_attached_elbs(self, group_name): group = self.autoscaling_groups[group_name] - group_instance_ids = set(state.instance.id for state in group.instance_states) + group_instance_ids = set( + state.instance.id for state in group.active_instances() + ) # skip this if group.load_balancers is empty # otherwise elb_backend.describe_load_balancers returns all available load balancers @@ -902,15 +1036,15 @@ class AutoScalingBackend(BaseBackend): autoscaling_group_name, autoscaling_group, ) in self.autoscaling_groups.items(): - original_instance_count = len(autoscaling_group.instance_states) + original_active_instance_count = len(autoscaling_group.active_instances()) autoscaling_group.instance_states = list( filter( lambda i_state: i_state.instance.id not in instance_ids, autoscaling_group.instance_states, ) ) - difference = original_instance_count - len( - autoscaling_group.instance_states + difference = original_active_instance_count - len( + autoscaling_group.active_instances() ) if difference > 0: autoscaling_group.replace_autoscaling_group_instances( @@ -918,6 +1052,45 @@ class AutoScalingBackend(BaseBackend): ) self.update_attached_elbs(autoscaling_group_name) + def enter_standby_instances(self, group_name, instance_ids, should_decrement): + group = self.autoscaling_groups[group_name] + original_size = group.desired_capacity + standby_instances = [] + for instance_state in group.instance_states: + if instance_state.instance.id in instance_ids: + instance_state.lifecycle_state = "Standby" + standby_instances.append(instance_state) + if should_decrement: + group.desired_capacity = group.desired_capacity - len(instance_ids) + group.set_desired_capacity(group.desired_capacity) + return standby_instances, original_size, group.desired_capacity + + def exit_standby_instances(self, group_name, instance_ids): + group = self.autoscaling_groups[group_name] + original_size = group.desired_capacity + standby_instances = [] + for instance_state in group.instance_states: + if instance_state.instance.id in instance_ids: + instance_state.lifecycle_state = "InService" + standby_instances.append(instance_state) + group.desired_capacity = group.desired_capacity + len(instance_ids) + group.set_desired_capacity(group.desired_capacity) + return standby_instances, original_size, group.desired_capacity + + def terminate_instance(self, instance_id, should_decrement): + instance = 
self.ec2_backend.get_instance(instance_id) + instance_state = next( + instance_state + for group in self.autoscaling_groups.values() + for instance_state in group.instance_states + if instance_state.instance.id == instance.id + ) + group = instance.autoscaling_group + original_size = group.desired_capacity + self.detach_instances(group.name, [instance.id], should_decrement) + self.ec2_backend.terminate_instances([instance.id]) + return instance_state, original_size, group.desired_capacity + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 83e2f7d5a..a9651a774 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -1,7 +1,12 @@ from __future__ import unicode_literals +import datetime from moto.core.responses import BaseResponse -from moto.core.utils import amz_crc32, amzn_request_id +from moto.core.utils import ( + amz_crc32, + amzn_request_id, + iso_8601_datetime_with_milliseconds, +) from .models import autoscaling_backends @@ -76,6 +81,7 @@ class AutoScalingResponse(BaseResponse): min_size=self._get_int_param("MinSize"), instance_id=self._get_param("InstanceId"), launch_config_name=self._get_param("LaunchConfigurationName"), + launch_template=self._get_dict_param("LaunchTemplate."), vpc_zone_identifier=self._get_param("VPCZoneIdentifier"), default_cooldown=self._get_int_param("DefaultCooldown"), health_check_period=self._get_int_param("HealthCheckGracePeriod"), @@ -192,6 +198,7 @@ class AutoScalingResponse(BaseResponse): max_size=self._get_int_param("MaxSize"), min_size=self._get_int_param("MinSize"), launch_config_name=self._get_param("LaunchConfigurationName"), + launch_template=self._get_dict_param("LaunchTemplate."), vpc_zone_identifier=self._get_param("VPCZoneIdentifier"), default_cooldown=self._get_int_param("DefaultCooldown"), health_check_period=self._get_int_param("HealthCheckGracePeriod"), @@ -226,7 +233,9 @@ class AutoScalingResponse(BaseResponse): return template.render() def describe_auto_scaling_instances(self): - instance_states = self.autoscaling_backend.describe_auto_scaling_instances() + instance_states = self.autoscaling_backend.describe_auto_scaling_instances( + instance_ids=self._get_multi_param("InstanceIds.member") + ) template = self.response_template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) return template.render(instance_states=instance_states) @@ -289,6 +298,50 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def enter_standby(self): + group_name = self._get_param("AutoScalingGroupName") + instance_ids = self._get_multi_param("InstanceIds.member") + should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity") + if should_decrement_string == "true": + should_decrement = True + else: + should_decrement = False + ( + standby_instances, + original_size, + desired_capacity, + ) = self.autoscaling_backend.enter_standby_instances( + group_name, instance_ids, should_decrement + ) + template = self.response_template(ENTER_STANDBY_TEMPLATE) + return template.render( + standby_instances=standby_instances, + should_decrement=should_decrement, + original_size=original_size, + desired_capacity=desired_capacity, + timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()), + ) + + @amz_crc32 + @amzn_request_id + def exit_standby(self): + group_name = self._get_param("AutoScalingGroupName") + 
instance_ids = self._get_multi_param("InstanceIds.member")
+        (
+            standby_instances,
+            original_size,
+            desired_capacity,
+        ) = self.autoscaling_backend.exit_standby_instances(group_name, instance_ids)
+        template = self.response_template(EXIT_STANDBY_TEMPLATE)
+        return template.render(
+            standby_instances=standby_instances,
+            original_size=original_size,
+            desired_capacity=desired_capacity,
+            timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
+        )
+
     def suspend_processes(self):
         autoscaling_group_name = self._get_param("AutoScalingGroupName")
         scaling_processes = self._get_multi_param("ScalingProcesses.member")
@@ -308,6 +361,29 @@ class AutoScalingResponse(BaseResponse):
         template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE)
         return template.render()

+    @amz_crc32
+    @amzn_request_id
+    def terminate_instance_in_auto_scaling_group(self):
+        instance_id = self._get_param("InstanceId")
+        should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity")
+        if should_decrement_string == "true":
+            should_decrement = True
+        else:
+            should_decrement = False
+        (
+            instance,
+            original_size,
+            desired_capacity,
+        ) = self.autoscaling_backend.terminate_instance(instance_id, should_decrement)
+        template = self.response_template(TERMINATE_INSTANCES_TEMPLATE)
+        return template.render(
+            instance=instance,
+            should_decrement=should_decrement,
+            original_size=original_size,
+            desired_capacity=desired_capacity,
+            timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
+        )
+

 CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
@@ -499,14 +575,31 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
           <HealthCheckType>{{ group.health_check_type }}</HealthCheckType>
           <CreatedTime>2013-05-06T17:47:15.107Z</CreatedTime>
+          {% if group.launch_config_name %}
           <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
+          {% elif group.launch_template %}
+          <LaunchTemplate>
+            <LaunchTemplateId>{{ group.launch_template.id }}</LaunchTemplateId>
+            <Version>{{ group.launch_template_version }}</Version>
+            <LaunchTemplateName>{{ group.launch_template.name }}</LaunchTemplateName>
+          </LaunchTemplate>
+          {% endif %}
           <Instances>
             {% for instance_state in group.instance_states %}
             <member>
               <HealthStatus>{{ instance_state.health_status }}</HealthStatus>
               <AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
               <InstanceId>{{ instance_state.instance.id }}</InstanceId>
+              <InstanceType>{{ instance_state.instance.instance_type }}</InstanceType>
+              {% if group.launch_config_name %}
               <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
+              {% elif group.launch_template %}
+              <LaunchTemplate>
+                <LaunchTemplateId>{{ group.launch_template.id }}</LaunchTemplateId>
+                <Version>{{ group.launch_template_version }}</Version>
+                <LaunchTemplateName>{{ group.launch_template.name }}</LaunchTemplateName>
+              </LaunchTemplate>
+              {% endif %}
               <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
               <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
@@ -592,7 +685,16 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
       <AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
       <AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
       <InstanceId>{{ instance_state.instance.id }}</InstanceId>
+      <InstanceType>{{ instance_state.instance.instance_type }}</InstanceType>
+      {% if instance_state.instance.autoscaling_group.launch_config_name %}
       <LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
+      {% elif instance_state.instance.autoscaling_group.launch_template %}
+      <LaunchTemplate>
+        <LaunchTemplateId>{{ instance_state.instance.autoscaling_group.launch_template.id }}</LaunchTemplateId>
+        <Version>{{ instance_state.instance.autoscaling_group.launch_template_version }}</Version>
+        <LaunchTemplateName>{{ instance_state.instance.autoscaling_group.launch_template.name }}</LaunchTemplateName>
+      </LaunchTemplate>
+      {% endif %}
       <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
       <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
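Taken together, the model and response changes above let a group be backed by a launch template instead of a launch configuration, and add the standby and terminate flows. A minimal sketch of how this could be driven through boto3 under the mocks (AMI, names and CIDRs are illustrative):

```python
import boto3
from moto import mock_autoscaling, mock_ec2


@mock_ec2
@mock_autoscaling
def test_asg_from_launch_template():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    asg = boto3.client("autoscaling", region_name="us-east-1")

    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
    subnet = ec2.create_subnet(
        VpcId=vpc["VpcId"], CidrBlock="10.0.0.0/24", AvailabilityZone="us-east-1a"
    )["Subnet"]
    template = ec2.create_launch_template(
        LaunchTemplateName="test-template",
        LaunchTemplateData={"ImageId": "ami-12345678", "InstanceType": "t2.micro"},
    )["LaunchTemplate"]

    # Exactly one of LaunchConfigurationName / LaunchTemplate / InstanceId may
    # be given; anything else raises the new ValidationError.
    asg.create_auto_scaling_group(
        AutoScalingGroupName="test-asg",
        LaunchTemplate={
            "LaunchTemplateId": template["LaunchTemplateId"],
            "Version": "1",
        },
        MinSize=0,
        MaxSize=4,
        DesiredCapacity=2,
        VPCZoneIdentifier=subnet["SubnetId"],
    )

    instance_id = asg.describe_auto_scaling_instances()["AutoScalingInstances"][0][
        "InstanceId"
    ]

    # Standby instances stay in the group but drop out of active_instances().
    asg.enter_standby(
        AutoScalingGroupName="test-asg",
        InstanceIds=[instance_id],
        ShouldDecrementDesiredCapacity=True,
    )
    asg.exit_standby(AutoScalingGroupName="test-asg", InstanceIds=[instance_id])

    asg.terminate_instance_in_auto_scaling_group(
        InstanceId=instance_id, ShouldDecrementDesiredCapacity=True
    )
```

The responses for these three calls are rendered from the XML templates that follow.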
@@ -705,3 +807,73 @@ SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+
+ENTER_STANDBY_TEMPLATE = """<EnterStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <EnterStandbyResult>
+    <Activities>
+      {% for instance in standby_instances %}
+      <member>
+        <ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
+        <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+        {% if should_decrement %}
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+        {% else %}
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request.</Cause>
+        {% endif %}
+        <Description>Moving EC2 instance to Standby: {{ instance.instance.id }}</Description>
+        <Progress>50</Progress>
+        <StartTime>{{ timestamp }}</StartTime>
+        <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+        <StatusCode>InProgress</StatusCode>
+      </member>
+      {% endfor %}
+    </Activities>
+  </EnterStandbyResult>
+  <ResponseMetadata>
+    <RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
+  </ResponseMetadata>
+</EnterStandbyResponse>"""
+
+EXIT_STANDBY_TEMPLATE = """<ExitStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ExitStandbyResult>
+    <Activities>
+      {% for instance in standby_instances %}
+      <member>
+        <ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
+        <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+        <Description>Moving EC2 instance out of Standby: {{ instance.instance.id }}</Description>
+        <Progress>30</Progress>
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved out of standby in response to a user request, increasing the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+        <StartTime>{{ timestamp }}</StartTime>
+        <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+        <StatusCode>PreInService</StatusCode>
+      </member>
+      {% endfor %}
+    </Activities>
+  </ExitStandbyResult>
+  <ResponseMetadata>
+    <RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
+  </ResponseMetadata>
+</ExitStandbyResponse>"""
+
+TERMINATE_INSTANCES_TEMPLATE = """<TerminateInstanceInAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <TerminateInstanceInAutoScalingGroupResult>
+    <Activity>
+      <ActivityId>35b5c464-0b63-2fc7-1611-467d4a7f2497EXAMPLE</ActivityId>
+      <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+      {% if should_decrement %}
+      <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+      {% else %}
+      <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request.</Cause>
+      {% endif %}
+      <Description>Terminating EC2 instance: {{ instance.instance.id }}</Description>
+      <Progress>0</Progress>
+      <StartTime>{{ timestamp }}</StartTime>
+      <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+      <StatusCode>InProgress</StatusCode>
+    </Activity>
+  </TerminateInstanceInAutoScalingGroupResult>
+  <ResponseMetadata>
+    <RequestId>a1ba8fb9-31d6-4d9a-ace1-a7f76749df11EXAMPLE</RequestId>
+  </ResponseMetadata>
+</TerminateInstanceInAutoScalingGroupResponse>
""" diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 939952d5e..475ef3086 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -5,6 +5,8 @@ import time from collections import defaultdict import copy import datetime +from gzip import GzipFile + import docker import docker.errors import hashlib @@ -15,18 +17,17 @@ import json import re import zipfile import uuid -import functools import tarfile import calendar import threading import traceback import weakref -import requests.adapters +import requests.exceptions from boto3 import Session from moto.awslambda.policy import Policy -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel from moto.core.exceptions import RESTError from moto.iam.models import iam_backend from moto.iam.exceptions import IAMNotFoundException @@ -45,6 +46,7 @@ from moto.sqs import sqs_backends from moto.dynamodb2 import dynamodb_backends2 from moto.dynamodbstreams import dynamodbstreams_backends from moto.core import ACCOUNT_ID +from moto.utilities.docker_utilities import DockerModel logger = logging.getLogger(__name__) @@ -53,7 +55,6 @@ try: except ImportError: from backports.tempfile import TemporaryDirectory -_orig_adapter_send = requests.adapters.HTTPAdapter.send docker_3 = docker.__version__[0] >= "3" @@ -149,8 +150,9 @@ class _DockerDataVolumeContext: raise # multiple processes trying to use same volume? -class LambdaFunction(BaseModel): +class LambdaFunction(CloudFormationModel, DockerModel): def __init__(self, spec, region, validate_s3=True, version=1): + DockerModel.__init__(self) # required self.region = region self.code = spec["Code"] @@ -160,23 +162,9 @@ class LambdaFunction(BaseModel): self.run_time = spec["Runtime"] self.logs_backend = logs_backends[self.region] self.environment_vars = spec.get("Environment", {}).get("Variables", {}) - self.docker_client = docker.from_env() self.policy = None self.state = "Active" - - # Unfortunately mocking replaces this method w/o fallback enabled, so we - # need to replace it if we detect it's been mocked - if requests.adapters.HTTPAdapter.send != _orig_adapter_send: - _orig_get_adapter = self.docker_client.api.get_adapter - - def replace_adapter_send(*args, **kwargs): - adapter = _orig_get_adapter(*args, **kwargs) - - if isinstance(adapter, requests.adapters.HTTPAdapter): - adapter.send = functools.partial(_orig_adapter_send, adapter) - return adapter - - self.docker_client.api.get_adapter = replace_adapter_send + self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None) # optional self.description = spec.get("Description", "") @@ -216,7 +204,7 @@ class LambdaFunction(BaseModel): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key(self.code["S3Bucket"], self.code["S3Key"]) + key = s3_backend.get_object(self.code["S3Bucket"], self.code["S3Key"]) except MissingBucket: if do_validate_s3(): raise InvalidParameterValueException( @@ -283,7 +271,7 @@ class LambdaFunction(BaseModel): return config def get_code(self): - return { + code = { "Code": { "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format( self.region, self.code["S3Key"] @@ -292,6 +280,15 @@ class LambdaFunction(BaseModel): }, "Configuration": self.get_configuration(), } + if self.reserved_concurrency: + code.update( + { + "Concurrency": { + "ReservedConcurrentExecutions": self.reserved_concurrency + } + } + ) + return code def update_configuration(self, config_updates): for key, value in config_updates.items(): 
@@ -308,7 +305,7 @@ class LambdaFunction(BaseModel): elif key == "Timeout": self.timeout = value elif key == "VpcConfig": - self.vpc_config = value + self._vpc_config = value elif key == "Environment": self.environment_vars = value["Variables"] @@ -342,7 +339,7 @@ class LambdaFunction(BaseModel): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key( + key = s3_backend.get_object( updated_spec["S3Bucket"], updated_spec["S3Key"] ) except MissingBucket: @@ -379,25 +376,32 @@ class LambdaFunction(BaseModel): event = dict() if context is None: context = {} + output = None try: # TODO: I believe we can keep the container running and feed events as needed # also need to hook it up to the other services so it can make kws/s3 etc calls # Should get invoke_id /RequestId from invocation env_vars = { + "_HANDLER": self.handler, + "AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time), "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout, "AWS_LAMBDA_FUNCTION_NAME": self.function_name, "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size, "AWS_LAMBDA_FUNCTION_VERSION": self.version, "AWS_REGION": self.region, + "AWS_ACCESS_KEY_ID": "role-account-id", + "AWS_SECRET_ACCESS_KEY": "role-secret-key", + "AWS_SESSION_TOKEN": "session-token", } env_vars.update(self.environment_vars) - container = output = exit_code = None + container = exit_code = None log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON) with _DockerDataVolumeContext(self) as data_vol: try: + self.docker_client.ping() # Verify Docker is running run_kwargs = ( dict(links={"motoserver": "motoserver"}) if settings.TEST_SERVER_MODE @@ -455,24 +459,31 @@ class LambdaFunction(BaseModel): # We only care about the response from the lambda # Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25 - output = output.splitlines()[-1] - return output, False + resp = output.splitlines()[-1] + logs = os.linesep.join( + [line for line in self.convert(output).splitlines()[:-1]] + ) + return resp, False, logs + except docker.errors.DockerException as e: + # Docker itself is probably not running - there will be no Lambda-logs to handle + return "error running docker: {}".format(e), True, "" except BaseException as e: traceback.print_exc() - return "error running lambda: {}".format(e), True + logs = os.linesep.join( + [line for line in self.convert(output).splitlines()[:-1]] + ) + return "error running lambda: {}".format(e), True, logs def invoke(self, body, request_headers, response_headers): - payload = dict() if body: body = json.loads(body) # Get the invocation type: - res, errored = self._invoke_lambda(code=self.code, event=body) + res, errored, logs = self._invoke_lambda(code=self.code, event=body) if request_headers.get("x-amz-invocation-type") == "RequestResponse": - encoded = base64.b64encode(res.encode("utf-8")) + encoded = base64.b64encode(logs.encode("utf-8")) response_headers["x-amz-log-result"] = encoded.decode("utf-8") - payload["result"] = response_headers["x-amz-log-result"] result = res.encode("utf-8") else: result = res @@ -481,11 +492,29 @@ class LambdaFunction(BaseModel): return result + @staticmethod + def cloudformation_name_type(): + return "FunctionName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html + return "AWS::Lambda::Function" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): 
properties = cloudformation_json["Properties"] + optional_properties = ( + "Description", + "MemorySize", + "Publish", + "Timeout", + "VpcConfig", + "Environment", + "ReservedConcurrentExecutions", + ) # required spec = { @@ -495,9 +524,7 @@ class LambdaFunction(BaseModel): "Role": properties["Role"], "Runtime": properties["Runtime"], } - optional_properties = ( - "Description MemorySize Publish Timeout VpcConfig Environment".split() - ) + # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the # default logic for prop in optional_properties: @@ -545,43 +572,66 @@ class LambdaFunction(BaseModel): lambda_backends[region].delete_function(self.function_name) -class EventSourceMapping(BaseModel): +class EventSourceMapping(CloudFormationModel): def __init__(self, spec): # required - self.function_arn = spec["FunctionArn"] + self.function_name = spec["FunctionName"] self.event_source_arn = spec["EventSourceArn"] + + # optional + self.batch_size = spec.get("BatchSize") + self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") + self.enabled = spec.get("Enabled", True) + self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + + self.function_arn = spec["FunctionArn"] self.uuid = str(uuid.uuid4()) self.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) - # BatchSize service default/max mapping - batch_size_map = { + def _get_service_source_from_arn(self, event_source_arn): + return event_source_arn.split(":")[2].lower() + + def _validate_event_source(self, event_source_arn): + valid_services = ("dynamodb", "kinesis", "sqs") + service = self._get_service_source_from_arn(event_source_arn) + return True if service in valid_services else False + + @property + def event_source_arn(self): + return self._event_source_arn + + @event_source_arn.setter + def event_source_arn(self, event_source_arn): + if not self._validate_event_source(event_source_arn): + raise ValueError( + "InvalidParameterValueException", "Unsupported event source type" + ) + self._event_source_arn = event_source_arn + + @property + def batch_size(self): + return self._batch_size + + @batch_size.setter + def batch_size(self, batch_size): + batch_size_service_map = { "kinesis": (100, 10000), "dynamodb": (100, 1000), "sqs": (10, 10), } - source_type = self.event_source_arn.split(":")[2].lower() - batch_size_entry = batch_size_map.get(source_type) - if batch_size_entry: - # Use service default if not provided - batch_size = int(spec.get("BatchSize", batch_size_entry[0])) - if batch_size > batch_size_entry[1]: - raise ValueError( - "InvalidParameterValueException", - "BatchSize {} exceeds the max of {}".format( - batch_size, batch_size_entry[1] - ), - ) - else: - self.batch_size = batch_size - else: - raise ValueError( - "InvalidParameterValueException", "Unsupported event source type" - ) - # optional - self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") - self.enabled = spec.get("Enabled", True) - self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + source_type = self._get_service_source_from_arn(self.event_source_arn) + batch_size_for_source = batch_size_service_map[source_type] + + if batch_size is None: + self._batch_size = batch_size_for_source[0] + elif batch_size > batch_size_for_source[1]: + error_message = "BatchSize {} exceeds the max of {}".format( + batch_size, batch_size_for_source[1] + ) + raise ValueError("InvalidParameterValueException", error_message) + else: + self._batch_size = int(batch_size) def 
get_configuration(self): return { @@ -595,32 +645,72 @@ class EventSourceMapping(BaseModel): "StateTransitionReason": "User initiated", } + def delete(self, region_name): + lambda_backend = lambda_backends[region_name] + lambda_backend.delete_event_source_mapping(self.uuid) + + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + return "AWS::Lambda::EventSourceMapping" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - func = lambda_backends[region_name].get_function(properties["FunctionName"]) - spec = { - "FunctionArn": func.function_arn, - "EventSourceArn": properties["EventSourceArn"], - "StartingPosition": properties["StartingPosition"], - "BatchSize": properties.get("BatchSize", 100), - } - optional_properties = "BatchSize Enabled StartingPositionTimestamp".split() - for prop in optional_properties: - if prop in properties: - spec[prop] = properties[prop] - return EventSourceMapping(spec) + lambda_backend = lambda_backends[region_name] + return lambda_backend.create_event_source_mapping(properties) + + @classmethod + def update_from_cloudformation_json( + cls, new_resource_name, cloudformation_json, original_resource, region_name + ): + properties = cloudformation_json["Properties"] + event_source_uuid = original_resource.uuid + lambda_backend = lambda_backends[region_name] + return lambda_backend.update_event_source_mapping(event_source_uuid, properties) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + lambda_backend = lambda_backends[region_name] + esms = lambda_backend.list_event_source_mappings( + event_source_arn=properties["EventSourceArn"], + function_name=properties["FunctionName"], + ) + + for esm in esms: + if esm.uuid == resource_name: + esm.delete(region_name) + + @property + def physical_resource_id(self): + return self.uuid -class LambdaVersion(BaseModel): +class LambdaVersion(CloudFormationModel): def __init__(self, spec): self.version = spec["Version"] def __repr__(self): return str(self.logical_resource_id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-version.html + return "AWS::Lambda::Version" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -812,7 +902,7 @@ class LambdaBackend(BaseBackend): ) # Validate function name - func = self._lambdas.get_function_by_name_or_arn(spec.pop("FunctionName", "")) + func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", "")) if not func: raise RESTError("ResourceNotFoundException", "Invalid FunctionName") @@ -870,18 +960,20 @@ class LambdaBackend(BaseBackend): def update_event_source_mapping(self, uuid, spec): esm = self.get_event_source_mapping(uuid) - if esm: - if spec.get("FunctionName"): - func = self._lambdas.get_function_by_name_or_arn( - spec.get("FunctionName") - ) + if not esm: + return False + + for key, value in spec.items(): + if key == "FunctionName": + func = self._lambdas.get_function_by_name_or_arn(spec[key]) esm.function_arn = func.function_arn - if "BatchSize" in spec: - esm.batch_size = 
spec["BatchSize"] - if "Enabled" in spec: - esm.enabled = spec["Enabled"] - return esm - return False + elif key == "BatchSize": + esm.batch_size = spec[key] + elif key == "Enabled": + esm.enabled = spec[key] + + esm.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) + return esm def list_event_source_mappings(self, event_source_arn, function_name): esms = list(self._event_source_mappings.values()) @@ -981,7 +1073,29 @@ class LambdaBackend(BaseBackend): ] } func = self._lambdas.get_arn(function_arn) - func.invoke(json.dumps(event), {}, {}) + return func.invoke(json.dumps(event), {}, {}) + + def send_log_event( + self, function_arn, filter_name, log_group_name, log_stream_name, log_events + ): + data = { + "messageType": "DATA_MESSAGE", + "owner": ACCOUNT_ID, + "logGroup": log_group_name, + "logStream": log_stream_name, + "subscriptionFilters": [filter_name], + "logEvents": log_events, + } + + output = io.BytesIO() + with GzipFile(fileobj=output, mode="w") as f: + f.write(json.dumps(data, separators=(",", ":")).encode("utf-8")) + payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8") + + event = {"awslogs": {"data": payload_gz_encoded}} + + func = self._lambdas.get_arn(function_arn) + return func.invoke(json.dumps(event), {}, {}) def list_tags(self, resource): return self.get_function_by_arn(resource).tags @@ -1006,11 +1120,11 @@ class LambdaBackend(BaseBackend): return True return False - def add_policy_statement(self, function_name, raw): + def add_permission(self, function_name, raw): fn = self.get_function(function_name) fn.policy.add_statement(raw) - def del_policy_statement(self, function_name, sid, revision=""): + def remove_permission(self, function_name, sid, revision=""): fn = self.get_function(function_name) fn.policy.del_statement(sid, revision) @@ -1044,9 +1158,23 @@ class LambdaBackend(BaseBackend): if fn: payload = fn.invoke(body, headers, response_headers) response_headers["Content-Length"] = str(len(payload)) - return response_headers, payload + return payload else: - return response_headers, None + return None + + def put_function_concurrency(self, function_name, reserved_concurrency): + fn = self.get_function(function_name) + fn.reserved_concurrency = reserved_concurrency + return fn.reserved_concurrency + + def delete_function_concurrency(self, function_name): + fn = self.get_function(function_name) + fn.reserved_concurrency = None + return fn.reserved_concurrency + + def get_function_concurrency(self, function_name): + fn = self.get_function(function_name) + return fn.reserved_concurrency def do_validate_s3(): diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index bac670b8e..6447cde13 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -141,12 +141,25 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + def function_concurrency(self, request, full_url, headers): + http_method = request.method + self.setup_class(request, full_url, headers) + + if http_method == "GET": + return self._get_function_concurrency(request) + elif http_method == "DELETE": + return self._delete_function_concurrency(request) + elif http_method == "PUT": + return self._put_function_concurrency(request) + else: + raise ValueError("Cannot handle request") + def _add_policy(self, request, full_url, headers): path = request.path if hasattr(request, "path") else path_url(request.url) function_name = path.split("/")[-2] if self.lambda_backend.get_function(function_name): statement = 
self.body
-            self.lambda_backend.add_policy_statement(function_name, statement)
+            self.lambda_backend.add_permission(function_name, statement)
             return 200, {}, json.dumps({"Statement": statement})
         else:
             return 404, {}, "{}"
@@ -166,9 +179,7 @@ class LambdaResponse(BaseResponse):
         statement_id = path.split("/")[-1].split("?")[0]
         revision = querystring.get("RevisionId", "")
         if self.lambda_backend.get_function(function_name):
-            self.lambda_backend.del_policy_statement(
-                function_name, statement_id, revision
-            )
+            self.lambda_backend.remove_permission(function_name, statement_id, revision)
             return 204, {}, "{}"
         else:
             return 404, {}, "{}"
@@ -180,11 +191,19 @@
         function_name = unquote(self.path.rsplit("/", 2)[-2])
         qualifier = self._get_param("qualifier")

-        response_header, payload = self.lambda_backend.invoke(
+        payload = self.lambda_backend.invoke(
             function_name, qualifier, self.body, self.headers, response_headers
         )
         if payload:
-            return 202, response_headers, payload
+            if request.headers.get("X-Amz-Invocation-Type") == "Event":
+                status_code = 202
+            elif request.headers.get("X-Amz-Invocation-Type") == "DryRun":
+                status_code = 204
+            else:
+                if request.headers.get("X-Amz-Log-Type") != "Tail":
+                    del response_headers["x-amz-log-result"]
+                status_code = 200
+            return status_code, response_headers, payload
         else:
             return 404, response_headers, "{}"
@@ -295,7 +314,7 @@
             code["Configuration"]["FunctionArn"] += ":$LATEST"
             return 200, {}, json.dumps(code)
         else:
-            return 404, {}, "{}"
+            return 404, {"x-amzn-ErrorType": "ResourceNotFoundException"}, "{}"

     def _get_aws_region(self, full_url):
         region = self.region_regex.search(full_url)
@@ -353,3 +372,38 @@
             return 200, {}, json.dumps(resp)
         else:
             return 404, {}, "{}"
+
+    def _get_function_concurrency(self, request):
+        path_function_name = self.path.rsplit("/", 2)[-2]
+        function_name = self.lambda_backend.get_function(path_function_name)
+
+        if function_name is None:
+            return 404, {}, "{}"
+
+        resp = self.lambda_backend.get_function_concurrency(path_function_name)
+        return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp})
+
+    def _delete_function_concurrency(self, request):
+        path_function_name = self.path.rsplit("/", 2)[-2]
+        function_name = self.lambda_backend.get_function(path_function_name)
+
+        if function_name is None:
+            return 404, {}, "{}"
+
+        self.lambda_backend.delete_function_concurrency(path_function_name)
+
+        return 204, {}, "{}"
+
+    def _put_function_concurrency(self, request):
+        path_function_name = self.path.rsplit("/", 2)[-2]
+        function = self.lambda_backend.get_function(path_function_name)
+
+        if function is None:
+            return 404, {}, "{}"
+
+        concurrency = self._get_param("ReservedConcurrentExecutions", None)
+        resp = self.lambda_backend.put_function_concurrency(
+            path_function_name, concurrency
+        )
+
+        return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp})
diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py
index c25e58dba..03cedc5e4 100644
--- a/moto/awslambda/urls.py
+++ b/moto/awslambda/urls.py
@@ -19,4 +19,5 @@ url_paths = {
     r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$": response.policy,
     r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/configuration/?$": response.configuration,
     r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/code/?$": response.code,
+    r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/concurrency/?$": response.function_concurrency,
 }
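With the handler change above, invoke now returns 202 only for Event invocations, 204 for DryRun, and 200 otherwise, and the base64-encoded log tail is kept only when the caller asks for it. From the client side that looks roughly like this (assumes a function "my-func" already exists in the mock):

```python
import base64
import json

import boto3

client = boto3.client("lambda", region_name="us-east-1")

resp = client.invoke(
    FunctionName="my-func",
    InvocationType="RequestResponse",  # "Event" -> 202, "DryRun" -> 204
    LogType="Tail",  # without this, x-amz-log-result is stripped from the response
    Payload=json.dumps({"answer": 42}),
)

assert resp["StatusCode"] == 200
print(base64.b64decode(resp["LogResult"]))  # tail of the docker container logs
print(resp["Payload"].read())               # the function's return value
```

diff --git a/moto/backends.py b/moto/backends.py
index a358b8fd2..c8bac72fc 100644
---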
a/moto/backends.py +++ b/moto/backends.py @@ -1,122 +1,113 @@ from __future__ import unicode_literals -from moto.acm import acm_backends -from moto.apigateway import apigateway_backends -from moto.athena import athena_backends -from moto.autoscaling import autoscaling_backends -from moto.awslambda import lambda_backends -from moto.batch import batch_backends -from moto.cloudformation import cloudformation_backends -from moto.cloudwatch import cloudwatch_backends -from moto.codecommit import codecommit_backends -from moto.codepipeline import codepipeline_backends -from moto.cognitoidentity import cognitoidentity_backends -from moto.cognitoidp import cognitoidp_backends -from moto.config import config_backends -from moto.core import moto_api_backends -from moto.datapipeline import datapipeline_backends -from moto.datasync import datasync_backends -from moto.dynamodb import dynamodb_backends -from moto.dynamodb2 import dynamodb_backends2 -from moto.dynamodbstreams import dynamodbstreams_backends -from moto.ec2 import ec2_backends -from moto.ec2_instance_connect import ec2_instance_connect_backends -from moto.ecr import ecr_backends -from moto.ecs import ecs_backends -from moto.elb import elb_backends -from moto.elbv2 import elbv2_backends -from moto.emr import emr_backends -from moto.events import events_backends -from moto.glacier import glacier_backends -from moto.glue import glue_backends -from moto.iam import iam_backends -from moto.instance_metadata import instance_metadata_backends -from moto.iot import iot_backends -from moto.iotdata import iotdata_backends -from moto.kinesis import kinesis_backends -from moto.kms import kms_backends -from moto.logs import logs_backends -from moto.opsworks import opsworks_backends -from moto.organizations import organizations_backends -from moto.polly import polly_backends -from moto.rds2 import rds2_backends -from moto.redshift import redshift_backends -from moto.resourcegroups import resourcegroups_backends -from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends -from moto.route53 import route53_backends -from moto.s3 import s3_backends -from moto.secretsmanager import secretsmanager_backends -from moto.ses import ses_backends -from moto.sns import sns_backends -from moto.sqs import sqs_backends -from moto.ssm import ssm_backends -from moto.stepfunctions import stepfunction_backends -from moto.sts import sts_backends -from moto.swf import swf_backends -from moto.xray import xray_backends +import importlib BACKENDS = { - "acm": acm_backends, - "apigateway": apigateway_backends, - "athena": athena_backends, - "autoscaling": autoscaling_backends, - "batch": batch_backends, - "cloudformation": cloudformation_backends, - "cloudwatch": cloudwatch_backends, - "codecommit": codecommit_backends, - "codepipeline": codepipeline_backends, - "cognito-identity": cognitoidentity_backends, - "cognito-idp": cognitoidp_backends, - "config": config_backends, - "datapipeline": datapipeline_backends, - "datasync": datasync_backends, - "dynamodb": dynamodb_backends, - "dynamodb2": dynamodb_backends2, - "dynamodbstreams": dynamodbstreams_backends, - "ec2": ec2_backends, - "ec2_instance_connect": ec2_instance_connect_backends, - "ecr": ecr_backends, - "ecs": ecs_backends, - "elb": elb_backends, - "elbv2": elbv2_backends, - "events": events_backends, - "emr": emr_backends, - "glacier": glacier_backends, - "glue": glue_backends, - "iam": iam_backends, - "moto_api": moto_api_backends, - "instance_metadata": instance_metadata_backends, - "logs": 
logs_backends, - "kinesis": kinesis_backends, - "kms": kms_backends, - "opsworks": opsworks_backends, - "organizations": organizations_backends, - "polly": polly_backends, - "redshift": redshift_backends, - "resource-groups": resourcegroups_backends, - "rds": rds2_backends, - "s3": s3_backends, - "s3bucket_path": s3_backends, - "ses": ses_backends, - "secretsmanager": secretsmanager_backends, - "sns": sns_backends, - "sqs": sqs_backends, - "ssm": ssm_backends, - "stepfunctions": stepfunction_backends, - "sts": sts_backends, - "swf": swf_backends, - "route53": route53_backends, - "lambda": lambda_backends, - "xray": xray_backends, - "resourcegroupstaggingapi": resourcegroupstaggingapi_backends, - "iot": iot_backends, - "iot-data": iotdata_backends, + "acm": ("acm", "acm_backends"), + "apigateway": ("apigateway", "apigateway_backends"), + "athena": ("athena", "athena_backends"), + "applicationautoscaling": ( + "applicationautoscaling", + "applicationautoscaling_backends", + ), + "autoscaling": ("autoscaling", "autoscaling_backends"), + "batch": ("batch", "batch_backends"), + "cloudformation": ("cloudformation", "cloudformation_backends"), + "cloudwatch": ("cloudwatch", "cloudwatch_backends"), + "codecommit": ("codecommit", "codecommit_backends"), + "codepipeline": ("codepipeline", "codepipeline_backends"), + "cognito-identity": ("cognitoidentity", "cognitoidentity_backends"), + "cognito-idp": ("cognitoidp", "cognitoidp_backends"), + "config": ("config", "config_backends"), + "datapipeline": ("datapipeline", "datapipeline_backends"), + "datasync": ("datasync", "datasync_backends"), + "dynamodb": ("dynamodb", "dynamodb_backends"), + "dynamodb2": ("dynamodb2", "dynamodb_backends2"), + "dynamodbstreams": ("dynamodbstreams", "dynamodbstreams_backends"), + "ec2": ("ec2", "ec2_backends"), + "ec2instanceconnect": ("ec2instanceconnect", "ec2instanceconnect_backends"), + "ecr": ("ecr", "ecr_backends"), + "ecs": ("ecs", "ecs_backends"), + "elasticbeanstalk": ("elasticbeanstalk", "eb_backends"), + "elb": ("elb", "elb_backends"), + "elbv2": ("elbv2", "elbv2_backends"), + "emr": ("emr", "emr_backends"), + "events": ("events", "events_backends"), + "glacier": ("glacier", "glacier_backends"), + "glue": ("glue", "glue_backends"), + "iam": ("iam", "iam_backends"), + "instance_metadata": ("instance_metadata", "instance_metadata_backends"), + "iot": ("iot", "iot_backends"), + "iot-data": ("iotdata", "iotdata_backends"), + "kinesis": ("kinesis", "kinesis_backends"), + "kms": ("kms", "kms_backends"), + "lambda": ("awslambda", "lambda_backends"), + "logs": ("logs", "logs_backends"), + "managedblockchain": ("managedblockchain", "managedblockchain_backends"), + "moto_api": ("core", "moto_api_backends"), + "opsworks": ("opsworks", "opsworks_backends"), + "organizations": ("organizations", "organizations_backends"), + "polly": ("polly", "polly_backends"), + "ram": ("ram", "ram_backends"), + "rds": ("rds2", "rds2_backends"), + "redshift": ("redshift", "redshift_backends"), + "resource-groups": ("resourcegroups", "resourcegroups_backends"), + "resourcegroupstaggingapi": ( + "resourcegroupstaggingapi", + "resourcegroupstaggingapi_backends", + ), + "route53": ("route53", "route53_backends"), + "s3": ("s3", "s3_backends"), + "s3bucket_path": ("s3", "s3_backends"), + "sagemaker": ("sagemaker", "sagemaker_backends"), + "secretsmanager": ("secretsmanager", "secretsmanager_backends"), + "ses": ("ses", "ses_backends"), + "sns": ("sns", "sns_backends"), + "sqs": ("sqs", "sqs_backends"), + "ssm": ("ssm", "ssm_backends"), + 
"stepfunctions": ("stepfunctions", "stepfunction_backends"), + "sts": ("sts", "sts_backends"), + "swf": ("swf", "swf_backends"), + "transcribe": ("transcribe", "transcribe_backends"), + "xray": ("xray", "xray_backends"), + "kinesisvideo": ("kinesisvideo", "kinesisvideo_backends"), + "kinesis-video-archived-media": ( + "kinesisvideoarchivedmedia", + "kinesisvideoarchivedmedia_backends", + ), + "forecast": ("forecast", "forecast_backends"), } +def _import_backend(module_name, backends_name): + module = importlib.import_module("moto." + module_name) + return getattr(module, backends_name) + + +def backends(): + for module_name, backends_name in BACKENDS.values(): + yield _import_backend(module_name, backends_name) + + +def named_backends(): + for name, (module_name, backends_name) in BACKENDS.items(): + yield name, _import_backend(module_name, backends_name) + + +def get_backend(name): + module_name, backends_name = BACKENDS[name] + return _import_backend(module_name, backends_name) + + +def search_backend(predicate): + for name, backend in named_backends(): + if predicate(backend): + return name + + def get_model(name, region_name): - for backends in BACKENDS.values(): - for region, backend in backends.items(): + for backends_ in backends(): + for region, backend in backends_.items(): if region == region_name: models = getattr(backend.__class__, "__models__", {}) if name in models: diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py index c411f3fce..5d3ea3fd0 100644 --- a/moto/batch/exceptions.py +++ b/moto/batch/exceptions.py @@ -1,40 +1,24 @@ from __future__ import unicode_literals -import json - - -class AWSError(Exception): - CODE = None - STATUS = 400 - - def __init__(self, message, code=None, status=None): - self.message = message - self.code = code if code is not None else self.CODE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.code, "message": self.message}), - dict(status=self.status), - ) +from moto.core.exceptions import AWSError class InvalidRequestException(AWSError): - CODE = "InvalidRequestException" + TYPE = "InvalidRequestException" class InvalidParameterValueException(AWSError): - CODE = "InvalidParameterValue" + TYPE = "InvalidParameterValue" class ValidationError(AWSError): - CODE = "ValidationError" + TYPE = "ValidationError" class InternalFailure(AWSError): - CODE = "InternalFailure" + TYPE = "InternalFailure" STATUS = 500 class ClientException(AWSError): - CODE = "ClientException" + TYPE = "ClientException" STATUS = 400 diff --git a/moto/batch/models.py b/moto/batch/models.py index fc35f2997..1338beb0c 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals import re -import requests.adapters from itertools import cycle import six import datetime @@ -8,12 +7,11 @@ import time import uuid import logging import docker -import functools import threading import dateutil.parser from boto3 import Session -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.iam import iam_backends from moto.ec2 import ec2_backends from moto.ecs import ecs_backends @@ -30,8 +28,8 @@ from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID +from moto.utilities.docker_utilities import DockerModel 
-_orig_adapter_send = requests.adapters.HTTPAdapter.send logger = logging.getLogger(__name__) COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile( r"^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$" @@ -42,7 +40,7 @@ def datetime2int(date): return int(time.mktime(date.timetuple())) -class ComputeEnvironment(BaseModel): +class ComputeEnvironment(CloudFormationModel): def __init__( self, compute_environment_name, @@ -76,6 +74,15 @@ class ComputeEnvironment(BaseModel): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return "ComputeEnvironmentName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html + return "AWS::Batch::ComputeEnvironment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -95,7 +102,7 @@ class ComputeEnvironment(BaseModel): return backend.get_compute_environment_by_arn(arn) -class JobQueue(BaseModel): +class JobQueue(CloudFormationModel): def __init__( self, name, priority, state, environments, env_order_json, region_name ): @@ -139,6 +146,15 @@ class JobQueue(BaseModel): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return "JobQueueName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html + return "AWS::Batch::JobQueue" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -164,7 +180,7 @@ class JobQueue(BaseModel): return backend.get_job_queue_by_arn(arn) -class JobDefinition(BaseModel): +class JobDefinition(CloudFormationModel): def __init__( self, name, @@ -264,6 +280,15 @@ class JobDefinition(BaseModel): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return "JobDefinitionName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html + return "AWS::Batch::JobDefinition" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -284,7 +309,7 @@ class JobDefinition(BaseModel): return backend.get_job_definition_by_arn(arn) -class Job(threading.Thread, BaseModel): +class Job(threading.Thread, BaseModel, DockerModel): def __init__(self, name, job_def, job_queue, log_backend, container_overrides): """ Docker Job @@ -297,11 +322,12 @@ class Job(threading.Thread, BaseModel): :type log_backend: moto.logs.models.LogsBackend """ threading.Thread.__init__(self) + DockerModel.__init__(self) self.job_name = name self.job_id = str(uuid.uuid4()) self.job_definition = job_def - self.container_overrides = container_overrides + self.container_overrides = container_overrides or {} self.job_queue = job_queue self.job_state = "SUBMITTED" # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED self.job_queue.jobs.append(self) @@ -315,22 +341,8 @@ class Job(threading.Thread, BaseModel): self.daemon = True self.name = "MOTO-BATCH-" + self.job_id - self.docker_client = docker.from_env() self._log_backend = log_backend - - # Unfortunately mocking replaces this method w/o fallback enabled, so we - # need to replace it if we detect it's been mocked - if requests.adapters.HTTPAdapter.send != _orig_adapter_send: - _orig_get_adapter = self.docker_client.api.get_adapter - - def 
replace_adapter_send(*args, **kwargs): - adapter = _orig_get_adapter(*args, **kwargs) - - if isinstance(adapter, requests.adapters.HTTPAdapter): - adapter.send = functools.partial(_orig_adapter_send, adapter) - return adapter - - self.docker_client.api.get_adapter = replace_adapter_send + self.log_stream_name = None def describe(self): result = { @@ -338,10 +350,11 @@ class Job(threading.Thread, BaseModel): "jobId": self.job_id, "jobName": self.job_name, "jobQueue": self.job_queue.arn, - "startedAt": datetime2int(self.job_started_at), "status": self.job_state, "dependsOn": [], } + if result["status"] not in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING"]: + result["startedAt"] = datetime2int(self.job_started_at) if self.job_stopped: result["stoppedAt"] = datetime2int(self.job_stopped_at) result["container"] = {} @@ -379,7 +392,6 @@ class Job(threading.Thread, BaseModel): """ try: self.job_state = "PENDING" - time.sleep(1) image = self.job_definition.container_properties.get( "image", "alpine:latest" @@ -412,8 +424,8 @@ class Job(threading.Thread, BaseModel): self.job_state = "RUNNABLE" # TODO setup ecs container instance - time.sleep(1) + self.job_started_at = datetime.datetime.now() self.job_state = "STARTING" log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON) container = self.docker_client.containers.run( @@ -427,58 +439,24 @@ class Job(threading.Thread, BaseModel): privileged=privileged, ) self.job_state = "RUNNING" - self.job_started_at = datetime.datetime.now() try: - # Log collection - logs_stdout = [] - logs_stderr = [] container.reload() - - # Dodgy hack, we can only check docker logs once a second, but we want to loop more - # so we can stop if asked to in a quick manner, should all go away if we go async - # There also be some dodgyness when sending an integer to docker logs and some - # events seem to be duplicated. - now = datetime.datetime.now() - i = 1 while container.status == "running" and not self.stop: - time.sleep(0.15) - if i % 10 == 0: - logs_stderr.extend( - container.logs( - stdout=False, - stderr=True, - timestamps=True, - since=datetime2int(now), - ) - .decode() - .split("\n") - ) - logs_stdout.extend( - container.logs( - stdout=True, - stderr=False, - timestamps=True, - since=datetime2int(now), - ) - .decode() - .split("\n") - ) - now = datetime.datetime.now() - container.reload() - i += 1 + container.reload() # Container should be stopped by this point... 
unless asked to stop if container.status == "running": container.kill() - self.job_stopped_at = datetime.datetime.now() - # Get final logs + # Log collection + logs_stdout = [] + logs_stderr = [] logs_stderr.extend( container.logs( stdout=False, stderr=True, timestamps=True, - since=datetime2int(now), + since=datetime2int(self.job_started_at), ) .decode() .split("\n") @@ -488,14 +466,12 @@ class Job(threading.Thread, BaseModel): stdout=True, stderr=False, timestamps=True, - since=datetime2int(now), + since=datetime2int(self.job_started_at), ) .decode() .split("\n") ) - self.job_state = "SUCCEEDED" if not self.stop else "FAILED" - # Process logs logs_stdout = [x for x in logs_stdout if len(x) > 0] logs_stderr = [x for x in logs_stderr if len(x) > 0] @@ -503,7 +479,10 @@ class Job(threading.Thread, BaseModel): for line in logs_stdout + logs_stderr: date, line = line.split(" ", 1) date = dateutil.parser.parse(date) - date = int(date.timestamp()) + # TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window + date = int( + (time.mktime(date.timetuple()) + date.microsecond / 1000000.0) + ) logs.append({"timestamp": date, "message": line.strip()}) # Send to cloudwatch @@ -516,6 +495,8 @@ class Job(threading.Thread, BaseModel): self._log_backend.create_log_stream(log_group, stream_name) self._log_backend.put_log_events(log_group, stream_name, logs, None) + self.job_state = "SUCCEEDED" if not self.stop else "FAILED" + except Exception as err: logger.error( "Failed to run AWS Batch container {0}. Error {1}".format( diff --git a/moto/batch/utils.py b/moto/batch/utils.py index ce9b2ffe8..d9f79e236 100644 --- a/moto/batch/utils.py +++ b/moto/batch/utils.py @@ -21,6 +21,14 @@ def lowercase_first_key(some_dict): new_dict = {} for key, value in some_dict.items(): new_key = key[0].lower() + key[1:] - new_dict[new_key] = value + try: + if isinstance(value, dict): + new_dict[new_key] = lowercase_first_key(value) + elif all([isinstance(v, dict) for v in value]): + new_dict[new_key] = [lowercase_first_key(v) for v in value] + else: + new_dict[new_key] = value + except TypeError: + new_dict[new_key] = value return new_dict diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index b32d63b32..d3fb2870d 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -8,6 +8,7 @@ from boto3 import Session from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds from .parsing import ResourceMap, OutputMap from .utils import ( @@ -218,7 +219,12 @@ class FakeStack(BaseModel): self.stack_id = stack_id self.name = name self.template = template - self._parse_template() + if template != {}: + self._parse_template() + self.description = self.template_dict.get("Description") + else: + self.template_dict = {} + self.description = None self.parameters = parameters self.region_name = region_name self.notification_arns = notification_arns if notification_arns else [] @@ -234,12 +240,16 @@ class FakeStack(BaseModel): "CREATE_IN_PROGRESS", resource_status_reason="User Initiated" ) - self.description = self.template_dict.get("Description") self.cross_stack_resources = cross_stack_resources or {} self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() - self._add_stack_event("CREATE_COMPLETE") - self.status = "CREATE_COMPLETE" + if create_change_set: + self.status = "CREATE_COMPLETE" + self.execution_status = "AVAILABLE" + else: + 
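# A direct create_stack provisions resources immediately; a change set leaves them unprovisioned until execute_change_set() calls create_resources().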
self.create_resources() + self._add_stack_event("CREATE_COMPLETE") + self.creation_time = datetime.utcnow() def _create_resource_map(self): resource_map = ResourceMap( @@ -251,7 +261,7 @@ class FakeStack(BaseModel): self.template_dict, self.cross_stack_resources, ) - resource_map.create() + resource_map.load() return resource_map def _create_output_map(self): @@ -259,6 +269,10 @@ class FakeStack(BaseModel): output_map.create() return output_map + @property + def creation_time_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.creation_time) + def _add_stack_event( self, resource_status, resource_status_reason=None, resource_properties=None ): @@ -301,8 +315,8 @@ class FakeStack(BaseModel): yaml.add_multi_constructor("", yaml_tag_constructor) try: self.template_dict = yaml.load(self.template, Loader=yaml.Loader) - except yaml.parser.ParserError: - self.template_dict = json.loads(self.template, Loader=yaml.Loader) + except (yaml.parser.ParserError, yaml.scanner.ScannerError): + self.template_dict = json.loads(self.template) @property def stack_parameters(self): @@ -320,6 +334,12 @@ class FakeStack(BaseModel): def exports(self): return self.output_map.exports + def create_resources(self): + self.resource_map.create(self.template_dict) + # Set the description of the stack + self.description = self.template_dict.get("Description") + self.status = "CREATE_COMPLETE" + def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event( "UPDATE_IN_PROGRESS", resource_status_reason="User Initiated" @@ -384,6 +404,9 @@ class FakeChangeSet(FakeStack): self.change_set_id = change_set_id self.change_set_name = change_set_name self.changes = self.diff(template=template, parameters=parameters) + if self.description is None: + self.description = self.template_dict.get("Description") + self.creation_time = datetime.utcnow() def diff(self, template, parameters=None): self.template = template @@ -426,6 +449,16 @@ class FakeEvent(BaseModel): self.event_id = uuid.uuid4() +def filter_stacks(all_stacks, status_filter): + filtered_stacks = [] + if not status_filter: + return all_stacks + for stack in all_stacks: + if stack.status in status_filter: + filtered_stacks.append(stack) + return filtered_stacks + + class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() @@ -574,8 +607,8 @@ class CloudFormationBackend(BaseBackend): if stack is None: raise ValidationError(stack_name) else: - stack_id = generate_stack_id(stack_name) - stack_template = template + stack_id = generate_stack_id(stack_name, region_name) + stack_template = {} change_set_id = generate_changeset_id(change_set_name, region_name) new_change_set = FakeChangeSet( @@ -630,10 +663,14 @@ class CloudFormationBackend(BaseBackend): if stack is None: raise ValidationError(stack_name) if stack.events[-1].resource_status == "REVIEW_IN_PROGRESS": + stack._add_stack_event( + "CREATE_IN_PROGRESS", resource_status_reason="User Initiated" + ) stack._add_stack_event("CREATE_COMPLETE") else: stack._add_stack_event("UPDATE_IN_PROGRESS") stack._add_stack_event("UPDATE_COMPLETE") + stack.create_resources() return True def describe_stacks(self, name_or_stack_id): @@ -654,10 +691,11 @@ class CloudFormationBackend(BaseBackend): def list_change_sets(self): return self.change_sets.values() - def list_stacks(self): - return [v for v in self.stacks.values()] + [ + def list_stacks(self, status_filter=None): + total_stacks = [v for v in self.stacks.values()] + [ v for v in self.deleted_stacks.values() ] 
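# Deleted stacks remain in the listing (status DELETE_COMPLETE), so callers can pass e.g. status_filter=["CREATE_COMPLETE"] to exclude them.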
+ return filter_stacks(total_stacks, status_filter) def get_stack(self, name_or_stack_id): all_stacks = dict(self.deleted_stacks, **self.stacks) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 34d96acc6..50de876f3 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -1,33 +1,48 @@ from __future__ import unicode_literals import functools +import json import logging import copy import warnings import re -from moto.autoscaling import models as autoscaling_models -from moto.awslambda import models as lambda_models -from moto.batch import models as batch_models -from moto.cloudwatch import models as cloudwatch_models -from moto.cognitoidentity import models as cognitoidentity_models from moto.compat import collections_abc -from moto.datapipeline import models as datapipeline_models -from moto.dynamodb2 import models as dynamodb2_models + +# This ugly section of imports is necessary because we +# build the list of CloudFormationModel subclasses using +# CloudFormationModel.__subclasses__(). However, if the class +# definition of a subclass hasn't been executed yet - for example, if +# the subclass's module hasn't been imported yet - then that subclass +# doesn't exist yet, and __subclasses__ won't find it. +# So we import here to populate the list of subclasses. +from moto.autoscaling import models as autoscaling_models # noqa +from moto.awslambda import models as awslambda_models # noqa +from moto.batch import models as batch_models # noqa +from moto.cloudwatch import models as cloudwatch_models # noqa +from moto.datapipeline import models as datapipeline_models # noqa +from moto.dynamodb2 import models as dynamodb2_models # noqa from moto.ec2 import models as ec2_models -from moto.ecs import models as ecs_models -from moto.elb import models as elb_models -from moto.elbv2 import models as elbv2_models -from moto.iam import models as iam_models -from moto.kinesis import models as kinesis_models -from moto.kms import models as kms_models -from moto.rds import models as rds_models -from moto.rds2 import models as rds2_models -from moto.redshift import models as redshift_models -from moto.route53 import models as route53_models -from moto.s3 import models as s3_models -from moto.sns import models as sns_models -from moto.sqs import models as sqs_models -from moto.core import ACCOUNT_ID +from moto.ecr import models as ecr_models # noqa +from moto.ecs import models as ecs_models # noqa +from moto.elb import models as elb_models # noqa +from moto.elbv2 import models as elbv2_models # noqa +from moto.events import models as events_models # noqa +from moto.iam import models as iam_models # noqa +from moto.kinesis import models as kinesis_models # noqa +from moto.kms import models as kms_models # noqa +from moto.rds import models as rds_models # noqa +from moto.rds2 import models as rds2_models # noqa +from moto.redshift import models as redshift_models # noqa +from moto.route53 import models as route53_models # noqa +from moto.s3 import models as s3_models, s3_backend # noqa +from moto.s3.utils import bucket_and_name_from_url +from moto.sns import models as sns_models # noqa +from moto.sqs import models as sqs_models # noqa +from moto.stepfunctions import models as stepfunctions_models # noqa + +# End ugly list of imports + +from moto.core import ACCOUNT_ID, CloudFormationModel from .utils import random_suffix from .exceptions import ( ExportNotFound, @@ -35,78 +50,14 @@ from .exceptions import ( UnformattedGetAttTemplateException, 
ValidationError, ) -from boto.cloudformation.stack import Output +from moto.packages.boto.cloudformation.stack import Output -MODEL_MAP = { - "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, - "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, - "AWS::Batch::JobDefinition": batch_models.JobDefinition, - "AWS::Batch::JobQueue": batch_models.JobQueue, - "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, - "AWS::DynamoDB::Table": dynamodb2_models.Table, - "AWS::Kinesis::Stream": kinesis_models.Stream, - "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, - "AWS::Lambda::Function": lambda_models.LambdaFunction, - "AWS::Lambda::Version": lambda_models.LambdaVersion, - "AWS::EC2::EIP": ec2_models.ElasticAddress, - "AWS::EC2::Instance": ec2_models.Instance, - "AWS::EC2::InternetGateway": ec2_models.InternetGateway, - "AWS::EC2::NatGateway": ec2_models.NatGateway, - "AWS::EC2::NetworkInterface": ec2_models.NetworkInterface, - "AWS::EC2::Route": ec2_models.Route, - "AWS::EC2::RouteTable": ec2_models.RouteTable, - "AWS::EC2::SecurityGroup": ec2_models.SecurityGroup, - "AWS::EC2::SecurityGroupIngress": ec2_models.SecurityGroupIngress, - "AWS::EC2::SpotFleet": ec2_models.SpotFleetRequest, - "AWS::EC2::Subnet": ec2_models.Subnet, - "AWS::EC2::SubnetRouteTableAssociation": ec2_models.SubnetRouteTableAssociation, - "AWS::EC2::Volume": ec2_models.Volume, - "AWS::EC2::VolumeAttachment": ec2_models.VolumeAttachment, - "AWS::EC2::VPC": ec2_models.VPC, - "AWS::EC2::VPCGatewayAttachment": ec2_models.VPCGatewayAttachment, - "AWS::EC2::VPCPeeringConnection": ec2_models.VPCPeeringConnection, - "AWS::ECS::Cluster": ecs_models.Cluster, - "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition, - "AWS::ECS::Service": ecs_models.Service, - "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer, - "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, - "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, - "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, - "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity, - "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, - "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, - "AWS::IAM::Role": iam_models.Role, - "AWS::KMS::Key": kms_models.Key, - "AWS::Logs::LogGroup": cloudwatch_models.LogGroup, - "AWS::RDS::DBInstance": rds_models.Database, - "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup, - "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup, - "AWS::RDS::DBParameterGroup": rds2_models.DBParameterGroup, - "AWS::Redshift::Cluster": redshift_models.Cluster, - "AWS::Redshift::ClusterParameterGroup": redshift_models.ParameterGroup, - "AWS::Redshift::ClusterSubnetGroup": redshift_models.SubnetGroup, - "AWS::Route53::HealthCheck": route53_models.HealthCheck, - "AWS::Route53::HostedZone": route53_models.FakeZone, - "AWS::Route53::RecordSet": route53_models.RecordSet, - "AWS::Route53::RecordSetGroup": route53_models.RecordSetGroup, - "AWS::SNS::Topic": sns_models.Topic, - "AWS::S3::Bucket": s3_models.FakeBucket, - "AWS::SQS::Queue": sqs_models.Queue, -} - -# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html +# List of supported CloudFormation models +MODEL_LIST = CloudFormationModel.__subclasses__() +MODEL_MAP = {model.cloudformation_type(): model for model in MODEL_LIST} NAME_TYPE_MAP = { - "AWS::CloudWatch::Alarm": "Alarm", - 
"AWS::DynamoDB::Table": "TableName", - "AWS::ElastiCache::CacheCluster": "ClusterName", - "AWS::ElasticBeanstalk::Application": "ApplicationName", - "AWS::ElasticBeanstalk::Environment": "EnvironmentName", - "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName", - "AWS::ElasticLoadBalancingV2::TargetGroup": "Name", - "AWS::RDS::DBInstance": "DBInstanceIdentifier", - "AWS::S3::Bucket": "BucketName", - "AWS::SNS::Topic": "TopicName", - "AWS::SQS::Queue": "QueueName", + model.cloudformation_type(): model.cloudformation_name_type() + for model in MODEL_LIST } # Just ignore these models types for now @@ -150,7 +101,10 @@ def clean_json(resource_json, resources_map): map_path = resource_json["Fn::FindInMap"][1:] result = resources_map[map_name] for path in map_path: - result = result[clean_json(path, resources_map)] + if "Fn::Transform" in result: + result = resources_map[clean_json(path, resources_map)] + else: + result = result[clean_json(path, resources_map)] return result if "Fn::GetAtt" in resource_json: @@ -196,13 +150,13 @@ def clean_json(resource_json, resources_map): ) else: fn_sub_value = clean_json(resource_json["Fn::Sub"], resources_map) - to_sub = re.findall('(?=\${)[^!^"]*?}', fn_sub_value) - literals = re.findall('(?=\${!)[^"]*?}', fn_sub_value) + to_sub = re.findall(r'(?=\${)[^!^"]*?}', fn_sub_value) + literals = re.findall(r'(?=\${!)[^"]*?}', fn_sub_value) for sub in to_sub: if "." in sub: cleaned_ref = clean_json( { - "Fn::GetAtt": re.findall('(?<=\${)[^"]*?(?=})', sub)[ + "Fn::GetAtt": re.findall(r'(?<=\${)[^"]*?(?=})', sub)[ 0 ].split(".") }, @@ -210,7 +164,7 @@ def clean_json(resource_json, resources_map): ) else: cleaned_ref = clean_json( - {"Ref": re.findall('(?<=\${)[^"]*?(?=})', sub)[0]}, + {"Ref": re.findall(r'(?<=\${)[^"]*?(?=})', sub)[0]}, resources_map, ) fn_sub_value = fn_sub_value.replace(sub, cleaned_ref) @@ -261,10 +215,14 @@ def resource_class_from_type(resource_type): if resource_type not in MODEL_MAP: logger.warning("No Moto CloudFormation support for %s", resource_type) return None + return MODEL_MAP.get(resource_type) def resource_name_property_from_type(resource_type): + for model in MODEL_LIST: + if model.cloudformation_type() == resource_type: + return model.cloudformation_name_type() return NAME_TYPE_MAP.get(resource_type) @@ -283,11 +241,21 @@ def generate_resource_name(resource_type, stack_name, logical_id): if truncated_name_prefix.endswith("-"): truncated_name_prefix = truncated_name_prefix[:-1] return "{0}-{1}".format(truncated_name_prefix, my_random_suffix) + elif resource_type == "AWS::S3::Bucket": + right_hand_part_of_name = "-{0}-{1}".format(logical_id, random_suffix()) + max_stack_name_portion_len = 63 - len(right_hand_part_of_name) + return "{0}{1}".format( + stack_name[:max_stack_name_portion_len], right_hand_part_of_name + ).lower() + elif resource_type == "AWS::IAM::Policy": + return "{0}-{1}-{2}".format(stack_name[:5], logical_id[:4], random_suffix()) else: return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix()) -def parse_resource(logical_id, resource_json, resources_map): +def parse_resource( + resource_json, resources_map, +): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) if not resource_class: @@ -298,22 +266,37 @@ def parse_resource(logical_id, resource_json, resources_map): ) return None + if "Properties" not in resource_json: + resource_json["Properties"] = {} + resource_json = clean_json(resource_json, resources_map) + + return resource_class, resource_json, 
resource_type + + +def parse_resource_and_generate_name( + logical_id, resource_json, resources_map, +): + resource_tuple = parse_resource(resource_json, resources_map) + if not resource_tuple: + return None + resource_class, resource_json, resource_type = resource_tuple + + generated_resource_name = generate_resource_name( + resource_type, resources_map.get("AWS::StackName"), logical_id + ) + resource_name_property = resource_name_property_from_type(resource_type) if resource_name_property: - if "Properties" not in resource_json: - resource_json["Properties"] = dict() - if resource_name_property not in resource_json["Properties"]: - resource_json["Properties"][ - resource_name_property - ] = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) - resource_name = resource_json["Properties"][resource_name_property] + if ( + "Properties" in resource_json + and resource_name_property in resource_json["Properties"] + ): + resource_name = resource_json["Properties"][resource_name_property] + else: + resource_name = generated_resource_name else: - resource_name = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) + resource_name = generated_resource_name return resource_class, resource_json, resource_name @@ -325,12 +308,14 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n return None resource_type = resource_json["Type"] - resource_tuple = parse_resource(logical_id, resource_json, resources_map) + resource_tuple = parse_resource_and_generate_name( + logical_id, resource_json, resources_map + ) if not resource_tuple: return None - resource_class, resource_json, resource_name = resource_tuple + resource_class, resource_json, resource_physical_name = resource_tuple resource = resource_class.create_from_cloudformation_json( - resource_name, resource_json, region_name + resource_physical_name, resource_json, region_name ) resource.type = resource_type resource.logical_resource_id = logical_id @@ -338,28 +323,34 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): - resource_class, new_resource_json, new_resource_name = parse_resource( + resource_class, resource_json, new_resource_name = parse_resource_and_generate_name( logical_id, resource_json, resources_map ) original_resource = resources_map[logical_id] - new_resource = resource_class.update_from_cloudformation_json( - original_resource=original_resource, - new_resource_name=new_resource_name, - cloudformation_json=new_resource_json, - region_name=region_name, - ) - new_resource.type = resource_json["Type"] - new_resource.logical_resource_id = logical_id - return new_resource + if not hasattr( + resource_class.update_from_cloudformation_json, "__isabstractmethod__" + ): + new_resource = resource_class.update_from_cloudformation_json( + original_resource=original_resource, + new_resource_name=new_resource_name, + cloudformation_json=resource_json, + region_name=region_name, + ) + new_resource.type = resource_json["Type"] + new_resource.logical_resource_id = logical_id + return new_resource + else: + return None -def parse_and_delete_resource(logical_id, resource_json, resources_map, region_name): - resource_class, resource_json, resource_name = parse_resource( - logical_id, resource_json, resources_map - ) - resource_class.delete_from_cloudformation_json( - resource_name, resource_json, region_name - ) +def 
parse_and_delete_resource(resource_name, resource_json, resources_map, region_name): + resource_class, resource_json, _ = parse_resource(resource_json, resources_map) + if not hasattr( + resource_class.delete_from_cloudformation_json, "__isabstractmethod__" + ): + resource_class.delete_from_cloudformation_json( + resource_name, resource_json, region_name + ) def parse_condition(condition, resources_map, condition_map): @@ -423,7 +414,7 @@ class ResourceMap(collections_abc.Mapping): cross_stack_resources, ): self._template = template - self._resource_json_map = template["Resources"] + self._resource_json_map = template["Resources"] if template != {} else {} self._region_name = region_name self.input_parameters = parameters self.tags = copy.deepcopy(tags) @@ -448,6 +439,7 @@ class ResourceMap(collections_abc.Mapping): return self._parsed_resources[resource_logical_id] else: resource_json = self._resource_json_map.get(resource_logical_id) + if not resource_json: raise KeyError(resource_logical_id) new_resource = parse_and_create_resource( @@ -463,6 +455,34 @@ class ResourceMap(collections_abc.Mapping): def __len__(self): return len(self._resource_json_map) + def __get_resources_in_dependency_order(self): + resource_map = copy.deepcopy(self._resource_json_map) + resources_in_dependency_order = [] + + def recursively_get_dependencies(resource): + resource_info = resource_map[resource] + + if "DependsOn" not in resource_info: + resources_in_dependency_order.append(resource) + del resource_map[resource] + return + + dependencies = resource_info["DependsOn"] + if isinstance(dependencies, str): # Dependencies may be a string or list + dependencies = [dependencies] + + for dependency in dependencies: + if dependency in resource_map: + recursively_get_dependencies(dependency) + + resources_in_dependency_order.append(resource) + del resource_map[resource] + + while resource_map: + recursively_get_dependencies(list(resource_map.keys())[0]) + + return resources_in_dependency_order + @property def resources(self): return self._resource_json_map.keys() @@ -470,6 +490,17 @@ def load_mapping(self): self._parsed_resources.update(self._template.get("Mappings", {})) + def transform_mapping(self): + for k, v in self._template.get("Mappings", {}).items(): + if "Fn::Transform" in v: + name = v["Fn::Transform"]["Name"] + params = v["Fn::Transform"]["Parameters"] + if name == "AWS::Include": + location = params["Location"] + bucket_name, name = bucket_and_name_from_url(location) + key = s3_backend.get_object(bucket_name, name) + self._parsed_resources.update(json.loads(key.value)) + def load_parameters(self): parameter_slots = self._template.get("Parameters", {}) for parameter_name, parameter in parameter_slots.items(): @@ -486,6 +517,23 @@ if value_type == "CommaDelimitedList" or value_type.startswith("List"): value = value.split(",") + def _parse_number_parameter(num_string): + """CloudFormation NUMBER types can be an int or float. + Try int first and then fall back to float if that fails + """ + try: + return int(num_string) + except ValueError: + return float(num_string) + + if value_type == "List<Number>": + # The if statement directly above already converted + # to a list. 
Now we convert each element to a number + value = [_parse_number_parameter(v) for v in value] + + if value_type == "Number": + value = _parse_number_parameter(value) + if parameter_slot.get("NoEcho"): self.no_echo_parameter_keys.append(key) @@ -513,20 +561,25 @@ class ResourceMap(collections_abc.Mapping): for condition_name in self.lazy_condition_map: self.lazy_condition_map[condition_name] - def create(self): + def load(self): self.load_mapping() + self.transform_mapping() self.load_parameters() self.load_conditions() + def create(self, template): # Since this is a lazy map, to create every object we just need to # iterate through self. + # Assumes that self.load() has been called before + self._template = template + self._resource_json_map = template["Resources"] self.tags.update( { "aws:cloudformation:stack-name": self.get("AWS::StackName"), "aws:cloudformation:stack-id": self.get("AWS::StackId"), } ) - for resource in self.resources: + for resource in self.__get_resources_in_dependency_order(): if isinstance(self[resource], ec2_models.TaggedEC2Resource): self.tags["aws:cloudformation:logical-id"] = resource ec2_models.ec2_backends[self._region_name].create_tags( @@ -588,28 +641,36 @@ class ResourceMap(collections_abc.Mapping): ) self._parsed_resources[resource_name] = new_resource - for resource_name, resource in resources_by_action["Remove"].items(): - resource_json = old_template[resource_name] + for logical_name, _ in resources_by_action["Remove"].items(): + resource_json = old_template[logical_name] + resource = self._parsed_resources[logical_name] + # ToDo: Standardize this. + if hasattr(resource, "physical_resource_id"): + resource_name = self._parsed_resources[ + logical_name + ].physical_resource_id + else: + resource_name = None parse_and_delete_resource( resource_name, resource_json, self, self._region_name ) - self._parsed_resources.pop(resource_name) + self._parsed_resources.pop(logical_name) tries = 1 while resources_by_action["Modify"] and tries < 5: - for resource_name, resource in resources_by_action["Modify"].copy().items(): - resource_json = new_template[resource_name] + for logical_name, _ in resources_by_action["Modify"].copy().items(): + resource_json = new_template[logical_name] try: changed_resource = parse_and_update_resource( - resource_name, resource_json, self, self._region_name + logical_name, resource_json, self, self._region_name ) except Exception as e: # skip over dependency violations, and try again in a # second pass last_exception = e else: - self._parsed_resources[resource_name] = changed_resource - del resources_by_action["Modify"][resource_name] + self._parsed_resources[logical_name] = changed_resource + del resources_by_action["Modify"][logical_name] tries += 1 if tries == 5: raise last_exception @@ -623,6 +684,21 @@ class ResourceMap(collections_abc.Mapping): try: if parsed_resource and hasattr(parsed_resource, "delete"): parsed_resource.delete(self._region_name) + else: + if hasattr(parsed_resource, "physical_resource_id"): + resource_name = parsed_resource.physical_resource_id + else: + resource_name = None + + resource_json = self._resource_json_map[ + parsed_resource.logical_resource_id + ] + + parse_and_delete_resource( + resource_name, resource_json, self, self._region_name, + ) + + self._parsed_resources.pop(parsed_resource.logical_resource_id) except Exception as e: # skip over dependency violations, and try again in a # second pass diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 77a3051fd..c7ced0186 
100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -10,6 +10,31 @@ from moto.s3 import s3_backend from moto.core import ACCOUNT_ID from .models import cloudformation_backends from .exceptions import ValidationError +from .utils import yaml_tag_constructor + + +def get_template_summary_response_from_template(template_body): + def get_resource_types(template_dict): + resources = {} + for key, value in template_dict.items(): + if key == "Resources": + resources = value + + resource_types = [] + for key, value in resources.items(): + resource_types.append(value["Type"]) + return resource_types + + yaml.add_multi_constructor("", yaml_tag_constructor) + + try: + template_dict = yaml.load(template_body, Loader=yaml.Loader) + except (yaml.parser.ParserError, yaml.scanner.ScannerError): + template_dict = json.loads(template_body) + + resources_types = get_resource_types(template_dict) + template_dict["resourceTypes"] = resources_types + return template_dict class CloudFormationResponse(BaseResponse): @@ -36,7 +61,7 @@ class CloudFormationResponse(BaseResponse): bucket_name = template_url_parts.netloc.split(".")[0] key_name = template_url_parts.path.lstrip("/") - key = s3_backend.get_key(bucket_name, key_name) + key = s3_backend.get_object(bucket_name, key_name) return key.value.decode("utf-8") def create_stack(self): @@ -50,6 +75,12 @@ class CloudFormationResponse(BaseResponse): for item in self._get_list_prefix("Tags.member") ) + if self.stack_name_exists(new_stack_name=stack_name): + template = self.response_template( + CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE + ) + return 400, {"status": 400}, template.render(name=stack_name) + # Hack dict-comprehension parameters = dict( [ @@ -82,6 +113,12 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_STACK_RESPONSE_TEMPLATE) return template.render(stack=stack) + def stack_name_exists(self, new_stack_name): + for stack in self.cloudformation_backend.stacks.values(): + if stack.name == new_stack_name: + return True + return False + @amzn_request_id def create_change_set(self): stack_name = self._get_param("StackName") @@ -221,7 +258,8 @@ class CloudFormationResponse(BaseResponse): return template.render(change_sets=change_sets) def list_stacks(self): - stacks = self.cloudformation_backend.list_stacks() + status_filter = self._get_multi_param("StackStatusFilter.member") + stacks = self.cloudformation_backend.list_stacks(status_filter) template = self.response_template(LIST_STACKS_RESPONSE) return template.render(stacks=stacks) @@ -256,6 +294,20 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(GET_TEMPLATE_RESPONSE_TEMPLATE) return template.render(stack=stack) + def get_template_summary(self): + stack_name = self._get_param("StackName") + template_url = self._get_param("TemplateURL") + stack_body = self._get_param("TemplateBody") + + if stack_name: + stack_body = self.cloudformation_backend.get_stack(stack_name).template + elif template_url: + stack_body = self._get_stack_from_s3_url(template_url) + + template_summary = get_template_summary_response_from_template(stack_body) + template = self.response_template(GET_TEMPLATE_SUMMARY_TEMPLATE) + return template.render(template_summary=template_summary) + def update_stack(self): stack_name = self._get_param("StackName") role_arn = self._get_param("RoleARN") @@ -339,19 +391,22 @@ class CloudFormationResponse(BaseResponse): return template.render(exports=exports, next_token=next_token) def 
validate_template(self): - cfn_lint = self.cloudformation_backend.validate_template( - self._get_param("TemplateBody") - ) + template_body = self._get_param("TemplateBody") + template_url = self._get_param("TemplateURL") + if template_url: + template_body = self._get_stack_from_s3_url(template_url) + + cfn_lint = self.cloudformation_backend.validate_template(template_body) if cfn_lint: raise ValidationError(cfn_lint[0].message) description = "" try: - description = json.loads(self._get_param("TemplateBody"))["Description"] + description = json.loads(template_body)["Description"] except (ValueError, KeyError): pass try: - description = yaml.load(self._get_param("TemplateBody"))["Description"] - except (yaml.ParserError, KeyError): + description = yaml.load(template_body, Loader=yaml.Loader)["Description"] + except (yaml.parser.ParserError, yaml.scanner.ScannerError, KeyError): pass template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE) return template.render(description=description) @@ -564,6 +619,15 @@ CREATE_STACK_RESPONSE_TEMPLATE = """ """ +CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE = """ + + Sender + AlreadyExistsException + Stack [{{ name }}] already exists + + 950ff8d7-812a-44b3-bb0c-9b271b954104 +""" + UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ stack.stack_id }} @@ -609,7 +673,7 @@ DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """ {% endfor %} - 2011-05-23T15:47:44Z + {{ change_set.creation_time_iso_8601 }} {{ change_set.execution_status }} {{ change_set.status }} {{ change_set.status_reason }} @@ -662,7 +726,11 @@ DESCRIBE_STACKS_TEMPLATE = """ {{ stack.name }} {{ stack.stack_id }} - 2010-07-27T22:28:28Z + {% if stack.change_set_id %} + {{ stack.change_set_id }} + {% endif %} + {{ stack.description }} + {{ stack.creation_time_iso_8601 }} {{ stack.status }} {% if stack.notification_arns %} @@ -714,7 +782,6 @@ DESCRIBE_STACKS_TEMPLATE = """ """ - DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE = """ @@ -729,7 +796,6 @@ DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE = """ """ - DESCRIBE_STACK_RESOURCES_RESPONSE = """ @@ -748,7 +814,6 @@ DESCRIBE_STACK_RESOURCES_RESPONSE = """ """ - DESCRIBE_STACK_EVENTS_RESPONSE = """ @@ -773,7 +838,6 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """ @@ -794,7 +858,6 @@ LIST_CHANGE_SETS_RESPONSE = """ """ - LIST_STACKS_RESPONSE = """ @@ -803,7 +866,7 @@ LIST_STACKS_RESPONSE = """ {{ stack.stack_id }} {{ stack.status }} {{ stack.name }} - 2011-05-23T15:47:44Z + {{ stack.creation_time_iso_8601 }} {{ stack.description }} {% endfor %} @@ -811,7 +874,6 @@ LIST_STACKS_RESPONSE = """ """ - LIST_STACKS_RESOURCES_RESPONSE = """ @@ -831,7 +893,6 @@ LIST_STACKS_RESOURCES_RESPONSE = """
""" - GET_TEMPLATE_RESPONSE_TEMPLATE = """ {{ stack.template }} @@ -841,7 +902,6 @@ GET_TEMPLATE_RESPONSE_TEMPLATE = """ """ - DELETE_STACK_RESPONSE_TEMPLATE = """ 5ccc7dcd-744c-11e5-be70-example @@ -849,7 +909,6 @@ DELETE_STACK_RESPONSE_TEMPLATE = """ """ - LIST_EXPORTS_RESPONSE = """ @@ -1110,3 +1169,19 @@ LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = ( """ ) + +GET_TEMPLATE_SUMMARY_TEMPLATE = """ + + {{ template_summary.Description }} + {% for resource in template_summary.resourceTypes %} + + {{ resource }} + + {% endfor %} + {{ template_summary.AWSTemplateFormatVersion }} + + + b9b4b068-3a41-11e5-94eb-example + + +""" diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index cd8481002..c9e522efb 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -6,7 +6,6 @@ import yaml import os import string -from cfnlint import decode, core from moto.core import ACCOUNT_ID @@ -42,8 +41,7 @@ def random_suffix(): def yaml_tag_constructor(loader, tag, node): - """convert shorthand intrinsic function to full name - """ + """convert shorthand intrinsic function to full name""" def _f(loader, tag, node): if tag == "!GetAtt": @@ -62,6 +60,8 @@ def yaml_tag_constructor(loader, tag, node): def validate_template_cfn_lint(template): + # Importing cfnlint adds a significant overhead, so we keep it local + from cfnlint import decode, core # Save the template to a temporary file -- cfn-lint requires a file filename = "file.tmp" @@ -70,7 +70,12 @@ def validate_template_cfn_lint(template): abs_filename = os.path.abspath(filename) # decode handles both yaml and json - template, matches = decode.decode(abs_filename, False) + try: + template, matches = decode.decode(abs_filename, False) + except TypeError: + # As of cfn-lint 0.39.0, the second argument (ignore_bad_template) was dropped + # https://github.com/aws-cloudformation/cfn-python-lint/pull/1580 + template, matches = decode.decode(abs_filename) # Set cfn-lint to info core.configure_logging(None) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 13b31ddfe..94668f32f 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -2,13 +2,15 @@ import json from boto3 import Session -from moto.core.utils import iso_8601_datetime_with_milliseconds -from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.exceptions import RESTError +from moto.logs import logs_backends from datetime import datetime, timedelta from dateutil.tz import tzutc from uuid import uuid4 -from .utils import make_arn_for_dashboard +from .utils import make_arn_for_dashboard, make_arn_for_alarm +from dateutil import parser from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -20,6 +22,41 @@ class Dimension(object): self.name = name self.value = value + def __eq__(self, item): + if isinstance(item, Dimension): + return self.name == item.name and self.value == item.value + return False + + def __ne__(self, item): # Only needed on Py2; Py3 defines it implicitly + return self != item + + +class Metric(object): + def __init__(self, metric_name, namespace, dimensions): + self.metric_name = metric_name + self.namespace = namespace + self.dimensions = dimensions + + +class MetricStat(object): + def __init__(self, metric, period, stat, unit): + self.metric = metric + self.period = period + self.stat = stat + self.unit = unit + + +class MetricDataQuery(object): + def __init__( 
+ self, id, label, period, return_data, expression=None, metric_stat=None + ): + self.id = id + self.label = label + self.period = period + self.return_data = return_data + self.expression = expression + self.metric_stat = metric_stat + def daterange(start, stop, step=timedelta(days=1), inclusive=False): """ @@ -55,8 +92,10 @@ class FakeAlarm(BaseModel): name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -66,12 +105,17 @@ class FakeAlarm(BaseModel): ok_actions, insufficient_data_actions, unit, + actions_enabled, + region="us-east-1", ): self.name = name + self.alarm_arn = make_arn_for_alarm(region, DEFAULT_ACCOUNT_ID, name) self.namespace = namespace self.metric_name = metric_name + self.metric_data_queries = metric_data_queries self.comparison_operator = comparison_operator self.evaluation_periods = evaluation_periods + self.datapoints_to_alarm = datapoints_to_alarm self.period = period self.threshold = threshold self.statistic = statistic @@ -79,6 +123,7 @@ class FakeAlarm(BaseModel): self.dimensions = [ Dimension(dimension["name"], dimension["value"]) for dimension in dimensions ] + self.actions_enabled = actions_enabled self.alarm_actions = alarm_actions self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions @@ -110,6 +155,18 @@ class FakeAlarm(BaseModel): self.state_updated_timestamp = datetime.utcnow() +def are_dimensions_same(metric_dimensions, dimensions): + for dimension in metric_dimensions: + for new_dimension in dimensions: + if ( + dimension.name != new_dimension.name + or dimension.value != new_dimension.value + ): + return False + + return True + + class MetricDatum(BaseModel): def __init__(self, namespace, name, value, dimensions, timestamp): self.namespace = namespace @@ -120,6 +177,23 @@ class MetricDatum(BaseModel): Dimension(dimension["Name"], dimension["Value"]) for dimension in dimensions ] + def filter(self, namespace, name, dimensions, already_present_metrics): + if namespace and namespace != self.namespace: + return False + if name and name != self.name: + return False + for metric in already_present_metrics: + if self.dimensions and are_dimensions_same( + metric.dimensions, self.dimensions + ): + return False + + if dimensions and any( + Dimension(d["Name"], d["Value"]) not in self.dimensions for d in dimensions + ): + return False + return True + class Dashboard(BaseModel): def __init__(self, name, body): @@ -146,7 +220,7 @@ class Dashboard(BaseModel): class Statistics: def __init__(self, stats, dt): - self.timestamp = iso_8601_datetime_with_milliseconds(dt) + self.timestamp = iso_8601_datetime_without_milliseconds(dt) self.values = [] self.stats = stats @@ -198,13 +272,24 @@ class CloudWatchBackend(BaseBackend): self.metric_data = [] self.paged_metric_data = {} + @property + # Retrieve a list of all OOTB metrics that are provided by metrics providers + # Computed on the fly + def aws_metric_data(self): + md = [] + for name, service in metric_providers.items(): + md.extend(service.get_cloudwatch_metrics()) + return md + def put_metric_alarm( self, name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -214,13 +299,17 @@ class CloudWatchBackend(BaseBackend): ok_actions, insufficient_data_actions, unit, + actions_enabled, + region="us-east-1", ): alarm = FakeAlarm( name, namespace, metric_name, + metric_data_queries, comparison_operator, 
evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -230,7 +319,10 @@ class CloudWatchBackend(BaseBackend): ok_actions, insufficient_data_actions, unit, + actions_enabled, + region, ) + self.alarms[name] = alarm return alarm @@ -270,6 +362,13 @@ class CloudWatchBackend(BaseBackend): ) def delete_alarms(self, alarm_names): + for alarm_name in alarm_names: + if alarm_name not in self.alarms: + raise RESTError( + "ResourceNotFound", + "Alarm {0} not found".format(alarm_name), + status=404, + ) for alarm_name in alarm_names: self.alarms.pop(alarm_name, None) @@ -278,8 +377,7 @@ class CloudWatchBackend(BaseBackend): # Preserve "datetime" for get_metric_statistics comparisons timestamp = metric_member.get("Timestamp") if timestamp is not None and type(timestamp) != datetime: - timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ") - timestamp = timestamp.replace(tzinfo=tzutc()) + timestamp = parser.parse(timestamp) self.metric_data.append( MetricDatum( namespace, @@ -290,6 +388,43 @@ class CloudWatchBackend(BaseBackend): ) ) + def get_metric_data(self, queries, start_time, end_time): + period_data = [ + md for md in self.metric_data if start_time <= md.timestamp <= end_time + ] + results = [] + for query in queries: + query_ns = query["metric_stat._metric._namespace"] + query_name = query["metric_stat._metric._metric_name"] + query_data = [ + md + for md in period_data + if md.namespace == query_ns and md.name == query_name + ] + metric_values = [m.value for m in query_data] + result_vals = [] + stat = query["metric_stat._stat"] + if len(metric_values) > 0: + if stat == "Average": + result_vals.append(sum(metric_values) / len(metric_values)) + elif stat == "Minimum": + result_vals.append(min(metric_values)) + elif stat == "Maximum": + result_vals.append(max(metric_values)) + elif stat == "Sum": + result_vals.append(sum(metric_values)) + + label = query["metric_stat._metric._metric_name"] + " " + stat + results.append( + { + "id": query["id"], + "label": label, + "vals": result_vals, + "timestamps": [datetime.now() for _ in result_vals], + } + ) + return results + def get_metric_statistics( self, namespace, metric_name, start_time, end_time, period, stats ): @@ -329,7 +464,7 @@ class CloudWatchBackend(BaseBackend): return data def get_all_metrics(self): - return self.metric_data + return self.metric_data + self.aws_metric_data def put_dashboard(self, name, body): self.dashboards[name] = Dashboard(name, body) @@ -381,7 +516,7 @@ class CloudWatchBackend(BaseBackend): self.alarms[alarm_name].update_state(reason, reason_data, state_value) - def list_metrics(self, next_token, namespace, metric_name): + def list_metrics(self, next_token, namespace, metric_name, dimensions): if next_token: if next_token not in self.paged_metric_data: raise RESTError( @@ -392,16 +527,21 @@ class CloudWatchBackend(BaseBackend): del self.paged_metric_data[next_token] # Cant reuse same token twice return self._get_paginated(metrics) else: - metrics = self.get_filtered_metrics(metric_name, namespace) + metrics = self.get_filtered_metrics(metric_name, namespace, dimensions) return self._get_paginated(metrics) - def get_filtered_metrics(self, metric_name, namespace): + def get_filtered_metrics(self, metric_name, namespace, dimensions): metrics = self.get_all_metrics() - if namespace: - metrics = [md for md in metrics if md.namespace == namespace] - if metric_name: - metrics = [md for md in metrics if md.name == metric_name] - return metrics + new_metrics = [] + for md in metrics: + if 
md.filter( + namespace=namespace, + name=metric_name, + dimensions=dimensions, + already_present_metrics=new_metrics, + ): + new_metrics.append(md) + return new_metrics def _get_paginated(self, metrics): if len(metrics) > 500: @@ -412,24 +552,31 @@ class CloudWatchBackend(BaseBackend): return None, metrics -class LogGroup(BaseModel): +class LogGroup(CloudFormationModel): def __init__(self, spec): # required self.name = spec["LogGroupName"] # optional self.tags = spec.get("Tags", []) + @staticmethod + def cloudformation_name_type(): + return "LogGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html + return "AWS::Logs::LogGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - spec = {"LogGroupName": properties["LogGroupName"]} - optional_properties = "Tags".split() - for prop in optional_properties: - if prop in properties: - spec[prop] = properties[prop] - return LogGroup(spec) + tags = properties.get("Tags", {}) + return logs_backends[region_name].create_log_group( + resource_name, tags, **properties + ) cloudwatch_backends = {} @@ -441,3 +588,8 @@ for region in Session().get_available_regions( cloudwatch_backends[region] = CloudWatchBackend() for region in Session().get_available_regions("cloudwatch", partition_name="aws-cn"): cloudwatch_backends[region] = CloudWatchBackend() + +# List of services that provide OOTB CW metrics +# See the S3Backend constructor for an example +# TODO: We might have to separate this out per region for non-global services +metric_providers = {} diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 7872e71fd..159e24425 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,7 +1,7 @@ import json from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse -from .models import cloudwatch_backends +from .models import cloudwatch_backends, MetricDataQuery, MetricStat, Metric, Dimension from dateutil.parser import parse as dtparse @@ -19,8 +19,37 @@ class CloudWatchResponse(BaseResponse): name = self._get_param("AlarmName") namespace = self._get_param("Namespace") metric_name = self._get_param("MetricName") + metrics = self._get_multi_param("Metrics.member") + metric_data_queries = None + if metrics: + metric_data_queries = [ + MetricDataQuery( + id=metric.get("Id"), + label=metric.get("Label"), + period=metric.get("Period"), + return_data=metric.get("ReturnData"), + expression=metric.get("Expression"), + metric_stat=MetricStat( + metric=Metric( + metric_name=metric.get("MetricStat.Metric.MetricName"), + namespace=metric.get("MetricStat.Metric.Namespace"), + dimensions=[ + Dimension(name=dim["Name"], value=dim["Value"]) + for dim in metric["MetricStat.Metric.Dimensions.member"] + ], + ), + period=metric.get("MetricStat.Period"), + stat=metric.get("MetricStat.Stat"), + unit=metric.get("MetricStat.Unit"), + ) + if "MetricStat.Metric.MetricName" in metric + else None, + ) + for metric in metrics + ] comparison_operator = self._get_param("ComparisonOperator") evaluation_periods = self._get_param("EvaluationPeriods") + datapoints_to_alarm = self._get_param("DatapointsToAlarm") period = self._get_param("Period") threshold = self._get_param("Threshold") statistic = self._get_param("Statistic") @@ -28,6 +57,7 @@ class CloudWatchResponse(BaseResponse): dimensions = 
self._get_list_prefix("Dimensions.member") alarm_actions = self._get_multi_param("AlarmActions.member") ok_actions = self._get_multi_param("OKActions.member") + actions_enabled = self._get_param("ActionsEnabled") insufficient_data_actions = self._get_multi_param( "InsufficientDataActions.member" ) @@ -36,8 +66,10 @@ class CloudWatchResponse(BaseResponse): name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -47,6 +79,8 @@ class CloudWatchResponse(BaseResponse): ok_actions, insufficient_data_actions, unit, + actions_enabled, + self.region, ) template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) @@ -90,6 +124,18 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_METRIC_DATA_TEMPLATE) return template.render() + @amzn_request_id + def get_metric_data(self): + start = dtparse(self._get_param("StartTime")) + end = dtparse(self._get_param("EndTime")) + queries = self._get_list_prefix("MetricDataQueries.member") + results = self.cloudwatch_backend.get_metric_data( + start_time=start, end_time=end, queries=queries + ) + + template = self.response_template(GET_METRIC_DATA_TEMPLATE) + return template.render(results=results) + @amzn_request_id def get_metric_statistics(self): namespace = self._get_param("Namespace") @@ -122,9 +168,10 @@ class CloudWatchResponse(BaseResponse): def list_metrics(self): namespace = self._get_param("Namespace") metric_name = self._get_param("MetricName") + dimensions = self._get_multi_param("Dimensions.member") next_token = self._get_param("NextToken") next_token, metrics = self.cloudwatch_backend.list_metrics( - next_token, namespace, metric_name + next_token, namespace, metric_name, dimensions ) template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics, next_token=next_token) @@ -146,9 +193,23 @@ class CloudWatchResponse(BaseResponse): def describe_alarm_history(self): raise NotImplementedError() + @staticmethod + def filter_alarms(alarms, metric_name, namespace): + metric_filtered_alarms = [] + + for alarm in alarms: + if alarm.metric_name == metric_name and alarm.namespace == namespace: + metric_filtered_alarms.append(alarm) + return metric_filtered_alarms + @amzn_request_id def describe_alarms_for_metric(self): - raise NotImplementedError() + alarms = self.cloudwatch_backend.get_all_alarms() + namespace = self._get_param("Namespace") + metric_name = self._get_param("MetricName") + filtered_alarms = self.filter_alarms(alarms, metric_name, namespace) + template = self.response_template(DESCRIBE_METRIC_ALARMS_TEMPLATE) + return template.render(alarms=filtered_alarms) @amzn_request_id def disable_alarm_actions(self): @@ -227,7 +288,115 @@ DESCRIBE_ALARMS_TEMPLATE = """ + + + {% for alarm in alarms %} + + {{ alarm.actions_enabled }} + + {% for action in alarm.alarm_actions %} + {{ action }} + {% endfor %} + + {{ alarm.alarm_arn }} {{ alarm.configuration_updated_timestamp }} {{ alarm.description }} {{ alarm.name }} @@ -264,8 +433,8 @@ DESCRIBE_ALARMS_TEMPLATE = """ +""" DELETE_METRIC_ALARMS_TEMPLATE = """ @@ -283,6 +452,35 @@ PUT_METRIC_DATA_TEMPLATE = """ + + + {{ request_id }} + + + + + {% for result in results %} + + {{ result.id }} + + Complete + + {% for val in result.timestamps %} + {{ val }} + {% endfor %} + + + {% for val in result.vals %} + {{ val }} + {% endfor %} + + + {% endfor %} + + +""" + GET_METRIC_STATISTICS_TEMPLATE = """ diff --git 
a/moto/cloudwatch/utils.py b/moto/cloudwatch/utils.py index ee33a4402..896133d04 100644 --- a/moto/cloudwatch/utils.py +++ b/moto/cloudwatch/utils.py @@ -3,3 +3,7 @@ from __future__ import unicode_literals def make_arn_for_dashboard(account_id, name): return "arn:aws:cloudwatch::{0}dashboard/{1}".format(account_id, name) + + +def make_arn_for_alarm(region, account_id, alarm_name): + return "arn:aws:cloudwatch:{0}:{1}:alarm:{2}".format(region, account_id, alarm_name) diff --git a/moto/codecommit/models.py b/moto/codecommit/models.py index 6a4e82ad2..ad99e8f3d 100644 --- a/moto/codecommit/models.py +++ b/moto/codecommit/models.py @@ -2,7 +2,7 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from datetime import datetime -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from .exceptions import RepositoryDoesNotExistException, RepositoryNameExistsException import uuid diff --git a/moto/codepipeline/models.py b/moto/codepipeline/models.py index 50f07deb0..4d2b9c0f9 100644 --- a/moto/codepipeline/models.py +++ b/moto/codepipeline/models.py @@ -15,9 +15,7 @@ from moto.codepipeline.exceptions import ( InvalidTagsException, TooManyTagsException, ) -from moto.core import BaseBackend, BaseModel - -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel class CodePipeline(BaseModel): diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py index 6143d5121..54016ad17 100644 --- a/moto/cognitoidentity/utils.py +++ b/moto/cognitoidentity/utils.py @@ -1,5 +1,5 @@ -from moto.core.utils import get_random_hex +from uuid import uuid4 def get_random_identity_id(region): - return "{0}:{1}".format(region, get_random_hex(length=19)) + return "{0}:{1}".format(region, uuid4()) diff --git a/moto/cognitoidp/exceptions.py b/moto/cognitoidp/exceptions.py index e52b7c49f..baf5f6526 100644 --- a/moto/cognitoidp/exceptions.py +++ b/moto/cognitoidp/exceptions.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import json from werkzeug.exceptions import BadRequest +from moto.core.exceptions import JsonRESTError class ResourceNotFoundError(BadRequest): @@ -42,3 +43,19 @@ class NotAuthorizedError(BadRequest): self.description = json.dumps( {"message": message, "__type": "NotAuthorizedException"} ) + + +class UserNotConfirmedException(BadRequest): + def __init__(self, message): + super(UserNotConfirmedException, self).__init__() + self.description = json.dumps( + {"message": message, "__type": "UserNotConfirmedException"} + ) + + +class InvalidParameterException(JsonRESTError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidParameterException, self).__init__( + "InvalidParameterException", msg or "A parameter is specified incorrectly." 
+ ) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 2f2f7e870..7078583fa 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -14,17 +14,22 @@ from jose import jws from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID from .exceptions import ( GroupExistsException, NotAuthorizedError, ResourceNotFoundError, UserNotFoundError, UsernameExistsException, + UserNotConfirmedException, + InvalidParameterException, ) +from .utils import create_id, check_secret_hash UserStatus = { "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD", "CONFIRMED": "CONFIRMED", + "UNCONFIRMED": "UNCONFIRMED", } @@ -69,6 +74,9 @@ class CognitoIdpUserPool(BaseModel): def __init__(self, region, name, extended_config): self.region = region self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) + self.arn = "arn:aws:cognito-idp:{}:{}:userpool/{}".format( + self.region, DEFAULT_ACCOUNT_ID, self.id + ) self.name = name self.status = None self.extended_config = extended_config or {} @@ -79,6 +87,7 @@ class CognitoIdpUserPool(BaseModel): self.identity_providers = OrderedDict() self.groups = OrderedDict() self.users = OrderedDict() + self.resource_servers = OrderedDict() self.refresh_tokens = {} self.access_tokens = {} self.id_tokens = {} @@ -91,6 +100,7 @@ class CognitoIdpUserPool(BaseModel): def _base_json(self): return { "Id": self.id, + "Arn": self.arn, "Name": self.name, "Status": self.status, "CreationDate": time.mktime(self.creation_date.timetuple()), @@ -123,8 +133,12 @@ class CognitoIdpUserPool(BaseModel): "exp": now + expires_in, } payload.update(extra_data) + headers = {"kid": "dummy"} # KID as present in jwks-public.json - return jws.sign(payload, self.json_web_key, algorithm="RS256"), expires_in + return ( + jws.sign(payload, self.json_web_key, headers, algorithm="RS256"), + expires_in, + ) def create_id_token(self, client_id, username): extra_data = self.get_user_extra_data_by_client_id(client_id, username) @@ -201,10 +215,11 @@ class CognitoIdpUserPoolDomain(BaseModel): class CognitoIdpUserPoolClient(BaseModel): - def __init__(self, user_pool_id, extended_config): + def __init__(self, user_pool_id, generate_secret, extended_config): self.user_pool_id = user_pool_id - self.id = str(uuid.uuid4()) + self.id = create_id() self.secret = str(uuid.uuid4()) + self.generate_secret = generate_secret or False self.extended_config = extended_config or {} def _base_json(self): @@ -216,6 +231,8 @@ class CognitoIdpUserPoolClient(BaseModel): def to_json(self, extended=False): user_pool_client_json = self._base_json() + if self.generate_secret: + user_pool_client_json.update({"ClientSecret": self.secret}) if extended: user_pool_client_json.update(self.extended_config) @@ -285,6 +302,9 @@ class CognitoIdpUser(BaseModel): self.attributes = attributes self.create_date = datetime.datetime.utcnow() self.last_modified_date = datetime.datetime.utcnow() + self.sms_mfa_enabled = False + self.software_token_mfa_enabled = False + self.token_verified = False # Groups this user is a member of. # Note that these links are bidirectional. @@ -301,6 +321,11 @@ class CognitoIdpUser(BaseModel): # list_users brings back "Attributes" while admin_get_user brings back "UserAttributes". 
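# Illustrative shapes only (per the public Cognito API, not added by this patch):
#   list_users     -> {"Username": "jdoe", "Attributes":     [{"Name": "email", "Value": "jdoe@example.com"}]}
#   admin_get_user -> {"Username": "jdoe", "UserAttributes": [{"Name": "email", "Value": "jdoe@example.com"}]}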
def to_json(self, extended=False, attributes_key="Attributes"): + user_mfa_setting_list = [] + if self.software_token_mfa_enabled: + user_mfa_setting_list.append("SOFTWARE_TOKEN_MFA") + elif self.sms_mfa_enabled: + user_mfa_setting_list.append("SMS_MFA") user_json = self._base_json() if extended: user_json.update( @@ -308,6 +333,7 @@ class CognitoIdpUser(BaseModel): "Enabled": self.enabled, attributes_key: self.attributes, "MFAOptions": [], + "UserMFASettingList": user_mfa_setting_list, } ) @@ -325,6 +351,26 @@ class CognitoIdpUser(BaseModel): self.attributes = expand_attrs(flat_attributes) +class CognitoResourceServer(BaseModel): + def __init__(self, user_pool_id, identifier, name, scopes): + self.user_pool_id = user_pool_id + self.identifier = identifier + self.name = name + self.scopes = scopes + + def to_json(self): + res = { + "UserPoolId": self.user_pool_id, + "Identifier": self.identifier, + "Name": self.name, + } + + if len(self.scopes) != 0: + res.update({"Scopes": self.scopes}) + + return res + + class CognitoIdpBackend(BaseBackend): def __init__(self, region): super(CognitoIdpBackend, self).__init__() @@ -393,12 +439,14 @@ class CognitoIdpBackend(BaseBackend): return user_pool_domain # User pool client - def create_user_pool_client(self, user_pool_id, extended_config): + def create_user_pool_client(self, user_pool_id, generate_secret, extended_config): user_pool = self.user_pools.get(user_pool_id) if not user_pool: raise ResourceNotFoundError(user_pool_id) - user_pool_client = CognitoIdpUserPoolClient(user_pool_id, extended_config) + user_pool_client = CognitoIdpUserPoolClient( + user_pool_id, generate_secret, extended_config + ) user_pool.clients[user_pool_client.id] = user_pool_client return user_pool_client @@ -693,6 +741,9 @@ class CognitoIdpBackend(BaseBackend): def respond_to_auth_challenge( self, session, client_id, challenge_name, challenge_responses ): + if challenge_name == "PASSWORD_VERIFIER": + session = challenge_responses.get("PASSWORD_CLAIM_SECRET_BLOCK") + user_pool = self.sessions.get(session) if not user_pool: raise ResourceNotFoundError(session) @@ -713,6 +764,62 @@ class CognitoIdpBackend(BaseBackend): del self.sessions[session] return self._log_user_in(user_pool, client, username) + elif challenge_name == "PASSWORD_VERIFIER": + username = challenge_responses.get("USERNAME") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + password_claim_signature = challenge_responses.get( + "PASSWORD_CLAIM_SIGNATURE" + ) + if not password_claim_signature: + raise ResourceNotFoundError(password_claim_signature) + password_claim_secret_block = challenge_responses.get( + "PASSWORD_CLAIM_SECRET_BLOCK" + ) + if not password_claim_secret_block: + raise ResourceNotFoundError(password_claim_secret_block) + timestamp = challenge_responses.get("TIMESTAMP") + if not timestamp: + raise ResourceNotFoundError(timestamp) + + if user.software_token_mfa_enabled: + return { + "ChallengeName": "SOFTWARE_TOKEN_MFA", + "Session": session, + "ChallengeParameters": {}, + } + + if user.sms_mfa_enabled: + return { + "ChallengeName": "SMS_MFA", + "Session": session, + "ChallengeParameters": {}, + } + + del self.sessions[session] + return self._log_user_in(user_pool, client, username) + elif challenge_name == "SOFTWARE_TOKEN_MFA": + username = challenge_responses.get("USERNAME") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + software_token_mfa_code = challenge_responses.get("SOFTWARE_TOKEN_MFA_CODE") + if not 
software_token_mfa_code: + raise ResourceNotFoundError(software_token_mfa_code) + + if client.generate_secret: + secret_hash = challenge_responses.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + del self.sessions[session] + return self._log_user_in(user_pool, client, username) + else: return {} @@ -754,6 +861,187 @@ class CognitoIdpBackend(BaseBackend): user = user_pool.users[username] user.update_attributes(attributes) + def create_resource_server(self, user_pool_id, identifier, name, scopes): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if identifier in user_pool.resource_servers: + raise InvalidParameterException( + "%s already exists in user pool %s." % (identifier, user_pool_id) + ) + + resource_server = CognitoResourceServer(user_pool_id, identifier, name, scopes) + user_pool.resource_servers[identifier] = resource_server + return resource_server + + def sign_up(self, client_id, username, password, attributes): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p + if user_pool is None: + raise ResourceNotFoundError(client_id) + + user = CognitoIdpUser( + user_pool_id=user_pool.id, + username=username, + password=password, + attributes=attributes, + status=UserStatus["UNCONFIRMED"], + ) + user_pool.users[user.username] = user + return user + + def confirm_sign_up(self, client_id, username, confirmation_code): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p + if user_pool is None: + raise ResourceNotFoundError(client_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = user_pool.users[username] + user.status = UserStatus["CONFIRMED"] + return "" + + def initiate_auth(self, client_id, auth_flow, auth_parameters): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p + if user_pool is None: + raise ResourceNotFoundError(client_id) + + # Look the client up on the matching pool, not the loop variable `p`, + # which points at whichever pool happened to be iterated last. + client = user_pool.clients.get(client_id) + + if auth_flow == "USER_SRP_AUTH": + username = auth_parameters.get("USERNAME") + srp_a = auth_parameters.get("SRP_A") + if not srp_a: + raise ResourceNotFoundError(srp_a) + if client.generate_secret: + secret_hash = auth_parameters.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if user.status == UserStatus["UNCONFIRMED"]: + raise UserNotConfirmedException("User is not confirmed.") + + session = str(uuid.uuid4()) + self.sessions[session] = user_pool + + return { + "ChallengeName": "PASSWORD_VERIFIER", + "Session": session, + "ChallengeParameters": { + "SALT": str(uuid.uuid4()), + "SRP_B": str(uuid.uuid4()), + "USERNAME": user.id, + "USER_ID_FOR_SRP": user.id, + "SECRET_BLOCK": session, + }, + } + elif auth_flow == "REFRESH_TOKEN": + refresh_token = auth_parameters.get("REFRESH_TOKEN") + if not refresh_token: + raise ResourceNotFoundError(refresh_token) + + client_id, username = user_pool.refresh_tokens[refresh_token] + if not username: + raise ResourceNotFoundError(username) + + if client.generate_secret: + secret_hash = auth_parameters.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + ( +
id_token, + access_token, + expires_in, + ) = user_pool.create_tokens_from_refresh_token(refresh_token) + + return { + "AuthenticationResult": { + "IdToken": id_token, + "AccessToken": access_token, + "ExpiresIn": expires_in, + } + } + else: + return None + + def associate_software_token(self, access_token): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + return {"SecretCode": str(uuid.uuid4())} + else: + raise NotAuthorizedError(access_token) + + def verify_software_token(self, access_token, user_code): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + user.token_verified = True + + return {"Status": "SUCCESS"} + else: + raise NotAuthorizedError(access_token) + + def set_user_mfa_preference( + self, access_token, software_token_mfa_settings, sms_mfa_settings + ): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if software_token_mfa_settings["Enabled"]: + if user.token_verified: + user.software_token_mfa_enabled = True + else: + raise InvalidParameterException( + "User has not verified software token mfa" + ) + + elif sms_mfa_settings["Enabled"]: + user.sms_mfa_enabled = True + + return None + else: + raise NotAuthorizedError(access_token) + + def admin_set_user_password(self, user_pool_id, username, password, permanent): + user = self.admin_get_user(user_pool_id, username) + user.password = password + if permanent: + user.status = UserStatus["CONFIRMED"] + else: + user.status = UserStatus["FORCE_CHANGE_PASSWORD"] + cognitoidp_backends = {} for region in Session().get_available_regions("cognito-idp"): @@ -778,5 +1066,7 @@ def find_region_by_value(key, value): if key == "access_token" and value in user_pool.access_tokens: return region - - return cognitoidp_backends.keys()[0] + # If we can't find the `client_id` or `access_token`, we just pass + # back a default backend region, which will raise the appropriate + # error message (e.g. NotAuthorized or NotFound). 
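# (A note on the fix itself: list(cognitoidp_backends)[0] is required because
# on Python 3 dict.keys() returns a view object that does not support indexing,
# so the previous `.keys()[0]` call raised a TypeError.)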
+ return list(cognitoidp_backends)[0] diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 6c89c4806..e10a12282 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -4,7 +4,7 @@ import json import os from moto.core.responses import BaseResponse -from .models import cognitoidp_backends, find_region_by_value +from .models import cognitoidp_backends, find_region_by_value, UserStatus class CognitoIdpResponse(BaseResponse): @@ -84,8 +84,9 @@ class CognitoIdpResponse(BaseResponse): # User pool client def create_user_pool_client(self): user_pool_id = self.parameters.pop("UserPoolId") + generate_secret = self.parameters.pop("GenerateSecret", False) user_pool_client = cognitoidp_backends[self.region].create_user_pool_client( - user_pool_id, self.parameters + user_pool_id, generate_secret, self.parameters ) return json.dumps({"UserPoolClient": user_pool_client.to_json(extended=True)}) @@ -286,7 +287,7 @@ class CognitoIdpResponse(BaseResponse): user_pool_id, limit=limit, pagination_token=token ) if filt: - name, value = filt.replace('"', "").split("=") + name, value = filt.replace('"', "").replace(" ", "").split("=") users = [ user for user in users @@ -378,6 +379,86 @@ class CognitoIdpResponse(BaseResponse): ) return "" + # Resource Server + def create_resource_server(self): + user_pool_id = self._get_param("UserPoolId") + identifier = self._get_param("Identifier") + name = self._get_param("Name") + scopes = self._get_param("Scopes") + resource_server = cognitoidp_backends[self.region].create_resource_server( + user_pool_id, identifier, name, scopes + ) + return json.dumps({"ResourceServer": resource_server.to_json()}) + + def sign_up(self): + client_id = self._get_param("ClientId") + username = self._get_param("Username") + password = self._get_param("Password") + user = cognitoidp_backends[self.region].sign_up( + client_id=client_id, + username=username, + password=password, + attributes=self._get_param("UserAttributes", []), + ) + return json.dumps( + { + "UserConfirmed": user.status == UserStatus["CONFIRMED"], + "UserSub": user.id, + } + ) + + def confirm_sign_up(self): + client_id = self._get_param("ClientId") + username = self._get_param("Username") + confirmation_code = self._get_param("ConfirmationCode") + cognitoidp_backends[self.region].confirm_sign_up( + client_id=client_id, username=username, confirmation_code=confirmation_code, + ) + return "" + + def initiate_auth(self): + client_id = self._get_param("ClientId") + auth_flow = self._get_param("AuthFlow") + auth_parameters = self._get_param("AuthParameters") + + auth_result = cognitoidp_backends[self.region].initiate_auth( + client_id, auth_flow, auth_parameters + ) + + return json.dumps(auth_result) + + def associate_software_token(self): + access_token = self._get_param("AccessToken") + result = cognitoidp_backends[self.region].associate_software_token(access_token) + return json.dumps(result) + + def verify_software_token(self): + access_token = self._get_param("AccessToken") + user_code = self._get_param("UserCode") + result = cognitoidp_backends[self.region].verify_software_token( + access_token, user_code + ) + return json.dumps(result) + + def set_user_mfa_preference(self): + access_token = self._get_param("AccessToken") + software_token_mfa_settings = self._get_param("SoftwareTokenMfaSettings") + sms_mfa_settings = self._get_param("SMSMfaSettings") + cognitoidp_backends[self.region].set_user_mfa_preference( + access_token, software_token_mfa_settings, sms_mfa_settings + ) + 
return "" + + def admin_set_user_password(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + password = self._get_param("Password") + permanent = self._get_param("Permanent") + cognitoidp_backends[self.region].admin_set_user_password( + user_pool_id, username, password, permanent + ) + return "" + class CognitoIdpJsonWebKeyResponse(BaseResponse): def __init__(self): diff --git a/moto/cognitoidp/urls.py b/moto/cognitoidp/urls.py index 5d1dff1d0..09e675e70 100644 --- a/moto/cognitoidp/urls.py +++ b/moto/cognitoidp/urls.py @@ -5,5 +5,5 @@ url_bases = ["https?://cognito-idp.(.+).amazonaws.com"] url_paths = { "{0}/$": CognitoIdpResponse.dispatch, - "{0}//.well-known/jwks.json$": CognitoIdpJsonWebKeyResponse().serve_json_web_key, + "{0}/(?P[^/]+)/.well-known/jwks.json$": CognitoIdpJsonWebKeyResponse().serve_json_web_key, } diff --git a/moto/cognitoidp/utils.py b/moto/cognitoidp/utils.py new file mode 100644 index 000000000..11f34bcae --- /dev/null +++ b/moto/cognitoidp/utils.py @@ -0,0 +1,21 @@ +from __future__ import unicode_literals +import six +import random +import string +import hashlib +import hmac +import base64 + + +def create_id(): + size = 26 + chars = list(range(10)) + list(string.ascii_lowercase) + return "".join(six.text_type(random.choice(chars)) for x in range(size)) + + +def check_secret_hash(app_client_secret, app_client_id, username, secret_hash): + key = bytes(str(app_client_secret).encode("latin-1")) + msg = bytes(str(username + app_client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + SECRET_HASH = base64.b64encode(new_digest).decode() + return SECRET_HASH == secret_hash diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py index 4a0dc0d73..4030b87a3 100644 --- a/moto/config/exceptions.py +++ b/moto/config/exceptions.py @@ -366,3 +366,29 @@ class TooManyResourceKeys(JsonRESTError): message = str(message) super(TooManyResourceKeys, self).__init__("ValidationException", message) + + +class InvalidResultTokenException(JsonRESTError): + code = 400 + + def __init__(self): + message = "The resultToken provided is invalid" + super(InvalidResultTokenException, self).__init__( + "InvalidResultTokenException", message + ) + + +class ValidationException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ValidationException, self).__init__("ValidationException", message) + + +class NoSuchOrganizationConformancePackException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(NoSuchOrganizationConformancePackException, self).__init__( + "NoSuchOrganizationConformancePackException", message + ) diff --git a/moto/config/models.py b/moto/config/models.py index 45dccd1ba..99ae49e44 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -40,13 +40,17 @@ from moto.config.exceptions import ( TooManyResourceIds, ResourceNotDiscoveredException, TooManyResourceKeys, + InvalidResultTokenException, + ValidationException, + NoSuchOrganizationConformancePackException, ) from moto.core import BaseBackend, BaseModel -from moto.s3.config import s3_config_query - +from moto.s3.config import s3_account_public_access_block_query, s3_config_query from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID +from moto.iam.config import role_config_query, policy_config_query + POP_STRINGS = [ "capitalizeStart", "CapitalizeStart", @@ -58,7 +62,12 @@ POP_STRINGS = [ DEFAULT_PAGE_SIZE = 100 # Map the Config resource type to a backend: -RESOURCE_MAP = {"AWS::S3::Bucket": 
s3_config_query} +RESOURCE_MAP = { + "AWS::S3::Bucket": s3_config_query, + "AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query, + "AWS::IAM::Role": role_config_query, + "AWS::IAM::Policy": policy_config_query, +} def datetime2int(date): @@ -155,7 +164,8 @@ class ConfigEmptyDictable(BaseModel): def to_dict(self): data = {} for item, value in self.__dict__.items(): - if value is not None: + # ignore private attributes + if not item.startswith("_") and value is not None: if isinstance(value, ConfigEmptyDictable): data[ snake_to_camels( @@ -363,12 +373,56 @@ class ConfigAggregationAuthorization(ConfigEmptyDictable): self.tags = tags or {} +class OrganizationConformancePack(ConfigEmptyDictable): + def __init__( + self, + region, + name, + delivery_s3_bucket, + delivery_s3_key_prefix=None, + input_parameters=None, + excluded_accounts=None, + ): + super(OrganizationConformancePack, self).__init__( + capitalize_start=True, capitalize_arn=False + ) + + self._status = "CREATE_SUCCESSFUL" + self._unique_pack_name = "{0}-{1}".format(name, random_string()) + + self.conformance_pack_input_parameters = input_parameters or [] + self.delivery_s3_bucket = delivery_s3_bucket + self.delivery_s3_key_prefix = delivery_s3_key_prefix + self.excluded_accounts = excluded_accounts or [] + self.last_update_time = datetime2int(datetime.utcnow()) + self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format( + region, DEFAULT_ACCOUNT_ID, self._unique_pack_name + ) + self.organization_conformance_pack_name = name + + def update( + self, + delivery_s3_bucket, + delivery_s3_key_prefix, + input_parameters, + excluded_accounts, + ): + self._status = "UPDATE_SUCCESSFUL" + + self.conformance_pack_input_parameters = input_parameters + self.delivery_s3_bucket = delivery_s3_bucket + self.delivery_s3_key_prefix = delivery_s3_key_prefix + self.excluded_accounts = excluded_accounts + self.last_update_time = datetime2int(datetime.utcnow()) + + class ConfigBackend(BaseBackend): def __init__(self): self.recorders = {} self.delivery_channels = {} self.config_aggregators = {} self.aggregation_authorizations = {} + self.organization_conformance_packs = {} @staticmethod def _validate_resource_types(resource_list): @@ -867,16 +921,17 @@ class ConfigBackend(BaseBackend): backend_region=backend_query_region, ) - result = { - "resourceIdentifiers": [ - { - "resourceType": identifier["type"], - "resourceId": identifier["id"], - "resourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = {"resourceType": identifier["type"], "resourceId": identifier["id"]} + + # Some resource types lack names: + if identifier.get("name"): + item["resourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"resourceIdentifiers": resource_identifiers} if new_token: result["nextToken"] = new_token @@ -925,20 +980,23 @@ class ConfigBackend(BaseBackend): limit, next_token, resource_region=resource_region, + aggregator=self.config_aggregators.get(aggregator_name).__dict__, ) - result = { - "ResourceIdentifiers": [ - { - "SourceAccountId": DEFAULT_ACCOUNT_ID, - "SourceRegion": identifier["region"], - "ResourceType": identifier["type"], - "ResourceId": identifier["id"], - "ResourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = { + "SourceAccountId": DEFAULT_ACCOUNT_ID, + "SourceRegion": 
identifier["region"], + "ResourceType": identifier["type"], + "ResourceId": identifier["id"], + } + if identifier.get("name"): + item["ResourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"ResourceIdentifiers": resource_identifiers} if new_token: result["NextToken"] = new_token @@ -948,9 +1006,9 @@ class ConfigBackend(BaseBackend): def get_resource_config_history(self, resource_type, id, backend_region): """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend. - NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PR's welcome!) - As such, the later_time, earlier_time, limit, and next_token are ignored as this will only - return 1 item. (If no items, it raises an exception) + NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PR's welcome!) + As such, the later_time, earlier_time, limit, and next_token are ignored as this will only + return 1 item. (If no items, it raises an exception) """ # If the type isn't implemented then we won't find the item: if resource_type not in RESOURCE_MAP: @@ -1032,10 +1090,10 @@ class ConfigBackend(BaseBackend): ): """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend. - As far a moto goes -- the only real difference between this function and the `batch_get_resource_config` function is that - this will require a Config Aggregator be set up a priori and can search based on resource regions. + As far a moto goes -- the only real difference between this function and the `batch_get_resource_config` function is that + this will require a Config Aggregator be set up a priori and can search based on resource regions. - Note: moto will IGNORE the resource account ID in the search query. + Note: moto will IGNORE the resource account ID in the search query. """ if not self.config_aggregators.get(aggregator_name): raise NoSuchConfigurationAggregatorException() @@ -1082,6 +1140,154 @@ class ConfigBackend(BaseBackend): "UnprocessedResourceIdentifiers": not_found, } + def put_evaluations(self, evaluations=None, result_token=None, test_mode=False): + if not evaluations: + raise InvalidParameterValueException( + "The Evaluations object in your request cannot be null." + "Add the required parameters and try again." + ) + + if not result_token: + raise InvalidResultTokenException() + + # Moto only supports PutEvaluations with test mode currently (missing rule and token support) + if not test_mode: + raise NotImplementedError( + "PutEvaluations without TestMode is not yet implemented" + ) + + return { + "FailedEvaluations": [], + } # At this time, moto is not adding failed evaluations. 
+ + def put_organization_conformance_pack( + self, + region, + name, + template_s3_uri, + template_body, + delivery_s3_bucket, + delivery_s3_key_prefix, + input_parameters, + excluded_accounts, + ): + # A real validation of the template content is still missing at the moment + if not template_s3_uri and not template_body: + raise ValidationException("Template body is invalid") + + if template_s3_uri and not re.match(r"s3://.*", template_s3_uri): + raise ValidationException( + "1 validation error detected: " + "Value '{}' at 'templateS3Uri' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "s3://.*".format(template_s3_uri) + ) + + pack = self.organization_conformance_packs.get(name) + + if pack: + pack.update( + delivery_s3_bucket=delivery_s3_bucket, + delivery_s3_key_prefix=delivery_s3_key_prefix, + input_parameters=input_parameters, + excluded_accounts=excluded_accounts, + ) + else: + pack = OrganizationConformancePack( + region=region, + name=name, + delivery_s3_bucket=delivery_s3_bucket, + delivery_s3_key_prefix=delivery_s3_key_prefix, + input_parameters=input_parameters, + excluded_accounts=excluded_accounts, + ) + + self.organization_conformance_packs[name] = pack + + return { + "OrganizationConformancePackArn": pack.organization_conformance_pack_arn + } + + def describe_organization_conformance_packs(self, names): + packs = [] + + for name in names: + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + packs.append(pack.to_dict()) + + return {"OrganizationConformancePacks": packs} + + def describe_organization_conformance_pack_statuses(self, names): + packs = [] + statuses = [] + + if names: + for name in names: + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + packs.append(pack) + else: + packs = list(self.organization_conformance_packs.values()) + + for pack in packs: + statuses.append( + { + "OrganizationConformancePackName": pack.organization_conformance_pack_name, + "Status": pack._status, + "LastUpdateTime": pack.last_update_time, + } + ) + + return {"OrganizationConformancePackStatuses": statuses} + + def get_organization_conformance_pack_detailed_status(self, name): + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." 
+ ) + + # actually here would be a list of all accounts in the organization + statuses = [ + { + "AccountId": DEFAULT_ACCOUNT_ID, + "ConformancePackName": "OrgConformsPack-{0}".format( + pack._unique_pack_name + ), + "Status": pack._status, + "LastUpdateTime": datetime2int(datetime.utcnow()), + } + ] + + return {"OrganizationConformancePackDetailedStatuses": statuses} + + def delete_organization_conformance_pack(self, name): + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "Could not find an OrganizationConformancePack for given request with resourceName {}".format( + name + ) + ) + + self.organization_conformance_packs.pop(name) + config_backends = {} for region in Session().get_available_regions("config"): diff --git a/moto/config/responses.py b/moto/config/responses.py index e977945c9..7dcc9a01b 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -151,3 +151,54 @@ class ConfigResponse(BaseResponse): self._get_param("ResourceIdentifiers"), ) return json.dumps(schema) + + def put_evaluations(self): + evaluations = self.config_backend.put_evaluations( + self._get_param("Evaluations"), + self._get_param("ResultToken"), + self._get_param("TestMode"), + ) + return json.dumps(evaluations) + + def put_organization_conformance_pack(self): + conformance_pack = self.config_backend.put_organization_conformance_pack( + region=self.region, + name=self._get_param("OrganizationConformancePackName"), + template_s3_uri=self._get_param("TemplateS3Uri"), + template_body=self._get_param("TemplateBody"), + delivery_s3_bucket=self._get_param("DeliveryS3Bucket"), + delivery_s3_key_prefix=self._get_param("DeliveryS3KeyPrefix"), + input_parameters=self._get_param("ConformancePackInputParameters"), + excluded_accounts=self._get_param("ExcludedAccounts"), + ) + + return json.dumps(conformance_pack) + + def describe_organization_conformance_packs(self): + conformance_packs = self.config_backend.describe_organization_conformance_packs( + self._get_param("OrganizationConformancePackNames") + ) + + return json.dumps(conformance_packs) + + def describe_organization_conformance_pack_statuses(self): + statuses = self.config_backend.describe_organization_conformance_pack_statuses( + self._get_param("OrganizationConformancePackNames") + ) + + return json.dumps(statuses) + + def get_organization_conformance_pack_detailed_status(self): + # 'Filters' parameter is not implemented yet + statuses = self.config_backend.get_organization_conformance_pack_detailed_status( + self._get_param("OrganizationConformancePackName") + ) + + return json.dumps(statuses) + + def delete_organization_conformance_pack(self): + self.config_backend.delete_organization_conformance_pack( + self._get_param("OrganizationConformancePackName") + ) + + return "" diff --git a/moto/core/__init__.py b/moto/core/__init__.py index 045124fab..09f5b1e16 100644 --- a/moto/core/__init__.py +++ b/moto/core/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import BaseModel, BaseBackend, moto_api_backend, ACCOUNT_ID # noqa +from .models import CloudFormationModel # noqa from .responses import ActionAuthenticatorMixin moto_api_backends = {"global": moto_api_backend} diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index ea91eda63..6938f9bf1 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals from werkzeug.exceptions import HTTPException from jinja2 import 
DictLoader, Environment +import json SINGLE_ERROR_RESPONSE = """ @@ -109,6 +110,22 @@ class AuthFailureError(RESTError): ) +class AWSError(Exception): + TYPE = None + STATUS = 400 + + def __init__(self, message, type=None, status=None): + self.message = message + self.type = type if type is not None else self.TYPE + self.status = status if status is not None else self.STATUS + + def response(self): + return ( + json.dumps({"__type": self.type, "message": self.message}), + dict(status=self.status), + ) + + class InvalidNextTokenException(JsonRESTError): """For AWS Config resource listing. This will be used by many different resource types, and so it is in moto.core.""" diff --git a/moto/core/models.py b/moto/core/models.py index 3be3bbd8e..2cd67188a 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -5,12 +5,19 @@ from __future__ import absolute_import import functools import inspect import os +import pkg_resources import re import six +import types +from abc import abstractmethod from io import BytesIO from collections import defaultdict +from botocore.config import Config from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse +from distutils.version import LooseVersion +from six.moves.urllib.parse import urlparse +from werkzeug.wrappers import Request import mock from moto import settings @@ -22,22 +29,23 @@ from .utils import ( convert_flask_to_responses_response, ) - ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") +RESPONSES_VERSION = pkg_resources.get_distribution("responses").version class BaseMockAWS(object): nested_count = 0 def __init__(self, backends): + from moto.instance_metadata import instance_metadata_backend + from moto.core import moto_api_backend + self.backends = backends self.backends_for_urls = {} - from moto.backends import BACKENDS - default_backends = { - "instance_metadata": BACKENDS["instance_metadata"]["global"], - "moto_api": BACKENDS["moto_api"]["global"], + "instance_metadata": instance_metadata_backend, + "moto_api": moto_api_backend, } self.backends_for_urls.update(self.backends) self.backends_for_urls.update(default_backends) @@ -174,6 +182,28 @@ class CallbackResponse(responses.CallbackResponse): """ Need to override this so we can pass decode_content=False """ + if not isinstance(request, Request): + url = urlparse(request.url) + if request.body is None: + body = None + elif isinstance(request.body, six.text_type): + body = six.BytesIO(six.b(request.body)) + elif hasattr(request.body, "read"): + body = six.BytesIO(request.body.read()) + else: + body = six.BytesIO(request.body) + req = Request.from_values( + path="?".join([url.path, url.query]), + input_stream=body, + content_length=request.headers.get("Content-Length"), + content_type=request.headers.get("Content-Type"), + method=request.method, + base_url="{scheme}://{netloc}".format( + scheme=url.scheme, netloc=url.netloc + ), + headers=[(k, v) for k, v in six.iteritems(request.headers)], + ) + request = req headers = self.get_headers() result = self.callback(request) @@ -217,12 +247,46 @@ botocore_mock = responses.RequestsMock( assert_all_requests_are_fired=False, target="botocore.vendored.requests.adapters.HTTPAdapter.send", ) + responses_mock = responses._default_mock # Add passthrough to allow any other requests to work # Since this uses .startswith, it applies to http and https requests. 
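# For example, both "http://localhost:5000/" and "https://example.com/" start
# with "http", so both are passed through to the real transport
# (illustrative note, not part of this patch).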
responses_mock.add_passthru("http") +def _find_first_match_legacy(self, request): + for i, match in enumerate(self._matches): + if match.matches(request): + return match + + return None + + +def _find_first_match(self, request): + match_failed_reasons = [] + for i, match in enumerate(self._matches): + match_result, reason = match.matches(request) + if match_result: + return match, match_failed_reasons + else: + match_failed_reasons.append(reason) + + return None, match_failed_reasons + + +# Modify behaviour of the matcher to only/always return the first match +# Default behaviour is to return subsequent matches for subsequent requests, which leads to https://github.com/spulec/moto/issues/2567 +# - First request matches on the appropriate S3 URL +# - Same request, executed again, will be matched on the subsequent match, which happens to be the catch-all, not-yet-implemented, callback +# Fix: Always return the first match +if LooseVersion(RESPONSES_VERSION) < LooseVersion("0.12.1"): + responses_mock._find_match = types.MethodType( + _find_first_match_legacy, responses_mock + ) +else: + responses_mock._find_match = types.MethodType(_find_first_match, responses_mock) + + BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] @@ -329,7 +393,7 @@ class BotocoreEventMockAWS(BaseMockAWS): responses_mock.add( CallbackResponse( method=method, - url=re.compile("https?://.+.amazonaws.com/.*"), + url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, @@ -338,7 +402,7 @@ class BotocoreEventMockAWS(BaseMockAWS): botocore_mock.add( CallbackResponse( method=method, - url=re.compile("https?://.+.amazonaws.com/.*"), + url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, @@ -373,6 +437,13 @@ class ServerModeMockAWS(BaseMockAWS): import mock def fake_boto3_client(*args, **kwargs): + region = self._get_region(*args, **kwargs) + if region: + if "config" in kwargs: + kwargs["config"].__dict__["user_agent_extra"] += " region/" + region + else: + config = Config(user_agent_extra="region/" + region) + kwargs["config"] = config if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) @@ -420,6 +491,14 @@ class ServerModeMockAWS(BaseMockAWS): if six.PY2: self._httplib_patcher.start() + def _get_region(self, *args, **kwargs): + if "region_name" in kwargs: + return kwargs["region_name"] + if type(args) == tuple and len(args) == 2: + service, region = args + return region + return None + def disable_patching(self): if self._client_patcher: self._client_patcher.stop() @@ -475,6 +554,56 @@ class BaseModel(object): return instance +# Parent class for every Model that can be instantiated by CloudFormation +# On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser +class CloudFormationModel(BaseModel): + @staticmethod + @abstractmethod + def cloudformation_name_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html + # This must be implemented as a staticmethod with no parameters + # Return None for resources that do not have a name property + pass + + @staticmethod + @abstractmethod + def cloudformation_type(): + # This must be implemented as a staticmethod with no parameters + # See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return 
"AWS::SERVICE::RESOURCE" + + @abstractmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + # This must be implemented as a classmethod with parameters: + # cls, resource_name, cloudformation_json, region_name + # Extract the resource parameters from the cloudformation json + # and return an instance of the resource class + pass + + @abstractmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + # This must be implemented as a classmethod with parameters: + # cls, original_resource, new_resource_name, cloudformation_json, region_name + # Extract the resource parameters from the cloudformation json, + # delete the old resource and return the new one. Optionally inspect + # the change in parameters and no-op when nothing has changed. + pass + + @abstractmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + # This must be implemented as a classmethod with parameters: + # cls, resource_name, cloudformation_json, region_name + # Extract the resource parameters from the cloudformation json + # and delete the resource. Do not include a return statement. + pass + + class BaseBackend(object): def _reset_model_refs(self): # Remove all references to the models stored @@ -582,6 +711,7 @@ class ConfigQueryModel(object): next_token, backend_region=None, resource_region=None, + aggregator=None, ): """For AWS Config. This will list all of the resources of the given type and optional resource name and region. @@ -606,12 +736,17 @@ class ConfigQueryModel(object): As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter from there. It may be valuable to make this a concatenation of the region and resource name. - :param resource_region: - :param resource_ids: - :param resource_name: - :param limit: - :param next_token: + :param resource_ids: A list of resource IDs + :param resource_name: The individual name of a resource + :param limit: How many per page + :param next_token: The item that will page on :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. + :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a + non-aggregated query. + :param aggregator: If the query is an aggregated query, *AND* the resource has "non-standard" aggregation logic (mainly, IAM), + you'll need to pass aggregator used. In most cases, this should be omitted/set to `None`. See the + conditional logic under `if aggregator` in the moto/iam/config.py for the IAM example. 
+ :return: This should return a list of Dicts that have the following fields: [ { @@ -680,12 +815,12 @@ class deprecated_base_decorator(base_decorator): class MotoAPIBackend(BaseBackend): def reset(self): - from moto.backends import BACKENDS + import moto.backends as backends - for name, backends in BACKENDS.items(): + for name, backends_ in backends.named_backends(): if name == "moto_api": continue - for region_name, backend in backends.items(): + for region_name, backend in backends_.items(): backend.reset() self.__init__() diff --git a/moto/core/responses.py b/moto/core/responses.py index c708edb8b..1149ab0be 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -11,16 +11,14 @@ import requests import pytz -from moto.core.access_control import IAMRequest, S3IAMRequest from moto.core.exceptions import DryRunClientError from jinja2 import Environment, DictLoader, TemplateNotFound import six -from six.moves.urllib.parse import parse_qs, urlparse +from six.moves.urllib.parse import parse_qs, parse_qsl, urlparse import xmltodict -from pkg_resources import resource_filename from werkzeug.exceptions import HTTPException import boto3 @@ -32,7 +30,7 @@ log = logging.getLogger(__name__) def _decode_dict(d): - decoded = {} + decoded = OrderedDict() for key, value in d.items(): if isinstance(key, six.binary_type): newkey = key.decode("utf-8") @@ -64,9 +62,9 @@ def _decode_dict(d): class DynamicDictLoader(DictLoader): """ - Note: There's a bug in jinja2 pre-2.7.3 DictLoader where caching does not work. - Including the fixed (current) method version here to ensure performance benefit - even for those using older jinja versions. + Note: There's a bug in jinja2 pre-2.7.3 DictLoader where caching does not work. + Including the fixed (current) method version here to ensure performance benefit + even for those using older jinja versions. """ def get_source(self, environment, template): @@ -135,9 +133,13 @@ class ActionAuthenticatorMixin(object): ActionAuthenticatorMixin.request_count += 1 def _authenticate_and_authorize_normal_action(self): + from moto.iam.access_control import IAMRequest + self._authenticate_and_authorize_action(IAMRequest) def _authenticate_and_authorize_s3_action(self): + from moto.iam.access_control import S3IAMRequest + self._authenticate_and_authorize_action(S3IAMRequest) @staticmethod @@ -186,6 +188,9 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): default_region = "us-east-1" # to extract region, use [^.] 
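# e.g. for the host "sqs.us-west-2.amazonaws.com" the named group below
# captures the region "us-west-2" (illustrative note, not part of this patch)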
region_regex = re.compile(r"\.(?P<region>[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com") + region_from_useragent_regex = re.compile( + r"region/(?P<region>[a-z]{2}-[a-z]+-\d{1})" + ) param_list_regex = re.compile(r"(.*)\.(\d+)\.") access_key_regex = re.compile( r"AWS.*(?P(?'.format(match_pattern, match_name) - url_path = re.sub("\(\?P<(.*?)>(.*?)\)", caller, url_path) + url_path = re.sub(r"\(\?P<(.*?)>(.*?)\)", caller, url_path) if url_path.endswith("/?"): # Flask does own handling of trailing slashes @@ -187,7 +192,13 @@ def iso_8601_datetime_with_milliseconds(datetime): def iso_8601_datetime_without_milliseconds(datetime): - return datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z" + return None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z" + + +def iso_8601_datetime_without_milliseconds_s3(datetime): + return ( + None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S.000") + "Z" + ) RFC1123 = "%a, %d %b %Y %H:%M:%S GMT" @@ -328,3 +339,63 @@ def py2_strip_unicode_keys(blob): blob = new_set return blob + + +def tags_from_query_string( + querystring_dict, prefix="Tag", key_suffix="Key", value_suffix="Value" +): + response_values = {} + for key, value in querystring_dict.items(): + if key.startswith(prefix) and key.endswith(key_suffix): + tag_index = key.replace(prefix + ".", "").replace("." + key_suffix, "") + tag_key = querystring_dict.get( + "{prefix}.{index}.{key_suffix}".format( + prefix=prefix, index=tag_index, key_suffix=key_suffix, + ) + )[0] + tag_value_key = "{prefix}.{index}.{value_suffix}".format( + prefix=prefix, index=tag_index, value_suffix=value_suffix, + ) + if tag_value_key in querystring_dict: + response_values[tag_key] = querystring_dict.get(tag_value_key)[0] + else: + response_values[tag_key] = None + return response_values + + +def tags_from_cloudformation_tags_list(tags_list): + """Return tags in dict form from cloudformation resource tags form (list of dicts)""" + tags = {} + for entry in tags_list: + key = entry["Key"] + value = entry["Value"] + tags[key] = value + + return tags + + +def remap_nested_keys(root, key_transform): + """This remap ("recursive map") function is used to traverse and + transform the dictionary keys of arbitrarily nested structures. + List comprehensions do not recurse, making it tedious to apply + transforms to all keys in a tree-like structure. + + A common issue for `moto` is changing the casing of dict keys: + + >>> remap_nested_keys({'KeyName': 'Value'}, camelcase_to_underscores) + {'key_name': 'Value'} + + Args: + root: The target data to traverse. Supports iterables like + :class:`list`, :class:`tuple`, and :class:`dict`. + key_transform (callable): This function is called on every + dictionary key found in *root*. 
+ """ + if isinstance(root, (list, tuple)): + return [remap_nested_keys(item, key_transform) for item in root] + if isinstance(root, dict): + return { + key_transform(k): remap_nested_keys(v, key_transform) + for k, v in six.iteritems(root) + } + return root diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py index d93deea61..e517b8f3e 100644 --- a/moto/datapipeline/models.py +++ b/moto/datapipeline/models.py @@ -4,7 +4,7 @@ import datetime from boto3 import Session from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys @@ -18,7 +18,7 @@ class PipelineObject(BaseModel): return {"fields": self.fields, "id": self.object_id, "name": self.name} -class Pipeline(BaseModel): +class Pipeline(CloudFormationModel): def __init__(self, name, unique_id, **kwargs): self.name = name self.unique_id = unique_id @@ -74,6 +74,15 @@ class Pipeline(BaseModel): def activate(self): self.status = "SCHEDULED" + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-datapipeline-pipeline.html + return "AWS::DataPipeline::Pipeline" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -81,9 +90,9 @@ class Pipeline(BaseModel): datapipeline_backend = datapipeline_backends[region_name] properties = cloudformation_json["Properties"] - cloudformation_unique_id = "cf-" + properties["Name"] + cloudformation_unique_id = "cf-" + resource_name pipeline = datapipeline_backend.create_pipeline( - properties["Name"], cloudformation_unique_id + resource_name, cloudformation_unique_id ) datapipeline_backend.put_pipeline_definition( pipeline.pipeline_id, properties["PipelineObjects"] diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index f5771ec6e..1a3b4afce 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -4,7 +4,7 @@ import datetime import json from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.core import ACCOUNT_ID from .comparisons import get_comparison_func @@ -82,7 +82,7 @@ class Item(BaseModel): return {"Item": included} -class Table(BaseModel): +class Table(CloudFormationModel): def __init__( self, name, @@ -135,6 +135,15 @@ class Table(BaseModel): } return results + @staticmethod + def cloudformation_name_type(): + return "TableName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return "AWS::DynamoDB::Table" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index 3d6e8ec1f..d141511c8 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from .models import dynamodb_backends as dynamodb_backends2 +from moto.dynamodb2.models import dynamodb_backends as dynamodb_backends2 from ..core.models import base_decorator, deprecated_base_decorator dynamodb_backend2 = dynamodb_backends2["us-east-1"] diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 
29951d92d..d17ae6875 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -251,9 +251,9 @@ class ConditionExpressionParser: def _lex_one_node(self, remaining_expression): # TODO: Handle indexing like [1] - attribute_regex = "(:|#)?[A-z0-9\-_]+" + attribute_regex = r"(:|#)?[A-z0-9\-_]+" patterns = [ - (self.Nonterminal.WHITESPACE, re.compile("^ +")), + (self.Nonterminal.WHITESPACE, re.compile(r"^ +")), ( self.Nonterminal.COMPARATOR, re.compile( @@ -270,12 +270,14 @@ class ConditionExpressionParser: ( self.Nonterminal.OPERAND, re.compile( - "^" + attribute_regex + "(\." + attribute_regex + "|\[[0-9]\])*" + r"^{attribute_regex}(\.{attribute_regex}|\[[0-9]\])*".format( + attribute_regex=attribute_regex + ) ), ), - (self.Nonterminal.COMMA, re.compile("^,")), - (self.Nonterminal.LEFT_PAREN, re.compile("^\(")), - (self.Nonterminal.RIGHT_PAREN, re.compile("^\)")), + (self.Nonterminal.COMMA, re.compile(r"^,")), + (self.Nonterminal.LEFT_PAREN, re.compile(r"^\(")), + (self.Nonterminal.RIGHT_PAREN, re.compile(r"^\)")), ] for nonterminal, pattern in patterns: @@ -285,7 +287,7 @@ class ConditionExpressionParser: break else: # pragma: no cover raise ValueError( - "Cannot parse condition starting at: " + remaining_expression + "Cannot parse condition starting at: {}".format(remaining_expression) ) node = self.Node( @@ -318,7 +320,7 @@ for child in children: self._assert( child.nonterminal == self.Nonterminal.IDENTIFIER, - "Cannot use %s in path" % child.text, + "Cannot use {} in path".format(child.text), [node], ) output.append( @@ -392,7 +394,7 @@ class ConditionExpressionParser: elif name.startswith("["): # e.g. [123] if not name.endswith("]"): # pragma: no cover - raise ValueError("Bad path element %s" % name) + raise ValueError("Bad path element {}".format(name)) return self.Node( nonterminal=self.Nonterminal.IDENTIFIER, kind=self.Kind.LITERAL, diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 1f3b5f974..01b98b35d 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -2,9 +2,172 @@ class InvalidIndexNameError(ValueError): pass -class InvalidUpdateExpression(ValueError): - pass +class MockValidationException(ValueError): + def __init__(self, message): + self.exception_msg = message -class ItemSizeTooLarge(Exception): - message = "Item size has exceeded the maximum allowed size" +class InvalidUpdateExpressionInvalidDocumentPath(MockValidationException): + invalid_update_expression_msg = ( + "The document path provided in the update expression is invalid for update" + ) + + def __init__(self): + super(InvalidUpdateExpressionInvalidDocumentPath, self).__init__( + self.invalid_update_expression_msg + ) + + +class InvalidUpdateExpression(MockValidationException): + invalid_update_expr_msg = "Invalid UpdateExpression: {update_expression_error}" + + def __init__(self, update_expression_error): + self.update_expression_error = update_expression_error + super(InvalidUpdateExpression, self).__init__( + self.invalid_update_expr_msg.format( + update_expression_error=update_expression_error + ) + ) + + +class AttributeDoesNotExist(MockValidationException): + attr_does_not_exist_msg = ( + "The provided expression refers to an attribute that does not exist in the item" + ) + + def __init__(self): + super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg) + + +class ProvidedKeyDoesNotExist(MockValidationException): + provided_key_does_not_exist_msg = ( + "The provided key element does
not match the schema" + ) + + def __init__(self): + super(ProvidedKeyDoesNotExist, self).__init__( + self.provided_key_does_not_exist_msg + ) + + +class ExpressionAttributeNameNotDefined(InvalidUpdateExpression): + name_not_defined_msg = "An expression attribute name used in the document path is not defined; attribute name: {n}" + + def __init__(self, attribute_name): + self.not_defined_attribute_name = attribute_name + super(ExpressionAttributeNameNotDefined, self).__init__( + self.name_not_defined_msg.format(n=attribute_name) + ) + + +class AttributeIsReservedKeyword(InvalidUpdateExpression): + attribute_is_keyword_msg = ( + "Attribute name is a reserved keyword; reserved keyword: {keyword}" + ) + + def __init__(self, keyword): + self.keyword = keyword + super(AttributeIsReservedKeyword, self).__init__( + self.attribute_is_keyword_msg.format(keyword=keyword) + ) + + +class ExpressionAttributeValueNotDefined(InvalidUpdateExpression): + attr_value_not_defined_msg = "An expression attribute value used in expression is not defined; attribute value: {attribute_value}" + + def __init__(self, attribute_value): + self.attribute_value = attribute_value + super(ExpressionAttributeValueNotDefined, self).__init__( + self.attr_value_not_defined_msg.format(attribute_value=attribute_value) + ) + + +class UpdateExprSyntaxError(InvalidUpdateExpression): + update_expr_syntax_error_msg = "Syntax error; {error_detail}" + + def __init__(self, error_detail): + self.error_detail = error_detail + super(UpdateExprSyntaxError, self).__init__( + self.update_expr_syntax_error_msg.format(error_detail=error_detail) + ) + + +class InvalidTokenException(UpdateExprSyntaxError): + token_detail_msg = 'token: "{token}", near: "{near}"' + + def __init__(self, token, near): + self.token = token + self.near = near + super(InvalidTokenException, self).__init__( + self.token_detail_msg.format(token=token, near=near) + ) + + +class InvalidExpressionAttributeNameKey(MockValidationException): + invalid_expr_attr_name_msg = ( + 'ExpressionAttributeNames contains invalid key: Syntax error; key: "{key}"' + ) + + def __init__(self, key): + self.key = key + super(InvalidExpressionAttributeNameKey, self).__init__( + self.invalid_expr_attr_name_msg.format(key=key) + ) + + +class ItemSizeTooLarge(MockValidationException): + item_size_too_large_msg = "Item size has exceeded the maximum allowed size" + + def __init__(self): + super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg) + + +class ItemSizeToUpdateTooLarge(MockValidationException): + item_size_to_update_too_large_msg = ( + "Item size to update has exceeded the maximum allowed size" + ) + + def __init__(self): + super(ItemSizeToUpdateTooLarge, self).__init__( + self.item_size_to_update_too_large_msg + ) + + +class IncorrectOperandType(InvalidUpdateExpression): + inv_operand_msg = "Incorrect operand type for operator or function; operator or function: {f}, operand type: {t}" + + def __init__(self, operator_or_function, operand_type): + self.operator_or_function = operator_or_function + self.operand_type = operand_type + super(IncorrectOperandType, self).__init__( + self.inv_operand_msg.format(f=operator_or_function, t=operand_type) + ) + + +class IncorrectDataType(MockValidationException): + inc_data_type_msg = "An operand in the update expression has an incorrect data type" + + def __init__(self): + super(IncorrectDataType, self).__init__(self.inc_data_type_msg) + + +class ConditionalCheckFailed(ValueError): + msg = "The conditional request failed" + + def 
__init__(self): + super(ConditionalCheckFailed, self).__init__(self.msg) + + +class TransactionCanceledException(ValueError): + cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]" + + def __init__(self, errors): + msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors])) + super(TransactionCanceledException, self).__init__(msg) + + +class EmptyKeyAttributeException(MockValidationException): + empty_str_msg = "One or more parameter values were invalid: An AttributeValue may not contain an empty string" + + def __init__(self): + super(EmptyKeyAttributeException, self).__init__(self.empty_str_msg) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models/__init__.py similarity index 67% rename from moto/dynamodb2/models.py rename to moto/dynamodb2/models/__init__.py index 2313a6e41..7218fe0c9 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models/__init__.py @@ -6,18 +6,27 @@ import decimal import json import re import uuid -import six from boto3 import Session -from botocore.exceptions import ParamValidationError from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError -from .comparisons import get_comparison_func -from .comparisons import get_filter_expression -from .comparisons import get_expected -from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge +from moto.dynamodb2.comparisons import get_filter_expression +from moto.dynamodb2.comparisons import get_expected +from moto.dynamodb2.exceptions import ( + InvalidIndexNameError, + ItemSizeTooLarge, + ItemSizeToUpdateTooLarge, + ConditionalCheckFailed, + TransactionCanceledException, + EmptyKeyAttributeException, +) +from moto.dynamodb2.models.utilities import bytesize +from moto.dynamodb2.models.dynamo_type import DynamoType +from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator class DynamoJsonEncoder(json.JSONEncoder): @@ -30,220 +39,6 @@ def dynamo_json_dump(dynamo_object): return json.dumps(dynamo_object, cls=DynamoJsonEncoder) -def bytesize(val): - return len(str(val).encode("utf-8")) - - -def attribute_is_list(attr): - """ - Checks if attribute denotes a list, and returns the name of the list and the given list index if so - :param attr: attr or attr[index] - :return: attr, index or None - """ - list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr) - if list_index_update: - attr = list_index_update.group(1) - return attr, list_index_update.group(2) if list_index_update else None - - -class DynamoType(object): - """ - http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes - """ - - def __init__(self, type_as_dict): - if type(type_as_dict) == DynamoType: - self.type = type_as_dict.type - self.value = type_as_dict.value - else: - self.type = list(type_as_dict)[0] - self.value = list(type_as_dict.values())[0] - if self.is_list(): - self.value = [DynamoType(val) for val in self.value] - elif self.is_map(): - self.value = dict((k, DynamoType(v)) for k, v in self.value.items()) - - def get(self, key): - if not key: - return self - else: - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - if key_head not in self.value: - 
self.value[key_head] = DynamoType({"NONE": None}) - return self.value[key_head].get(key_tail) - - def set(self, key, new_value, index=None): - if index: - index = int(index) - if type(self.value) is not list: - raise InvalidUpdateExpression - if index >= len(self.value): - self.value.append(new_value) - # {'L': [DynamoType, ..]} ==> DynamoType.set() - self.value[min(index, len(self.value) - 1)].set(key, new_value) - else: - attr = (key or "").split(".").pop(0) - attr, list_index = attribute_is_list(attr) - if not key: - # {'S': value} ==> {'S': new_value} - self.type = new_value.type - self.value = new_value.value - else: - if attr not in self.value: # nonexistingattribute - type_of_new_attr = "M" if "." in key else new_value.type - self.value[attr] = DynamoType({type_of_new_attr: {}}) - # {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value) - self.value[attr].set( - ".".join(key.split(".")[1:]), new_value, list_index - ) - - def delete(self, key, index=None): - if index: - if not key: - if int(index) < len(self.value): - del self.value[int(index)] - elif "." in key: - self.value[int(index)].delete(".".join(key.split(".")[1:])) - else: - self.value[int(index)].delete(key) - else: - attr = key.split(".")[0] - attr, list_index = attribute_is_list(attr) - - if list_index: - self.value[attr].delete(".".join(key.split(".")[1:]), list_index) - elif "." in key: - self.value[attr].delete(".".join(key.split(".")[1:])) - else: - self.value.pop(key) - - def filter(self, projection_expressions): - nested_projections = [ - expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr - ] - if self.is_map(): - expressions_to_delete = [] - for attr in self.value: - if ( - attr not in projection_expressions - and attr not in nested_projections - ): - expressions_to_delete.append(attr) - elif attr in nested_projections: - relevant_expressions = [ - expr[len(attr + ".") :] - for expr in projection_expressions - if expr.startswith(attr + ".") - ] - self.value[attr].filter(relevant_expressions) - for expr in expressions_to_delete: - self.value.pop(expr) - - def __hash__(self): - return hash((self.type, self.value)) - - def __eq__(self, other): - return self.type == other.type and self.value == other.value - - def __lt__(self, other): - return self.cast_value < other.cast_value - - def __le__(self, other): - return self.cast_value <= other.cast_value - - def __gt__(self, other): - return self.cast_value > other.cast_value - - def __ge__(self, other): - return self.cast_value >= other.cast_value - - def __repr__(self): - return "DynamoType: {0}".format(self.to_json()) - - @property - def cast_value(self): - if self.is_number(): - try: - return int(self.value) - except ValueError: - return float(self.value) - elif self.is_set(): - sub_type = self.type[0] - return set([DynamoType({sub_type: v}).cast_value for v in self.value]) - elif self.is_list(): - return [DynamoType(v).cast_value for v in self.value] - elif self.is_map(): - return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()]) - else: - return self.value - - def child_attr(self, key): - """ - Get Map or List children by key. str for Map, int for List. - - Returns DynamoType or None. - """ - if isinstance(key, six.string_types) and self.is_map(): - if "." in key and key.split(".")[0] in self.value: - return self.value[key.split(".")[0]].child_attr( - ".".join(key.split(".")[1:]) - ) - elif "." 
not in key and key in self.value: - return DynamoType(self.value[key]) - - if isinstance(key, int) and self.is_list(): - idx = key - if 0 <= idx < len(self.value): - return DynamoType(self.value[idx]) - - return None - - def size(self): - if self.is_number(): - value_size = len(str(self.value)) - elif self.is_set(): - sub_type = self.type[0] - value_size = sum([DynamoType({sub_type: v}).size() for v in self.value]) - elif self.is_list(): - value_size = sum([v.size() for v in self.value]) - elif self.is_map(): - value_size = sum( - [bytesize(k) + DynamoType(v).size() for k, v in self.value.items()] - ) - elif type(self.value) == bool: - value_size = 1 - else: - value_size = bytesize(self.value) - return value_size - - def to_json(self): - return {self.type: self.value} - - def compare(self, range_comparison, range_objs): - """ - Compares this type against comparison filters - """ - range_values = [obj.cast_value for obj in range_objs] - comparison_func = get_comparison_func(range_comparison) - return comparison_func(self.cast_value, *range_values) - - def is_number(self): - return self.type == "N" - - def is_set(self): - return self.type == "SS" or self.type == "NS" or self.type == "BS" - - def is_list(self): - return self.type == "L" - - def is_map(self): - return self.type == "M" - - def same_type(self, other): - return self.type == other.type - - # https://github.com/spulec/moto/issues/1874 # Ensure that the total size of an item does not exceed 400kb class LimitedSizeDict(dict): @@ -279,9 +74,23 @@ class Item(BaseModel): for key, value in attrs.items(): self.attrs[key] = DynamoType(value) + def __eq__(self, other): + return all( + [ + self.hash_key == other.hash_key, + self.hash_key_type == other.hash_key_type, + self.range_key == other.range_key, + self.range_key_type == other.range_key_type, + self.attrs == other.attrs, + ] + ) + def __repr__(self): return "Item: {0}".format(self.to_json()) + def size(self): + return sum(bytesize(key) + value.size() for key, value in self.attrs.items()) + def to_json(self): attributes = {} for attribute_key, attribute in self.attrs.items(): @@ -299,187 +108,19 @@ class Item(BaseModel): included = self.attrs return {"Item": included} - def update( - self, update_expression, expression_attribute_names, expression_attribute_values - ): - # Update subexpressions are identifiable by the operator keyword, so split on that and - # get rid of the empty leading string. - parts = [ - p - for p in re.split( - r"\b(SET|REMOVE|ADD|DELETE)\b", update_expression, flags=re.I - ) - if p - ] - # make sure that we correctly found only operator/value pairs - assert ( - len(parts) % 2 == 0 - ), "Mismatched operators and values in update expression: '{}'".format( - update_expression - ) - for action, valstr in zip(parts[:-1:2], parts[1::2]): - action = action.upper() - - # "Should" retain arguments in side (...) - values = re.split(r",(?![^(]*\))", valstr) - for value in values: - # A Real value - value = value.lstrip(":").rstrip(",").strip() - for k, v in expression_attribute_names.items(): - value = re.sub(r"{0}\b".format(k), v, value) - - if action == "REMOVE": - key = value - attr, list_index = attribute_is_list(key.split(".")[0]) - if "." 
not in key: - if list_index: - new_list = DynamoType(self.attrs[attr]) - new_list.delete(None, list_index) - self.attrs[attr] = new_list - else: - self.attrs.pop(value, None) - else: - # Handle nested dict updates - self.attrs[attr].delete(".".join(key.split(".")[1:])) - elif action == "SET": - key, value = value.split("=", 1) - key = key.strip() - value = value.strip() - - # check whether key is a list - attr, list_index = attribute_is_list(key.split(".")[0]) - # If value not exists, changes value to a default if needed, else its the same as it was - value = self._get_default(value) - # If operation == list_append, get the original value and append it - value = self._get_appended_list(value, expression_attribute_values) - - if type(value) != DynamoType: - if value in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - dyn_value = DynamoType({"S": value}) - else: - dyn_value = value - - if "." in key and attr not in self.attrs: - raise ValueError # Setting nested attr not allowed if first attr does not exist yet - elif attr not in self.attrs: - self.attrs[attr] = dyn_value # set new top-level attribute - else: - self.attrs[attr].set( - ".".join(key.split(".")[1:]), dyn_value, list_index - ) # set value recursively - - elif action == "ADD": - key, value = value.split(" ", 1) - key = key.strip() - value_str = value.strip() - if value_str in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - raise TypeError - - # Handle adding numbers - value gets added to existing value, - # or added to 0 if it doesn't exist yet - if dyn_value.is_number(): - existing = self.attrs.get(key, DynamoType({"N": "0"})) - if not existing.same_type(dyn_value): - raise TypeError() - self.attrs[key] = DynamoType( - { - "N": str( - decimal.Decimal(existing.value) - + decimal.Decimal(dyn_value.value) - ) - } - ) - - # Handle adding sets - value is added to the set, or set is - # created with only this value if it doesn't exist yet - # New value must be of same set type as previous value - elif dyn_value.is_set(): - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - if key_head not in self.attrs: - self.attrs[key_head] = DynamoType({dyn_value.type: {}}) - existing = self.attrs.get(key_head) - existing = existing.get(key_tail) - if existing.value and not existing.same_type(dyn_value): - raise TypeError() - new_set = set(existing.value or []).union(dyn_value.value) - existing.set( - key=None, - new_value=DynamoType({dyn_value.type: list(new_set)}), - ) - else: # Number and Sets are the only supported types for ADD - raise TypeError - - elif action == "DELETE": - key, value = value.split(" ", 1) - key = key.strip() - value_str = value.strip() - if value_str in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - raise TypeError - - if not dyn_value.is_set(): - raise TypeError - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - existing = self.attrs.get(key_head) - existing = existing.get(key_tail) - if existing: - if not existing.same_type(dyn_value): - raise TypeError - new_set = set(existing.value).difference(dyn_value.value) - existing.set( - key=None, - new_value=DynamoType({existing.type: list(new_set)}), - ) - else: - raise NotImplementedError( - "{} update action not yet supported".format(action) - ) - - def _get_appended_list(self, value, expression_attribute_values): - if type(value) != DynamoType: - list_append_re = 
re.match("list_append\\((.+),(.+)\\)", value) - if list_append_re: - new_value = expression_attribute_values[list_append_re.group(2).strip()] - old_list_key = list_append_re.group(1) - # Get the existing value - old_list = self.attrs[old_list_key.split(".")[0]] - if "." in old_list_key: - # Value is nested inside a map - find the appropriate child attr - old_list = old_list.child_attr( - ".".join(old_list_key.split(".")[1:]) - ) - if not old_list.is_list(): - raise ParamValidationError - old_list.value.extend([DynamoType(v) for v in new_value["L"]]) - value = old_list - return value - - def _get_default(self, value): - if value.startswith("if_not_exists"): - # Function signature - match = re.match( - r".*if_not_exists\s*\((?P.+),\s*(?P.+)\).*", value - ) - if not match: - raise TypeError - - path, value = match.groups() - - # If it already exists, get its value so we dont overwrite it - if path in self.attrs: - value = self.attrs[path] - return value + def validate_no_empty_key_values(self, attribute_updates, key_attributes): + for attribute_name, update_action in attribute_updates.items(): + action = update_action.get("Action") or "PUT" # PUT is default + new_value = next(iter(update_action["Value"].values())) + if action == "PUT" and new_value == "" and attribute_name in key_attributes: + raise EmptyKeyAttributeException def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): - action = update_action["Action"] + # Use default Action value, if no explicit Action is passed. + # Default value is 'Put', according to + # Boto3 DynamoDB.Client.update_item documentation. + action = update_action.get("Action", "PUT") if action == "DELETE" and "Value" not in update_action: if attribute_name in self.attrs: del self.attrs[attribute_name] @@ -615,7 +256,7 @@ class StreamShard(BaseModel): if old is None: event_name = "INSERT" elif new is None: - event_name = "DELETE" + event_name = "REMOVE" else: event_name = "MODIFY" seq = len(self.items) + self.starting_sequence_number @@ -642,7 +283,102 @@ class StreamShard(BaseModel): return [i.to_json() for i in self.items[start:end]] -class Table(BaseModel): +class SecondaryIndex(BaseModel): + def project(self, item): + """ + Enforces the ProjectionType of this Index (LSI/GSI) + Removes any non-wanted attributes from the item + :param item: + :return: + """ + if self.projection: + projection_type = self.projection.get("ProjectionType", None) + key_attributes = self.table_key_attrs + [ + key["AttributeName"] for key in self.schema + ] + + if projection_type == "KEYS_ONLY": + item.filter(",".join(key_attributes)) + elif projection_type == "INCLUDE": + allowed_attributes = key_attributes + self.projection.get( + "NonKeyAttributes", [] + ) + item.filter(",".join(allowed_attributes)) + # ALL is handled implicitly by not filtering + return item + + +class LocalSecondaryIndex(SecondaryIndex): + def __init__(self, index_name, schema, projection, table_key_attrs): + self.name = index_name + self.schema = schema + self.projection = projection + self.table_key_attrs = table_key_attrs + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + } + + @staticmethod + def create(dct, table_key_attrs): + return LocalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + table_key_attrs=table_key_attrs, + ) + + +class GlobalSecondaryIndex(SecondaryIndex): + def __init__( + self, + index_name, + 
schema, + projection, + table_key_attrs, + status="ACTIVE", + throughput=None, + ): + self.name = index_name + self.schema = schema + self.projection = projection + self.table_key_attrs = table_key_attrs + self.status = status + self.throughput = throughput or { + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + } + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + "IndexStatus": self.status, + "ProvisionedThroughput": self.throughput, + } + + @staticmethod + def create(dct, table_key_attrs): + return GlobalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + table_key_attrs=table_key_attrs, + throughput=dct.get("ProvisionedThroughput", None), + ) + + def update(self, u): + self.name = u.get("IndexName", self.name) + self.schema = u.get("KeySchema", self.schema) + self.projection = u.get("Projection", self.projection) + self.throughput = u.get("ProvisionedThroughput", self.throughput) + + +class Table(CloudFormationModel): def __init__( self, table_name, @@ -667,13 +403,22 @@ class Table(BaseModel): else: self.range_key_attr = elem["AttributeName"] self.range_key_type = elem["KeyType"] + self.table_key_attrs = [ + key for key in (self.hash_key_attr, self.range_key_attr) if key + ] if throughput is None: self.throughput = {"WriteCapacityUnits": 10, "ReadCapacityUnits": 10} else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 - self.indexes = indexes - self.global_indexes = global_indexes if global_indexes else [] + self.indexes = [ + LocalSecondaryIndex.create(i, self.table_key_attrs) + for i in (indexes if indexes else []) + ] + self.global_indexes = [ + GlobalSecondaryIndex.create(i, self.table_key_attrs) + for i in (global_indexes if global_indexes else []) + ] self.created_at = datetime.datetime.utcnow() self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) @@ -684,6 +429,47 @@ class Table(BaseModel): } self.set_stream_specification(streams) self.lambda_event_source_mappings = {} + self.continuous_backups = { + "ContinuousBackupsStatus": "ENABLED", # One of 'ENABLED'|'DISABLED', it's enabled by default + "PointInTimeRecoveryDescription": { + "PointInTimeRecoveryStatus": "DISABLED" # One of 'ENABLED'|'DISABLED' + }, + } + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.table_arn + elif attribute_name == "StreamArn" and self.stream_specification: + return self.describe()["TableDescription"]["LatestStreamArn"] + + raise UnformattedGetAttTemplateException() + + @property + def physical_resource_id(self): + return self.name + + @property + def key_attributes(self): + # A set of all the hash or range attributes for all indexes + def keys_from_index(idx): + schema = idx.schema + return [attr["AttributeName"] for attr in schema] + + fieldnames = copy.copy(self.table_key_attrs) + for idx in self.indexes + self.global_indexes: + fieldnames += keys_from_index(idx) + return fieldnames + + @staticmethod + def cloudformation_name_type(): + return "TableName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return "AWS::DynamoDB::Table" @classmethod def create_from_cloudformation_json( @@ -702,12 +488,21 @@ class Table(BaseModel): params["throughput"] = properties["ProvisionedThroughput"] if 
"LocalSecondaryIndexes" in properties: params["indexes"] = properties["LocalSecondaryIndexes"] + if "StreamSpecification" in properties: + params["streams"] = properties["StreamSpecification"] table = dynamodb_backends[region_name].create_table( - name=properties["TableName"], **params + name=resource_name, **params ) return table + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + table = dynamodb_backends[region_name].delete_table(name=resource_name) + return table + def _generate_arn(self, name): return "arn:aws:dynamodb:us-east-1:123456789011:table/" + name @@ -734,8 +529,10 @@ class Table(BaseModel): "KeySchema": self.schema, "ItemCount": len(self), "CreationDateTime": unix_time(self.created_at), - "GlobalSecondaryIndexes": [index for index in self.global_indexes], - "LocalSecondaryIndexes": [index for index in self.indexes], + "GlobalSecondaryIndexes": [ + index.describe() for index in self.global_indexes + ], + "LocalSecondaryIndexes": [index.describe() for index in self.indexes], } } if self.stream_specification and self.stream_specification["StreamEnabled"]: @@ -761,7 +558,7 @@ class Table(BaseModel): keys = [self.hash_key_attr] for index in self.global_indexes: hash_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "HASH": hash_key = key["AttributeName"] keys.append(hash_key) @@ -772,7 +569,7 @@ class Table(BaseModel): keys = [self.range_key_attr] for index in self.global_indexes: range_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "RANGE": range_key = keys.append(key["AttributeName"]) keys.append(range_key) @@ -787,8 +584,20 @@ class Table(BaseModel): expression_attribute_values=None, overwrite=False, ): + if self.hash_key_attr not in item_attrs.keys(): + raise KeyError( + "One or more parameter values were invalid: Missing the key " + + self.hash_key_attr + + " in the item" + ) hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) if self.has_range_key: + if self.range_key_attr not in item_attrs.keys(): + raise KeyError( + "One or more parameter values were invalid: Missing the key " + + self.range_key_attr + + " in the item" + ) range_value = DynamoType(item_attrs.get(self.range_key_attr)) else: range_value = None @@ -803,21 +612,20 @@ class Table(BaseModel): else: lookup_range_value = DynamoType(expected_range_value) current = self.get_item(hash_value, lookup_range_value) - item = Item( hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs ) if not overwrite: if not get_expected(expected).expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed if range_value: self.items[hash_value][range_value] = item @@ -894,7 +702,7 @@ class Table(BaseModel): if index_name: all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError( "Invalid index: %s for table: %s. 
Available indexes are: %s" @@ -904,14 +712,14 @@ class Table(BaseModel): index = indexes_by_name[index_name] try: index_hash_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "HASH" + key for key in index.schema if key["KeyType"] == "HASH" ][0] except IndexError: - raise ValueError("Missing Hash Key. KeySchema: %s" % index["KeySchema"]) + raise ValueError("Missing Hash Key. KeySchema: %s" % index.name) try: index_range_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "RANGE" + key for key in index.schema if key["KeyType"] == "RANGE" ][0] except IndexError: index_range_key = None @@ -971,8 +779,13 @@ class Table(BaseModel): if index_name: if index_range_key: + + # Convert to float if necessary to ensure proper ordering + def conv(x): + return float(x.value) if x.type == "N" else x.value + results.sort( - key=lambda item: item.attrs[index_range_key["AttributeName"]].value + key=lambda item: conv(item.attrs[index_range_key["AttributeName"]]) if item.attrs.get(index_range_key["AttributeName"]) else None ) @@ -988,6 +801,10 @@ class Table(BaseModel): results = [item for item in results if filter_expression.expr(item)] results = copy.deepcopy(results) + if index_name: + index = self.get_index(index_name) + for result in results: + index.project(result) if projection_expression: for result in results: result.filter(projection_expression) @@ -1008,12 +825,17 @@ class Table(BaseModel): def all_indexes(self): return (self.global_indexes or []) + (self.indexes or []) + def get_index(self, index_name, err=None): + all_indexes = self.all_indexes() + indexes_by_name = dict((i.name, i) for i in all_indexes) + if err and index_name not in indexes_by_name: + raise err + return indexes_by_name[index_name] + def has_idx_items(self, index_name): - all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) - idx = indexes_by_name[index_name] - idx_col_set = set([i["AttributeName"] for i in idx["KeySchema"]]) + idx = self.get_index(index_name) + idx_col_set = set([i["AttributeName"] for i in idx.schema]) for hash_set in self.items.values(): if self.range_key_attr: @@ -1035,14 +857,12 @@ class Table(BaseModel): ): results = [] scanned_count = 0 - all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) if index_name: - if index_name not in indexes_by_name: - raise InvalidIndexNameError( - "The table does not have the specified index: %s" % index_name - ) + err = InvalidIndexNameError( + "The table does not have the specified index: %s" % index_name + ) + self.get_index(index_name, err) items = self.has_idx_items(index_name) else: items = self.all_items() @@ -1101,6 +921,14 @@ class Table(BaseModel): break last_evaluated_key = None + size_limit = 1000000 # DynamoDB has a 1MB size limit + item_size = sum(res.size() for res in results) + if item_size > size_limit: + item_size = idx = 0 + while item_size + results[idx].size() < size_limit: + item_size += results[idx].size() + idx += 1 + limit = min(limit, idx) if limit else idx if limit and len(results) > limit: results = results[:limit] last_evaluated_key = {self.hash_key_attr: results[-1].hash_key} @@ -1108,10 +936,8 @@ class Table(BaseModel): last_evaluated_key[self.range_key_attr] = results[-1].range_key if scanned_index: - all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) - idx = indexes_by_name[scanned_index] - idx_col_list = [i["AttributeName"] for i in idx["KeySchema"]] + idx = 
self.get_index(scanned_index) + idx_col_list = [i["AttributeName"] for i in idx.schema] for col in idx_col_list: last_evaluated_key[col] = results[-1].attrs[col] @@ -1127,6 +953,9 @@ class Table(BaseModel): return None return ret + def delete(self, region_name): + dynamodb_backends[region_name].delete_table(self.name) + class DynamoDBBackend(BaseBackend): def __init__(self, region_name=None): @@ -1168,6 +997,42 @@ class DynamoDBBackend(BaseBackend): required_table = self.tables[table] return required_table.tags + def list_tables(self, limit, exclusive_start_table_name): + all_tables = list(self.tables.keys()) + + if exclusive_start_table_name: + try: + last_table_index = all_tables.index(exclusive_start_table_name) + except ValueError: + start = len(all_tables) + else: + start = last_table_index + 1 + else: + start = 0 + + if limit: + tables = all_tables[start : start + limit] + else: + tables = all_tables[start:] + + if limit and len(all_tables) > start + limit: + return tables, tables[-1] + return tables, None + + def describe_table(self, name): + table = self.tables[name] + return table.describe(base_key="Table") + + def update_table(self, name, global_index, throughput, stream_spec): + table = self.get_table(name) + if global_index: + table = self.update_table_global_indexes(name, global_index) + if throughput: + table = self.update_table_throughput(name, throughput) + if stream_spec: + table = self.update_table_streams(name, stream_spec) + return table + def update_table_throughput(self, name, throughput): table = self.tables[name] table.throughput = throughput @@ -1185,7 +1050,7 @@ class DynamoDBBackend(BaseBackend): def update_table_global_indexes(self, name, global_index_updates): table = self.tables[name] - gsis_by_name = dict((i["IndexName"], i) for i in table.global_indexes) + gsis_by_name = dict((i.name, i) for i in table.global_indexes) for gsi_update in global_index_updates: gsi_to_create = gsi_update.get("Create") gsi_to_update = gsi_update.get("Update") @@ -1206,7 +1071,7 @@ class DynamoDBBackend(BaseBackend): if index_name not in gsis_by_name: raise ValueError( "Global Secondary Index does not exist, but tried to update: %s" - % gsi_to_update["IndexName"] + % index_name ) gsis_by_name[index_name].update(gsi_to_update) @@ -1217,7 +1082,9 @@ class DynamoDBBackend(BaseBackend): % gsi_to_create["IndexName"] ) - gsis_by_name[gsi_to_create["IndexName"]] = gsi_to_create + gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( + gsi_to_create, table.table_key_attrs, + ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other # parts of the codebase @@ -1378,14 +1245,21 @@ class DynamoDBBackend(BaseBackend): table_name, key, update_expression, - attribute_updates, expression_attribute_names, expression_attribute_values, + attribute_updates=None, expected=None, condition_expression=None, ): table = self.get_table(table_name) + # Support spaces between operators in an update expression + # E.g. 
`a = b + c` -> `a=b+c` + if update_expression: + # Parse expression to get validation errors + update_expression_ast = UpdateExpressionParser.make(update_expression) + update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression) + if all([table.hash_key_attr in key, table.range_key_attr in key]): # Covers cases where table has hash and range keys, ``key`` param # will be a dict @@ -1401,19 +1275,20 @@ class DynamoDBBackend(BaseBackend): range_value = None item = table.get_item(hash_value, range_value) + orig_item = copy.deepcopy(item) if not expected: expected = {} if not get_expected(expected).expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed # Update does not fail on new items, so create one if item is None: @@ -1426,14 +1301,27 @@ class DynamoDBBackend(BaseBackend): table.put_item(data) item = table.get_item(hash_value, range_value) + if attribute_updates: + item.validate_no_empty_key_values(attribute_updates, table.key_attributes) + if update_expression: - item.update( - update_expression, - expression_attribute_names, - expression_attribute_values, - ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + item=item, + table=table, + ).validate() + try: + UpdateExpressionExecutor( + validated_ast, item, expression_attribute_names + ).execute() + except ItemSizeTooLarge: + raise ItemSizeToUpdateTooLarge() else: item.update_with_attribute_updates(attribute_updates) + if table.stream_shard is not None: + table.stream_shard.add(orig_item, item) return item def delete_item( @@ -1457,11 +1345,11 @@ class DynamoDBBackend(BaseBackend): expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed return table.delete_item(hash_value, range_value) - def update_ttl(self, table_name, ttl_spec): + def update_time_to_live(self, table_name, ttl_spec): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") @@ -1478,13 +1366,147 @@ class DynamoDBBackend(BaseBackend): table.ttl["TimeToLiveStatus"] = "DISABLED" table.ttl["AttributeName"] = ttl_spec["AttributeName"] - def describe_ttl(self, table_name): + def describe_time_to_live(self, table_name): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") return table.ttl + def transact_write_items(self, transact_items): + # Create a backup in case any of the transactions fail + original_table_state = copy.deepcopy(self.tables) + errors = [] + for item in transact_items: + try: + if "ConditionCheck" in item: + item = item["ConditionCheck"] + key = item["Key"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + current = self.get_item(table_name, key) + + condition_op = get_filter_expression( + condition_expression, + expression_attribute_names, + expression_attribute_values, + ) + if not condition_op.expr(current): + raise 
ConditionalCheckFailed() + elif "Put" in item: + item = item["Put"] + attrs = item["Item"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.put_item( + table_name, + attrs, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + elif "Delete" in item: + item = item["Delete"] + key = item["Key"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.delete_item( + table_name, + key, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + elif "Update" in item: + item = item["Update"] + key = item["Key"] + table_name = item["TableName"] + update_expression = item["UpdateExpression"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.update_item( + table_name, + key, + update_expression=update_expression, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + else: + raise ValueError + errors.append(None) + except Exception as e: # noqa: E722 Do not use bare except + errors.append(type(e).__name__) + if any(errors): + # Rollback to the original state, and reraise the errors + self.tables = original_table_state + raise TransactionCanceledException(errors) + + def describe_continuous_backups(self, table_name): + table = self.get_table(table_name) + + return table.continuous_backups + + def update_continuous_backups(self, table_name, point_in_time_spec): + table = self.get_table(table_name) + + if ( + point_in_time_spec["PointInTimeRecoveryEnabled"] + and table.continuous_backups["PointInTimeRecoveryDescription"][ + "PointInTimeRecoveryStatus" + ] + == "DISABLED" + ): + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "ENABLED", + "EarliestRestorableDateTime": unix_time(), + "LatestRestorableDateTime": unix_time(), + } + elif not point_in_time_spec["PointInTimeRecoveryEnabled"]: + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "DISABLED" + } + + return table.continuous_backups + + ###################### + # LIST of methods where the logic completely resides in responses.py + # Duplicated here so that the implementation coverage script is aware + # TODO: Move logic here + ###################### + + def batch_get_item(self): + pass + + def batch_write_item(self): + pass + + def transact_get_items(self): + pass + dynamodb_backends = {} for region in Session().get_available_regions("dynamodb"): diff --git a/moto/dynamodb2/models/dynamo_type.py b/moto/dynamodb2/models/dynamo_type.py new file mode 100644 index 000000000..1fc1bcef3 --- /dev/null +++ b/moto/dynamodb2/models/dynamo_type.py @@ -0,0 +1,317 @@ +import six + +from moto.dynamodb2.comparisons import get_comparison_func +from 
moto.dynamodb2.exceptions import InvalidUpdateExpression, IncorrectDataType +from moto.dynamodb2.models.utilities import attribute_is_list, bytesize + + +class DDBType(object): + """ + Official documentation at https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html + """ + + BINARY_SET = "BS" + NUMBER_SET = "NS" + STRING_SET = "SS" + STRING = "S" + NUMBER = "N" + MAP = "M" + LIST = "L" + BOOLEAN = "BOOL" + BINARY = "B" + NULL = "NULL" + + +class DDBTypeConversion(object): + _human_type_mapping = { + val: key.replace("_", " ") + for key, val in DDBType.__dict__.items() + if key.upper() == key + } + + @classmethod + def get_human_type(cls, abbreviated_type): + """ + Args: + abbreviated_type(str): An attribute of DDBType + + Returns: + str: The human readable form of the DDBType. + """ + try: + human_type_str = cls._human_type_mapping[abbreviated_type] + except KeyError: + raise ValueError( + "Invalid abbreviated_type {at}".format(at=abbreviated_type) + ) + + return human_type_str + + +class DynamoType(object): + """ + http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes + """ + + def __init__(self, type_as_dict): + if type(type_as_dict) == DynamoType: + self.type = type_as_dict.type + self.value = type_as_dict.value + else: + self.type = list(type_as_dict)[0] + self.value = list(type_as_dict.values())[0] + if self.is_list(): + self.value = [DynamoType(val) for val in self.value] + elif self.is_map(): + self.value = dict((k, DynamoType(v)) for k, v in self.value.items()) + + def get(self, key): + if not key: + return self + else: + key_head = key.split(".")[0] + key_tail = ".".join(key.split(".")[1:]) + if key_head not in self.value: + self.value[key_head] = DynamoType({"NONE": None}) + return self.value[key_head].get(key_tail) + + def set(self, key, new_value, index=None): + if index: + index = int(index) + if type(self.value) is not list: + raise InvalidUpdateExpression + if index >= len(self.value): + self.value.append(new_value) + # {'L': [DynamoType, ..]} ==> DynamoType.set() + self.value[min(index, len(self.value) - 1)].set(key, new_value) + else: + attr = (key or "").split(".").pop(0) + attr, list_index = attribute_is_list(attr) + if not key: + # {'S': value} ==> {'S': new_value} + self.type = new_value.type + self.value = new_value.value + else: + if attr not in self.value: # nonexistingattribute + type_of_new_attr = DDBType.MAP if "." in key else new_value.type + self.value[attr] = DynamoType({type_of_new_attr: {}}) + # {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value) + self.value[attr].set( + ".".join(key.split(".")[1:]), new_value, list_index + ) + + def __contains__(self, item): + if self.type == DDBType.STRING: + return False + try: + self.__getitem__(item) + return True + except KeyError: + return False + + def delete(self, key, index=None): + if index: + if not key: + if int(index) < len(self.value): + del self.value[int(index)] + elif "." in key: + self.value[int(index)].delete(".".join(key.split(".")[1:])) + else: + self.value[int(index)].delete(key) + else: + attr = key.split(".")[0] + attr, list_index = attribute_is_list(attr) + + if list_index: + self.value[attr].delete(".".join(key.split(".")[1:]), list_index) + elif "." in key: + self.value[attr].delete(".".join(key.split(".")[1:])) + else: + self.value.pop(key) + + def filter(self, projection_expressions): + nested_projections = [ + expr[0 : expr.index(".")] for expr in projection_expressions if "." 
in expr + ] + if self.is_map(): + expressions_to_delete = [] + for attr in self.value: + if ( + attr not in projection_expressions + and attr not in nested_projections + ): + expressions_to_delete.append(attr) + elif attr in nested_projections: + relevant_expressions = [ + expr[len(attr + ".") :] + for expr in projection_expressions + if expr.startswith(attr + ".") + ] + self.value[attr].filter(relevant_expressions) + for expr in expressions_to_delete: + self.value.pop(expr) + + def __hash__(self): + return hash((self.type, self.value)) + + def __eq__(self, other): + return self.type == other.type and self.value == other.value + + def __ne__(self, other): + return self.type != other.type or self.value != other.value + + def __lt__(self, other): + return self.cast_value < other.cast_value + + def __le__(self, other): + return self.cast_value <= other.cast_value + + def __gt__(self, other): + return self.cast_value > other.cast_value + + def __ge__(self, other): + return self.cast_value >= other.cast_value + + def __repr__(self): + return "DynamoType: {0}".format(self.to_json()) + + def __add__(self, other): + if self.type != other.type: + raise TypeError("Different types of operands are not allowed.") + if self.is_number(): + self_value = float(self.value) if "." in self.value else int(self.value) + other_value = float(other.value) if "." in other.value else int(other.value) + return DynamoType( + {DDBType.NUMBER: "{v}".format(v=self_value + other_value)} + ) + else: + raise IncorrectDataType() + + def __sub__(self, other): + if self.type != other.type: + raise TypeError("Different types of operands are not allowed.") + if self.type == DDBType.NUMBER: + self_value = float(self.value) if "." in self.value else int(self.value) + other_value = float(other.value) if "." in other.value else int(other.value) + return DynamoType( + {DDBType.NUMBER: "{v}".format(v=self_value - other_value)} + ) + else: + raise TypeError("Subtraction only supported for Numbers.") + + def __getitem__(self, item): + if isinstance(item, six.string_types): + # If our DynamoType is a map it should be subscriptable with a key + if self.type == DDBType.MAP: + return self.value[item] + elif isinstance(item, int): + # If our DynamoType is a list it should be subscriptable with an index + if self.type == DDBType.LIST: + return self.value[item] + raise TypeError( + "This DynamoType {dt} is not subscriptable by a {it}".format( + dt=self.type, it=type(item) + ) + ) + + def __setitem__(self, key, value): + if isinstance(key, int): + if self.is_list(): + if key >= len(self.value): + # DynamoDB doesn't care if you are out of bounds; just add it to the end. + self.value.append(value) + else: + self.value[key] = value + elif isinstance(key, six.string_types): + if self.is_map(): + self.value[key] = value + else: + raise NotImplementedError("No set_item for {t}".format(t=type(key))) + + @property + def cast_value(self): + if self.is_number(): + try: + return int(self.value) + except ValueError: + return float(self.value) + elif self.is_set(): + sub_type = self.type[0] + return set([DynamoType({sub_type: v}).cast_value for v in self.value]) + elif self.is_list(): + return [DynamoType(v).cast_value for v in self.value] + elif self.is_map(): + return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()]) + else: + return self.value + + def child_attr(self, key): + """ + Get Map or List children by key. str for Map, int for List. + + Returns DynamoType or None. + """ + if isinstance(key, six.string_types) and self.is_map(): + if "."
in key and key.split(".")[0] in self.value: + return self.value[key.split(".")[0]].child_attr( + ".".join(key.split(".")[1:]) + ) + elif "." not in key and key in self.value: + return DynamoType(self.value[key]) + + if isinstance(key, int) and self.is_list(): + idx = key + if 0 <= idx < len(self.value): + return DynamoType(self.value[idx]) + + return None + + def size(self): + if self.is_number(): + value_size = len(str(self.value)) + elif self.is_set(): + sub_type = self.type[0] + value_size = sum([DynamoType({sub_type: v}).size() for v in self.value]) + elif self.is_list(): + value_size = sum([v.size() for v in self.value]) + elif self.is_map(): + value_size = sum( + [bytesize(k) + DynamoType(v).size() for k, v in self.value.items()] + ) + elif type(self.value) == bool: + value_size = 1 + else: + value_size = bytesize(self.value) + return value_size + + def to_json(self): + return {self.type: self.value} + + def compare(self, range_comparison, range_objs): + """ + Compares this type against comparison filters + """ + range_values = [obj.cast_value for obj in range_objs] + comparison_func = get_comparison_func(range_comparison) + return comparison_func(self.cast_value, *range_values) + + def is_number(self): + return self.type == DDBType.NUMBER + + def is_set(self): + return self.type in (DDBType.STRING_SET, DDBType.NUMBER_SET, DDBType.BINARY_SET) + + def is_list(self): + return self.type == DDBType.LIST + + def is_map(self): + return self.type == DDBType.MAP + + def same_type(self, other): + return self.type == other.type + + def pop(self, key, *args, **kwargs): + if self.is_map() or self.is_list(): + self.value.pop(key, *args, **kwargs) + else: + raise TypeError("pop not supported for DynamoType {t}".format(t=self.type)) diff --git a/moto/dynamodb2/models/utilities.py b/moto/dynamodb2/models/utilities.py new file mode 100644 index 000000000..9dd6f1e9f --- /dev/null +++ b/moto/dynamodb2/models/utilities.py @@ -0,0 +1,17 @@ +import re + + +def bytesize(val): + return len(str(val).encode("utf-8")) + + +def attribute_is_list(attr): + """ + Checks if attribute denotes a list, and returns the name of the list and the given list index if so + :param attr: attr or attr[index] + :return: attr, index or None + """ + list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr) + if list_index_update: + attr = list_index_update.group(1) + return attr, list_index_update.group(2) if list_index_update else None diff --git a/moto/dynamodb2/parsing/README.md b/moto/dynamodb2/parsing/README.md new file mode 100644 index 000000000..6c4390d02 --- /dev/null +++ b/moto/dynamodb2/parsing/README.md @@ -0,0 +1,23 @@ +# Parsing dev documentation + +Parsing happens in a structured manner and proceeds in distinct phases. +This document explains these phases. + + +## 1) Expression gets parsed into a tokenlist (tokenized) +A string gets parsed from left to right and gets converted into a list of tokens. +The tokens are available in `tokens.py`. + +## 2) Tokenlist gets transformed to expression tree (AST) +This is the parsing of the token list. This parsing will result in an Abstract Syntax Tree (AST). +The different node types are available in `ast_nodes.py`. The AST is a representation that has all +the information that is in the expression, but its tree form allows processing it in a structured manner. + +## 3) The AST gets validated (full semantic correctness) +The AST is used for validation. The paths and attributes are validated to be correct. At the end of the +validation all the values will be resolved.
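To make the phases concrete, here is a minimal sketch of how they are wired together, mirroring the calls that `DynamoDBBackend.update_item` makes elsewhere in this PR. The `item` and `table` objects are placeholders for an `Item`/`Table` taken from a mocked table, and the attribute names/values are invented for illustration:

```python
from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor
from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
from moto.dynamodb2.parsing.validators import UpdateExpressionValidator

names = {"#attr": "my_attr"}
values = {":val": {"S": "a-value"}}

# Phases 1+2: tokenize the expression string and parse the token list into an AST
ast = UpdateExpressionParser.make("SET #attr = :val")

# Phase 3: validate paths/attributes against the item and resolve all values
validated_ast = UpdateExpressionValidator(
    ast,
    expression_attribute_names=names,
    expression_attribute_values=values,
    item=item,    # an Item fetched from the table (placeholder)
    table=table,  # the Table the item lives in (placeholder)
).validate()

# Phase 4 (described below): execute the validated AST against the item
UpdateExpressionExecutor(validated_ast, item, names).execute()
```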
+ +## 4) Update Expression gets executed using the validated AST +Finally the AST is used to execute the update expression. There should be no reason for this step to fail +since validation has completed. Due to this we have the update expressions behaving atomically (i.e. all the +actions of the update expression are performed or none of them are performed). \ No newline at end of file diff --git a/moto/dynamodb2/parsing/__init__.py b/moto/dynamodb2/parsing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/dynamodb2/parsing/ast_nodes.py b/moto/dynamodb2/parsing/ast_nodes.py new file mode 100644 index 000000000..81735a8c9 --- /dev/null +++ b/moto/dynamodb2/parsing/ast_nodes.py @@ -0,0 +1,360 @@ +import abc +from abc import abstractmethod +from collections import deque + +import six + +from moto.dynamodb2.models import DynamoType + + +@six.add_metaclass(abc.ABCMeta) +class Node: + def __init__(self, children=None): + self.type = self.__class__.__name__ + assert children is None or isinstance(children, list) + self.children = children + self.parent = None + + if isinstance(children, list): + for child in children: + if isinstance(child, Node): + child.set_parent(self) + + def set_parent(self, parent_node): + self.parent = parent_node + + +class LeafNode(Node): + """A LeafNode is a Node where none of the children are Nodes themselves.""" + + def __init__(self, children=None): + super(LeafNode, self).__init__(children) + + +@six.add_metaclass(abc.ABCMeta) +class Expression(Node): + """ + Abstract Syntax Tree representing the expression + + For the grammar, start here and jump down into the classes at the right-hand side to look further. Nodes marked with + a star are abstract and won't appear in the final AST. + + Expression* => UpdateExpression + Expression* => ConditionExpression + """ + + +class UpdateExpression(Expression): + """ + UpdateExpression => UpdateExpressionClause* + UpdateExpression => UpdateExpressionClause* UpdateExpression + """ + + +@six.add_metaclass(abc.ABCMeta) +class UpdateExpressionClause(UpdateExpression): + """ + UpdateExpressionClause* => UpdateExpressionSetClause + UpdateExpressionClause* => UpdateExpressionRemoveClause + UpdateExpressionClause* => UpdateExpressionAddClause + UpdateExpressionClause* => UpdateExpressionDeleteClause + """ + + +class UpdateExpressionSetClause(UpdateExpressionClause): + """ + UpdateExpressionSetClause => SET SetActions + """ + + +class UpdateExpressionSetActions(UpdateExpressionClause): + """ + UpdateExpressionSetClause => SET SetActions + + SetActions => SetAction + SetActions => SetAction , SetActions + + """ + + +class UpdateExpressionSetAction(UpdateExpressionClause): + """ + SetAction => Path = Value + """ + + +class UpdateExpressionRemoveActions(UpdateExpressionClause): + """ + UpdateExpressionRemoveClause => REMOVE RemoveActions + + RemoveActions => RemoveAction + RemoveActions => RemoveAction , RemoveActions + """ + + +class UpdateExpressionRemoveAction(UpdateExpressionClause): + """ + RemoveAction => Path + """ + + +class UpdateExpressionAddActions(UpdateExpressionClause): + """ + UpdateExpressionAddClause => ADD AddActions + + AddActions => AddAction + AddActions => AddAction , AddActions + """ + + +class UpdateExpressionAddAction(UpdateExpressionClause): + """ + AddAction => Path Value + """ + + +class UpdateExpressionDeleteActions(UpdateExpressionClause): + """ + UpdateExpressionDeleteClause => DELETE DeleteActions + + DeleteActions => DeleteAction + DeleteActions => DeleteAction , DeleteActions +
""" + + +class UpdateExpressionDeleteAction(UpdateExpressionClause): + """ + DeleteAction => Path Value + """ + + +class UpdateExpressionPath(UpdateExpressionClause): + pass + + +class UpdateExpressionValue(UpdateExpressionClause): + """ + Value => Operand + Value => Operand + Value + Value => Operand - Value + """ + + +class UpdateExpressionGroupedValue(UpdateExpressionClause): + """ + GroupedValue => ( Value ) + """ + + +class UpdateExpressionRemoveClause(UpdateExpressionClause): + """ + UpdateExpressionRemoveClause => REMOVE RemoveActions + """ + + +class UpdateExpressionAddClause(UpdateExpressionClause): + """ + UpdateExpressionAddClause => ADD AddActions + """ + + +class UpdateExpressionDeleteClause(UpdateExpressionClause): + """ + UpdateExpressionDeleteClause => DELETE DeleteActions + """ + + +class ExpressionPathDescender(Node): + """Node identifying descender into nested structure (.) in expression""" + + +class ExpressionSelector(LeafNode): + """Node identifying selector [selection_index] in expresion""" + + def __init__(self, selection_index): + try: + super(ExpressionSelector, self).__init__(children=[int(selection_index)]) + except ValueError: + assert ( + False + ), "Expression selector must be an int, this is a bug in the moto library." + + def get_index(self): + return self.children[0] + + +class ExpressionAttribute(LeafNode): + """An attribute identifier as used in the DDB item""" + + def __init__(self, attribute): + super(ExpressionAttribute, self).__init__(children=[attribute]) + + def get_attribute_name(self): + return self.children[0] + + +class ExpressionAttributeName(LeafNode): + """An ExpressionAttributeName is an alias for an attribute identifier""" + + def __init__(self, attribute_name): + super(ExpressionAttributeName, self).__init__(children=[attribute_name]) + + def get_attribute_name_placeholder(self): + return self.children[0] + + +class ExpressionAttributeValue(LeafNode): + """An ExpressionAttributeValue is an alias for an value""" + + def __init__(self, value): + super(ExpressionAttributeValue, self).__init__(children=[value]) + + def get_value_name(self): + return self.children[0] + + +class ExpressionValueOperator(LeafNode): + """An ExpressionValueOperator is an operation that works on 2 values""" + + def __init__(self, value): + super(ExpressionValueOperator, self).__init__(children=[value]) + + def get_operator(self): + return self.children[0] + + +class UpdateExpressionFunction(Node): + """ + A Node representing a function of an Update Expression. The first child is the function name the others are the + arguments. + """ + + def get_function_name(self): + return self.children[0] + + def get_nth_argument(self, n=1): + """Return nth element where n is a 1-based index.""" + assert n >= 1 + return self.children[n] + + +class DDBTypedValue(Node): + """ + A node representing a DDBTyped value. This can be any structure as supported by DyanmoDB. The node only has 1 child + which is the value of type `DynamoType`. + """ + + def __init__(self, value): + assert isinstance(value, DynamoType), "DDBTypedValue must be of DynamoType" + super(DDBTypedValue, self).__init__(children=[value]) + + def get_value(self): + return self.children[0] + + +class NoneExistingPath(LeafNode): + """A placeholder for Paths that did not exist in the Item.""" + + def __init__(self, creatable=False): + super(NoneExistingPath, self).__init__(children=[creatable]) + + def is_creatable(self): + """Can this path be created if need be. 
For example path creating element in a dictionary or creating a new + attribute under root level of an item.""" + return self.children[0] + + +class DepthFirstTraverser(object): + """ + Helper class that allows depth first traversal and to implement custom processing for certain AST nodes. The + processor of a node must return the new resulting node. This node will be placed in the tree. Processing of a + node using this traverser should therefore only transform child nodes. The returned node will get the same parent + as the node before processing had. + """ + + @abstractmethod + def _processing_map(self): + """ + A map providing a processing function per node class type to a function that takes in a Node object and + processes it. A Node can only be processed by a single function and they are considered in order. Therefore if + multiple classes from a single class hierarchy strain are used the more specific classes have to be put before + the less specific ones. That requires overriding `nodes_to_be_processed`. If no multiple classes form a single + class hierarchy strain are used the default implementation of `nodes_to_be_processed` should be OK. + Returns: + dict: Mapping a Node Class to a processing function. + """ + pass + + def nodes_to_be_processed(self): + """Cached accessor for getting Node types that need to be processed.""" + return tuple(k for k in self._processing_map().keys()) + + def process(self, node): + """Process a Node""" + for class_key, processor in self._processing_map().items(): + if isinstance(node, class_key): + return processor(node) + + def pre_processing_of_child(self, parent_node, child_id): + """Hook that is called pre-processing of the child at position `child_id`""" + pass + + def traverse_node_recursively(self, node, child_id=-1): + """ + Traverse nodes depth first processing nodes bottom up (if root node is considered the top). + + Args: + node(Node): The node which is the last node to be processed but which allows to identify all the + work (which is in the children) + child_id(int): The index in the list of children from the parent that this node corresponds to + + Returns: + Node: The node of the new processed AST + """ + if isinstance(node, Node): + parent_node = node.parent + if node.children is not None: + for i, child_node in enumerate(node.children): + self.pre_processing_of_child(node, i) + self.traverse_node_recursively(child_node, i) + # noinspection PyTypeChecker + if isinstance(node, self.nodes_to_be_processed()): + node = self.process(node) + node.parent = parent_node + parent_node.children[child_id] = node + return node + + def traverse(self, node): + return self.traverse_node_recursively(node) + + +class NodeDepthLeftTypeFetcher(object): + """Helper class to fetch a node of a specific type. 
Depth-first, left-to-right traversal."""
+
+    def __init__(self, node_type, root_node):
+        assert issubclass(node_type, Node)
+        self.node_type = node_type
+        self.root_node = root_node
+        self.queue = deque()
+        self.add_nodes_left_to_right_depth_first(self.root_node)
+
+    def add_nodes_left_to_right_depth_first(self, node):
+        if isinstance(node, Node) and node.children is not None:
+            for child_node in node.children:
+                self.add_nodes_left_to_right_depth_first(child_node)
+                self.queue.append(child_node)
+        self.queue.append(node)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        return self.__next__()
+
+    def __next__(self):
+        while len(self.queue) > 0:
+            candidate = self.queue.popleft()
+            if isinstance(candidate, self.node_type):
+                return candidate
+        else:
+            raise StopIteration
diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py
new file mode 100644
index 000000000..76642542d
--- /dev/null
+++ b/moto/dynamodb2/parsing/executors.py
@@ -0,0 +1,288 @@
+from abc import abstractmethod
+
+from moto.dynamodb2.exceptions import (
+    IncorrectOperandType,
+    IncorrectDataType,
+    ProvidedKeyDoesNotExist,
+)
+from moto.dynamodb2.models import DynamoType
+from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType
+from moto.dynamodb2.parsing.ast_nodes import (
+    UpdateExpressionSetAction,
+    UpdateExpressionDeleteAction,
+    UpdateExpressionRemoveAction,
+    UpdateExpressionAddAction,
+    UpdateExpressionPath,
+    DDBTypedValue,
+    ExpressionAttribute,
+    ExpressionSelector,
+    ExpressionAttributeName,
+)
+from moto.dynamodb2.parsing.validators import ExpressionPathResolver
+
+
+class NodeExecutor(object):
+    def __init__(self, ast_node, expression_attribute_names):
+        self.node = ast_node
+        self.expression_attribute_names = expression_attribute_names
+
+    @abstractmethod
+    def execute(self, item):
+        pass
+
+    def get_item_part_for_path_nodes(self, item, path_nodes):
+        """
+        For a list of path nodes, traverse the item by following the path_nodes.
+        Args:
+            item(Item):
+            path_nodes(list):
+
+        Returns:
+            DynamoType or dict: The item part the path nodes point at
+        """
+        if len(path_nodes) == 0:
+            return item.attrs
+        else:
+            return ExpressionPathResolver(
+                self.expression_attribute_names
+            ).resolve_expression_path_nodes_to_dynamo_type(item, path_nodes)
+
+    def get_item_before_end_of_path(self, item):
+        """
+        Get the part of the item on which the action will be performed. For most actions this should be the parent,
+        as that element will need to be modified by the action.
+        Args:
+            item(Item):
+
+        Returns:
+            DynamoType or dict: The path to be set
+        """
+        return self.get_item_part_for_path_nodes(
+            item, self.get_path_expression_nodes()[:-1]
+        )
+
+    def get_item_at_end_of_path(self, item):
+        """
+        For a DELETE the path points at the string set itself, so we need to evaluate the full path.
+        Args:
+            item(Item):
+
+        Returns:
+            DynamoType or dict: The path to be set
+        """
+        return self.get_item_part_for_path_nodes(item, self.get_path_expression_nodes())
+
+    # Get the part of the item on which the action will be performed. For most actions this should be the parent,
+    # as that element will need to be modified by the action.
+ get_item_part_in_which_to_perform_action = get_item_before_end_of_path + + def get_path_expression_nodes(self): + update_expression_path = self.node.children[0] + assert isinstance(update_expression_path, UpdateExpressionPath) + return update_expression_path.children + + def get_element_to_action(self): + return self.get_path_expression_nodes()[-1] + + def get_action_value(self): + """ + + Returns: + DynamoType: The value to be set + """ + ddb_typed_value = self.node.children[1] + assert isinstance(ddb_typed_value, DDBTypedValue) + dynamo_type_value = ddb_typed_value.children[0] + assert isinstance(dynamo_type_value, DynamoType) + return dynamo_type_value + + +class SetExecutor(NodeExecutor): + def execute(self, item): + self.set( + item_part_to_modify_with_set=self.get_item_part_in_which_to_perform_action( + item + ), + element_to_set=self.get_element_to_action(), + value_to_set=self.get_action_value(), + expression_attribute_names=self.expression_attribute_names, + ) + + @classmethod + def set( + cls, + item_part_to_modify_with_set, + element_to_set, + value_to_set, + expression_attribute_names, + ): + if isinstance(element_to_set, ExpressionAttribute): + attribute_name = element_to_set.get_attribute_name() + item_part_to_modify_with_set[attribute_name] = value_to_set + elif isinstance(element_to_set, ExpressionSelector): + index = element_to_set.get_index() + item_part_to_modify_with_set[index] = value_to_set + elif isinstance(element_to_set, ExpressionAttributeName): + attribute_name = expression_attribute_names[ + element_to_set.get_attribute_name_placeholder() + ] + item_part_to_modify_with_set[attribute_name] = value_to_set + else: + raise NotImplementedError( + "Moto does not support setting {t} yet".format(t=type(element_to_set)) + ) + + +class DeleteExecutor(NodeExecutor): + operator = "operator: DELETE" + + def execute(self, item): + string_set_to_remove = self.get_action_value() + assert isinstance(string_set_to_remove, DynamoType) + if not string_set_to_remove.is_set(): + raise IncorrectOperandType( + self.operator, + DDBTypeConversion.get_human_type(string_set_to_remove.type), + ) + + string_set = self.get_item_at_end_of_path(item) + assert isinstance(string_set, DynamoType) + if string_set.type != string_set_to_remove.type: + raise IncorrectDataType() + # String set is currently implemented as a list + string_set_list = string_set.value + + stringset_to_remove_list = string_set_to_remove.value + + for value in stringset_to_remove_list: + try: + string_set_list.remove(value) + except (KeyError, ValueError): + # DynamoDB does not mind if value is not present + pass + + # DynamoDB does not support empty sets. If we've deleted + # the last item in the set, we have to remove the attribute. 
+        if not string_set_list:
+            element = self.get_element_to_action()
+            container = self.get_item_before_end_of_path(item)
+            container.pop(element.get_attribute_name())
+
+
+class RemoveExecutor(NodeExecutor):
+    def execute(self, item):
+        element_to_remove = self.get_element_to_action()
+        if isinstance(element_to_remove, ExpressionAttribute):
+            attribute_name = element_to_remove.get_attribute_name()
+            self.get_item_part_in_which_to_perform_action(item).pop(
+                attribute_name, None
+            )
+        elif isinstance(element_to_remove, ExpressionAttributeName):
+            attribute_name = self.expression_attribute_names[
+                element_to_remove.get_attribute_name_placeholder()
+            ]
+            self.get_item_part_in_which_to_perform_action(item).pop(
+                attribute_name, None
+            )
+        elif isinstance(element_to_remove, ExpressionSelector):
+            index = element_to_remove.get_index()
+            try:
+                self.get_item_part_in_which_to_perform_action(item).pop(index)
+            except IndexError:
+                # DynamoDB does not care that the index is out of bounds; it will just do nothing.
+                pass
+        else:
+            raise NotImplementedError(
+                "Moto does not support removing {t} yet".format(
+                    t=type(element_to_remove)
+                )
+            )
+
+
+class AddExecutor(NodeExecutor):
+    def execute(self, item):
+        value_to_add = self.get_action_value()
+        if isinstance(value_to_add, DynamoType):
+            if value_to_add.is_set():
+                try:
+                    current_string_set = self.get_item_at_end_of_path(item)
+                except ProvidedKeyDoesNotExist:
+                    current_string_set = DynamoType({value_to_add.type: []})
+                    SetExecutor.set(
+                        item_part_to_modify_with_set=self.get_item_before_end_of_path(
+                            item
+                        ),
+                        element_to_set=self.get_element_to_action(),
+                        value_to_set=current_string_set,
+                        expression_attribute_names=self.expression_attribute_names,
+                    )
+                assert isinstance(current_string_set, DynamoType)
+                if not current_string_set.type == value_to_add.type:
+                    raise IncorrectDataType()
+                # Sets are currently implemented as lists
+                for value in value_to_add.value:
+                    if value in current_string_set.value:
+                        continue
+                    else:
+                        current_string_set.value.append(value)
+            elif value_to_add.type == DDBType.NUMBER:
+                try:
+                    existing_value = self.get_item_at_end_of_path(item)
+                except ProvidedKeyDoesNotExist:
+                    existing_value = DynamoType({DDBType.NUMBER: "0"})
+
+                assert isinstance(existing_value, DynamoType)
+                if not existing_value.type == DDBType.NUMBER:
+                    raise IncorrectDataType()
+                new_value = existing_value + value_to_add
+                SetExecutor.set(
+                    item_part_to_modify_with_set=self.get_item_before_end_of_path(item),
+                    element_to_set=self.get_element_to_action(),
+                    value_to_set=new_value,
+                    expression_attribute_names=self.expression_attribute_names,
+                )
+            else:
+                raise IncorrectDataType()
+
+
+class UpdateExpressionExecutor(object):
+    execution_map = {
+        UpdateExpressionSetAction: SetExecutor,
+        UpdateExpressionAddAction: AddExecutor,
+        UpdateExpressionRemoveAction: RemoveExecutor,
+        UpdateExpressionDeleteAction: DeleteExecutor,
+    }
+
+    def __init__(self, update_ast, item, expression_attribute_names):
+        self.update_ast = update_ast
+        self.item = item
+        self.expression_attribute_names = expression_attribute_names
+
+    def execute(self, node=None):
+        """
+        As explained in moto.dynamodb2.parsing.expressions.NestableExpressionParserMixin._create_node, the order of
+        nodes in the AST corresponds to the order of statements in the expression. As such we can start at the root
+        node and process the nodes 1-by-1.
If no specific execution for the node type is defined we can execute the children + in order since it will be a container node that is expandable and left child will be first in the statement. + + Args: + node(Node): + + Returns: + None + """ + if node is None: + node = self.update_ast + + node_executor = self.get_specific_execution(node) + if node_executor is None: + for node in node.children: + self.execute(node) + else: + node_executor(node, self.expression_attribute_names).execute(self.item) + + def get_specific_execution(self, node): + for node_class in self.execution_map: + if isinstance(node, node_class): + return self.execution_map[node_class] + return None diff --git a/moto/dynamodb2/parsing/expressions.py b/moto/dynamodb2/parsing/expressions.py new file mode 100644 index 000000000..4c1d42a55 --- /dev/null +++ b/moto/dynamodb2/parsing/expressions.py @@ -0,0 +1,1040 @@ +import logging +from abc import abstractmethod +import abc +import six +from collections import deque + +from moto.dynamodb2.parsing.ast_nodes import ( + UpdateExpression, + UpdateExpressionSetClause, + UpdateExpressionSetActions, + UpdateExpressionSetAction, + UpdateExpressionRemoveActions, + UpdateExpressionRemoveAction, + UpdateExpressionPath, + UpdateExpressionValue, + UpdateExpressionGroupedValue, + UpdateExpressionRemoveClause, + ExpressionPathDescender, + ExpressionSelector, + ExpressionAttribute, + ExpressionAttributeName, + ExpressionAttributeValue, + ExpressionValueOperator, + UpdateExpressionFunction, + UpdateExpressionAddClause, + UpdateExpressionAddActions, + UpdateExpressionAddAction, + UpdateExpressionDeleteAction, + UpdateExpressionDeleteActions, + UpdateExpressionDeleteClause, +) +from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression +from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer + + +class NestableExpressionParserMixin(object): + """ + For nodes that can be nested in themselves (recursive). Take for example UpdateExpression's grammar: + + UpdateExpression => UpdateExpressionClause* + UpdateExpression => UpdateExpressionClause* UpdateExpression + + If we consider it of structure + NestableExpression => TargetClause* + NestableExpression => TargetClause* NestableExpression + + This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern. + + This approach is taken since it allows to remain the ordering of the Nodes as how the corresponding tokens where + in the originating expression. + """ + + def __init__(self, *args, **kwargs): + self.target_clauses = deque() + + def _parse_target_clause(self, factory_class): + """ + + Args: + factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser + + Returns: + + """ + logging.debug( + "Move token pos {pos} to continue parsing with specific factory class {fc}".format( + pos=self.token_pos, fc=factory_class.__class__.__name__ + ) + ) + # noinspection PyProtectedMember + ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() + self.target_clauses.append(ast) + logging.debug( + "Continue where previous parsing ended {token_pos}".format( + token_pos=token_pos + ) + ) + self.token_pos = token_pos + + @abstractmethod + def _initializer_args(self): + """ + Get the arguments of the initializer. This is implemented by the calling class. See ExpressionParser for an + example. 
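+
+        For example, the concrete implementation in ExpressionParser below returns:
+
+            {"expression_token_list": self.token_list, "token_pos": self.token_pos}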
+ + Returns: + dict: A dictionary of the initializer arguments + """ + + @classmethod + @abstractmethod + def _nestable_class(cls): + """ + Get the class of the Node that will be created that would be nested. For the example in the docstring this would + be UpdateExpression + + Returns: + class: The class of the Nodes that will be created. + """ + + def _create_node(self): + """ + target_clauses has the nodes in order of encountering. Go through them backwards and build the tree bottom up. + + This way left-deep-descending traversal will process nodes in order. + + Continuing the example of an UpdateExpression: + For example SET a=3 REMOVE b + UpdateExpression + / \ + SET a=3 UpdateExpression + | + REMOVE b + + self.target_clauses looks like: ( SET a=3 >> REMOVE b ) + Returns: + moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. + """ + assert len(self.target_clauses) > 0, "No nodes for {cn}".format( + cn=self.__class__.__name__ + ) + target_node = self._nestable_class()(children=[self.target_clauses.pop()]) + while len(self.target_clauses) > 0: + target_node = self._nestable_class()( + children=[self.target_clauses.pop(), target_node] + ) + return target_node + + +@six.add_metaclass(abc.ABCMeta) +class ExpressionParser: + """Abstract class""" + + def __init__(self, expression_token_list, token_pos=0): + """ + + Args: + expression_token_list: + token_pos(int): Location where parsing is + """ + self.token_list = expression_token_list + self.token_pos = token_pos + + def _initializer_args(self): + return {"expression_token_list": self.token_list, "token_pos": self.token_pos} + + @abstractmethod + def _parse(self): + """ + Start parsing the token_list from token_pos for the factory type. + + Returns: + moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax tree + """ + + @classmethod + def is_possible_start(cls, token): + return token is not None and cls._is_possible_start(token) + + @classmethod + @abstractmethod + def _is_possible_start(cls, token): + """ + + Args: + token(moto.dynamodb2.tokens.Token): + + Returns: + bool: True if token is a possible start for entries processed by `cls` + """ + + def _parse_with_pos(self): + """ + Start parsing the token_list from token_pos for the factory type and also return the resulting token_pos. + + Returns: + (ast, token_pos): tuple of AST which is root node of resulting abstract syntax tree and token_pos is the + position in the tokenlist. 
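+
+        For example, nested parsers are invoked throughout this module as:
+
+            ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos()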
+ """ + return self._parse(), self.token_pos + + def parse(self): + return self._parse() + + def get_next_token_type(self): + """ + Get the type of the next token to be processed + + Returns: + str: Token type or None if no more next token + """ + try: + return self.get_next_token().type + except AttributeError: + return None + + def get_next_token(self): + """ + Get the next token to be processed + + Returns: + moto.dynamodb2.tokens.Token: or None if no more next token + """ + try: + return self.token_list[self.token_pos] + except IndexError: + return None + + def get_next_token_value(self): + """ + Get the value of the next token to be processed + + Returns: + str: value or None if no more next token + """ + try: + return self.get_next_token().value + except AttributeError: + return None + + def is_at_end(self): + """Return boolean indicating whether we are at end of the parsing""" + return self.token_pos == len(self.token_list) + + def is_at_start(self): + """Return boolean indicating whether we are at start of the parsing""" + return self.token_pos == 0 + + def get_last_token_value(self): + """Get the last token that was correctly parsed or return empty string""" + if self.token_pos > 0: + return self.token_list[self.token_pos - 1].value + else: + return "" + + def get_last_token_type(self): + """Get the last token type that was correctly parsed or return None""" + if self.token_pos > 0: + return self.token_list[self.token_pos - 1].type + else: + return None + + def get_2nd_last_token_value_if_last_was_whitespace(self): + """Get the 2nd last token that was correctly parsed if last one was whitespace or return empty string""" + if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: + return self.token_list[self.token_pos - 2].value + else: + return "" + + def get_following_token_value(self): + """Get the token value after the one that is being parsed or empty string if non existent.""" + try: + return self.token_list[self.token_pos + 1].value + except IndexError: + return "" + + def get_following_token_type(self): + """Get the token type after the one that is being parsed or None if non existent.""" + try: + return self.token_list[self.token_pos + 1].type + except IndexError: + return None + + def get_2nd_following_token_value_if_following_was_whitespace(self): + """Get the 2nd following token that was correctly parsed if 1st one was whitespace or return empty string""" + if self.get_following_token_type() == Token.WHITESPACE: + try: + return self.token_list[self.token_pos + 2].value + except IndexError: + return "" + else: + return "" + + def skip_white_space(self): + try: + while self.get_next_token_type() == Token.WHITESPACE: + self.token_pos += 1 + except IndexError: + assert self.token_pos > 0, "We should always have positive indexes" + logging.debug("We are out of range so end is reached") + + def process_token_of_type(self, token_type): + """ + Maker sure the next token is of type `token_type` if not raise unexpected token + Args: + token_type: A token type + + Returns: + str: The value if the token is of type `token_type` + """ + if self.get_next_token_type() == token_type: + token_value = self.get_next_token_value() + self.goto_next_significant_token() + return token_value + else: + self.raise_unexpected_token() + + def goto_next_significant_token(self): + """Continue past current token and skip all whitespaces""" + self.token_pos += 1 + self.skip_white_space() + + def raise_unexpected_token(self): + if self.is_at_end(): + problematic_token = "" + 
problematic_token_in_near = "" + else: + problematic_token_in_near = problematic_token = self.get_next_token_value() + + near = "".join( + [ + self.get_2nd_last_token_value_if_last_was_whitespace(), + self.get_last_token_value(), + problematic_token_in_near, + self.get_following_token_value(), + self.get_2nd_following_token_value_if_following_was_whitespace(), + ] + ) + + raise InvalidTokenException(problematic_token, near) + + +class NestableBinExpressionParser(ExpressionParser): + """ + For nodes that can be nested in themselves (recursive) but with an operation. Take for example + UpdateExpressionValue's grammar: + + Value => Operand* + Value => Operand* + Value + Value => Operand* - Value + + If we consider it of structure + NestableBinExpression => TargetClause* + NestableBinExpression => TargetClause* BinOp NestableBinExpression + + This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern. + + This approach is taken since it allows to remain the ordering of the Nodes as how the corresponding tokens where + in the originating expression. + """ + + def __init__(self, *args, **kwargs): + super(NestableBinExpressionParser, self).__init__(*args, **kwargs) + self.target_nodes = deque() + + def _parse_target_clause(self, factory_class): + """ + + Args: + factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser + + Returns: + + """ + # noinspection PyProtectedMember + ast, self.token_pos = factory_class( + **self._initializer_args() + )._parse_with_pos() + self.target_nodes.append(ast) + logging.debug( + "Continue where previous parsing ended {token_pos}".format( + token_pos=self.token_pos + ) + ) + + def _parse(self): + self._parse_target_clause(self._operand_factory_class()) + while self._binop_factory_class().is_possible_start(self.get_next_token()): + self._parse_target_clause(self._binop_factory_class()) + if self._operand_factory_class().is_possible_start(self.get_next_token()): + self._parse_target_clause(self._operand_factory_class()) + else: + self.raise_unexpected_token() + return self._create_node() + + @abstractmethod + def _operand_factory_class(self): + """ + Get the Parser class of the Operands for the Binary operations/actions. + + Returns: + class: + """ + + @abstractmethod + def _binop_factory_class(self): + """ + Get a factory that gets the possible binary operation. + + Returns: + class: A class extending ExpressionParser + """ + + def _create_node(self): + """ + target_clauses has the nodes in order of encountering. Go through them forward and build the tree bottom up. + For simplicity docstring will use Operand Node rather than the specific node + + This way left-deep-descending traversal will process nodes in order. + + Continuing the example of an UpdateExpressionValue: + For example value => a + :val - :val2 + UpdateExpressionValue + / | \ + UpdateExpressionValue BinOp Operand + / | | | | + UpdateExpressionValue BinOp Operand - :val2 + / | | + Operand + :val + | + a + + self.target_nodes looks like: ( a >> + >> :val >> - >> :val2 ) + Returns: + moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. 
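+
+        In code terms (variable names illustrative): the first three nodes popped from the left form
+        the innermost UpdateExpressionValue, and each following (operator, operand) pair wraps it:
+
+            target_node = UpdateExpressionValue(children=[target_node, operator, operand])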
+ """ + if len(self.target_nodes) == 1: + return UpdateExpressionValue(children=[self.target_nodes.popleft()]) + else: + target_node = UpdateExpressionValue( + children=[ + self.target_nodes.popleft(), + self.target_nodes.popleft(), + self.target_nodes.popleft(), + ] + ) + while len(self.target_nodes) >= 2: + target_node = UpdateExpressionValue( + children=[ + target_node, + self.target_nodes.popleft(), + self.target_nodes.popleft(), + ] + ) + assert len(self.target_nodes) == 0 + return target_node + + +class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): + """ + Parser to create update expressions + """ + + @classmethod + def _sub_factories(cls): + return [ + UpdateExpressionSetClauseParser, + UpdateExpressionAddClauseParser, + UpdateExpressionDeleteClauseParser, + UpdateExpressionRemoveClauseParser, + ] + + @classmethod + def _is_possible_start(cls, token): + pass + + def __init__(self, *args, **kwargs): + super(UpdateExpressionParser, self).__init__(*args, **kwargs) + NestableExpressionParserMixin.__init__(self) + + @classmethod + def _nestable_class(cls): + return UpdateExpression + + def _parse_expression_clause(self, factory_class): + return self._parse_target_clause(factory_class) + + def _parse_by_a_subfactory(self): + for sub_factory in self._sub_factories(): + if sub_factory.is_possible_start(self.get_next_token()): + self._parse_expression_clause(sub_factory) + return True + return False + + def _parse(self): + """ + Update Expression is the top-most node therefore it is expected to end up at the end of the expression. + """ + while True: + self.skip_white_space() + if self.is_at_end(): + logging.debug("End reached") + break + elif self._parse_by_a_subfactory(): + continue + else: + self.raise_unexpected_token() + + return self._create_node() + + @classmethod + def make(cls, expression_str): + token_list = ExpressionTokenizer.make_list(expression_str) + return cls(token_list).parse() + + +class UpdateExpressionSetClauseParser(ExpressionParser): + """ + UpdateExpressionSetClause => SET SetActions + """ + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE and token.value.upper() == "SET" + + def _parse(self): + assert self.is_possible_start(self.get_next_token()) + self.goto_next_significant_token() + ast, self.token_pos = UpdateExpressionSetActionsParser( + **self._initializer_args() + )._parse_with_pos() + # noinspection PyProtectedMember + return UpdateExpressionSetClause(children=[ast]) + + +class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): + """ + UpdateExpressionSetActions + """ + + def __init__(self, *args, **kwargs): + super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) + NestableExpressionParserMixin.__init__(self) + + @classmethod + def _is_possible_start(cls, token): + raise RuntimeError( + "{class_name} cannot be identified by the next token.".format( + class_name=cls._nestable_class().__name__ + ) + ) + + @classmethod + @abstractmethod + def _nestable_class(cls): + return UpdateExpressionSetActions + + @classmethod + @abstractmethod + def _nested_expression_parser_class(cls): + """Returns the parser for the query part that creates the nested nodes""" + + def _parse(self): + """ + UpdateExpressionSetActions is inside the expression so it can be followed by others. Process SetActions one by + one until no more SetAction. 
+ """ + self.skip_white_space() + + while self._nested_expression_parser_class().is_possible_start( + self.get_next_token() + ): + self._parse_target_clause(self._nested_expression_parser_class()) + self.skip_white_space() + if self.get_next_token_type() == Token.COMMA: + self.goto_next_significant_token() + else: + break + + if len(self.target_clauses) == 0: + logging.debug( + "Didn't encounter a single {nc} in {nepc}.".format( + nc=self._nestable_class().__name__, + nepc=self._nested_expression_parser_class().__name__, + ) + ) + self.raise_unexpected_token() + + return self._create_node() + + +class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): + """ + UpdateExpressionSetActions + """ + + @classmethod + def _nested_expression_parser_class(cls): + return UpdateExpressionSetActionParser + + @classmethod + def _nestable_class(cls): + return UpdateExpressionSetActions + + +class UpdateExpressionSetActionParser(ExpressionParser): + """ + SetAction => Path = Value + + So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value. + """ + + @classmethod + def _is_possible_start(cls, token): + return UpdateExpressionPathParser.is_possible_start(token) + + def _parse(self): + """ + UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we should be aggressive on + raising invalid Tokens. We can thus do the following: + 1) Process path + 2) skip whitespace if there are any + 3) Process equal-sign token + 4) skip whitespace if there are any + 3) Process value + + """ + path, self.token_pos = UpdateExpressionPathParser( + **self._initializer_args() + )._parse_with_pos() + self.skip_white_space() + self.process_token_of_type(Token.EQUAL_SIGN) + self.skip_white_space() + value, self.token_pos = UpdateExpressionValueParser( + **self._initializer_args() + )._parse_with_pos() + return UpdateExpressionSetAction(children=[path, value]) + + +class UpdateExpressionPathParser(ExpressionParser): + """ + Paths are selectors within items to specify a part within an Item. DynamoDB does not impose much restrictions on the + data it stores but it does store more strict restrictions on how they are represented in UpdateExpression's. + + """ + + def __init__(self, *args, **kwargs): + super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) + self.path_nodes = [] + + @classmethod + def _is_possible_start(cls, token): + """ + Args: + token(Token): the token to be checked + + Returns: + bool: Whether the token could be the start of an UpdateExpressionPath + """ + if token.type == Token.ATTRIBUTE_NAME: + return True + elif token.type == Token.ATTRIBUTE and token.value.upper() != "REMOVE": + """We have to make sure remove is not passed""" + return True + return False + + def _parse(self): + return self.process_path() + + def process_path(self): + self.parse_path() + return UpdateExpressionPath(children=self.path_nodes) + + def parse_path(self): + """ + A path is comprised of: + - Attribute: the name of an attribute as how it is stored which has no special characters + - ATTRIBUTE_NAME: A placeholder that has no special characters except leading # to refer to attributes that + have a name that is not allowed in an UpdateExpression) + - DOT's: These are used to decent in a nested structure. When a DOT is in a path expression it is never part + of an attribute name but always means to descent into a MAP. 
We will call each descend a patch + chain + - SELECTORs: E.g.: [1] These are used to select an element in ordered datatypes like a list. + + Whitespaces can be between all these elements that build a path. For SELECTORs it is also allowed to have + whitespaces between brackets and numbers but the number cannot be split up with spaces + + Attributes and attribute_names must be separated with DOT's. + Returns: + UpdateExpressionPath: + """ + self.parse_path_chain() + while self.is_next_token_start_of_patch_chain(): + self.process_dot() + self.parse_path_chain() + + def is_next_token_start_of_patch_chain(self): + return self.get_next_token_type() == Token.DOT + + def process_dot(self): + self.path_nodes.append(ExpressionPathDescender()) + self.goto_next_significant_token() + + def parse_path_chain(self): + self.process_attribute_identifying_token() + self.skip_white_space() + while self.is_next_token_start_of_selector(): + self.process_selector() + self.skip_white_space() + + def process_attribute_identifying_token(self): + if self.get_next_token_type() == Token.ATTRIBUTE: + self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) + elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: + self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) + else: + self.raise_unexpected_token() + + self.goto_next_significant_token() + + def is_next_token_start_of_selector(self): + return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET + + def process_selector(self): + """ + Process the selector is only called when a selector must be processed. So do the following actions: + - skip opening bracket + - skip optional spaces + - read numeric literal + - skip optional spaces + - pass closing bracket + """ + self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) + selector_value = self.process_token_of_type(Token.NUMBER) + self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) + self.path_nodes.append(ExpressionSelector(selector_value)) + + +class UpdateExpressionValueParser(NestableBinExpressionParser): + @classmethod + def _is_possible_start(cls, token): + return UpdateExpressionOperandParser.is_possible_start(token) + + def _operand_factory_class(self): + return UpdateExpressionOperandParser + + def _binop_factory_class(self): + return UpdateExpressionValueOperatorParser + + +class UpdateExpressionGroupedValueParser(ExpressionParser): + """ + A grouped value is an Update Expression value clause that is surrounded by round brackets. Each Operand can be + a grouped value by itself. 
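+
+    For example, in "SET a = (:a + :b) - :c" the bracketed part is parsed into an
+    UpdateExpressionGroupedValue node wrapping the inner Value node.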
+ """ + + def _parse(self): + self.process_token_of_type(Token.OPEN_ROUND_BRACKET) + value, self.token_pos = UpdateExpressionValueParser( + **self._initializer_args() + )._parse_with_pos() + self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) + return UpdateExpressionGroupedValue(children=value) + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.OPEN_ROUND_BRACKET + + +class UpdateExpressionValueOperatorParser(ExpressionParser): + OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] + + @classmethod + def _is_possible_start(cls, token): + return token.type in cls.OPERATION_TOKENS + + def _parse(self): + operation_value = self.get_next_token_value() + assert operation_value in self.OPERATION_TOKENS + self.goto_next_significant_token() + return ExpressionValueOperator(operation_value) + + +class UpdateExpressionOperandParser(ExpressionParser): + """ + Grammar + Operand* => AttributeValue + Operand* => UpdateExpressionFunction + Operand* => Path + Operand* => GroupedValue + """ + + @classmethod + def _sub_factories(cls): + return [ + UpdateExpressionAttributeValueParser, + UpdateExpressionFunctionParser, + UpdateExpressionPathParser, + UpdateExpressionGroupedValueParser, + ] + + @classmethod + def _is_possible_start(cls, token): + return any(parser.is_possible_start(token) for parser in cls._sub_factories()) + + def _parse(self): + for factory in self._sub_factories(): + if factory.is_possible_start(self.get_next_token()): + node, self.token_pos = factory( + **self._initializer_args() + )._parse_with_pos() + return node + self.raise_unexpected_token() + + +class UpdateExpressionAttributeValueParser(ExpressionParser): + def _parse(self): + attr_value = ExpressionAttributeValue( + self.process_token_of_type(Token.ATTRIBUTE_VALUE) + ) + return attr_value + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE_VALUE + + +class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): + def _parse(self): + if UpdateExpressionAttributeValueParser.is_possible_start( + self.get_next_token() + ): + token, self.token_pos = UpdateExpressionAttributeValueParser( + **self._initializer_args() + )._parse_with_pos() + else: + token, self.token_pos = UpdateExpressionPathParser( + **self._initializer_args() + )._parse_with_pos() + return token + + @classmethod + def _is_possible_start(cls, token): + return any( + [ + UpdateExpressionAttributeValueParser.is_possible_start(token), + UpdateExpressionPathParser.is_possible_start(token), + ] + ) + + +class UpdateExpressionFunctionParser(ExpressionParser): + """ + A helper to process a function of an Update Expression + """ + + # Map function to the factories for its elements + FUNCTIONS = { + "if_not_exists": [ + UpdateExpressionPathParser, + UpdateExpressionAttributeValueOrPathParser, + ], + "list_append": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], + } + + @classmethod + def _is_possible_start(cls, token): + """ + Check whether a token is supposed to be a function + Args: + token(Token): the token to check + + Returns: + bool: True if token is the start of a function. 
+ """ + if token.type == Token.ATTRIBUTE: + return token.value in cls.FUNCTIONS.keys() + else: + return False + + def _parse(self): + function_name = self.get_next_token_value() + if function_name not in self.FUNCTIONS.keys(): + # Function names are case sensitive + raise InvalidUpdateExpression(function_name) + self.goto_next_significant_token() + self.process_token_of_type(Token.OPEN_ROUND_BRACKET) + function_elements = [function_name] + function_arguments = self.FUNCTIONS[function_name] + for i, func_elem_factory in enumerate(function_arguments): + func_elem, self.token_pos = func_elem_factory( + **self._initializer_args() + )._parse_with_pos() + function_elements.append(func_elem) + if i + 1 < len(function_arguments): + self.skip_white_space() + self.process_token_of_type(Token.COMMA) + self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) + return UpdateExpressionFunction(children=function_elements) + + +class UpdateExpressionRemoveClauseParser(ExpressionParser): + """ + UpdateExpressionRemoveClause => REMOVE RemoveActions + """ + + def _parse(self): + assert self.is_possible_start(self.get_next_token()) + self.goto_next_significant_token() + ast, self.token_pos = UpdateExpressionRemoveActionsParser( + **self._initializer_args() + )._parse_with_pos() + # noinspection PyProtectedMember + return UpdateExpressionRemoveClause(children=[ast]) + + @classmethod + def _is_possible_start(cls, token): + """REMOVE is not a keyword""" + return token.type == Token.ATTRIBUTE and token.value.upper() == "REMOVE" + + +class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): + """ + UpdateExpressionSetActions + """ + + @classmethod + def _nested_expression_parser_class(cls): + return UpdateExpressionRemoveActionParser + + @classmethod + def _nestable_class(cls): + return UpdateExpressionRemoveActions + + +class UpdateExpressionRemoveActionParser(ExpressionParser): + """ + RemoveAction => Path = Value + + So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value. + """ + + @classmethod + def _is_possible_start(cls, token): + return UpdateExpressionPathParser.is_possible_start(token) + + def _parse(self): + """ + UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So we should be aggressive on + raising invalid Tokens. 
We can thus do the following: + 1) Process path + 2) skip whitespace if there are any + + """ + path, self.token_pos = UpdateExpressionPathParser( + **self._initializer_args() + )._parse_with_pos() + self.skip_white_space() + return UpdateExpressionRemoveAction(children=[path]) + + +class UpdateExpressionAddClauseParser(ExpressionParser): + def _parse(self): + assert self.is_possible_start(self.get_next_token()) + self.goto_next_significant_token() + ast, self.token_pos = UpdateExpressionAddActionsParser( + **self._initializer_args() + )._parse_with_pos() + # noinspection PyProtectedMember + return UpdateExpressionAddClause(children=[ast]) + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE and token.value.upper() == "ADD" + + +class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): + """ + UpdateExpressionSetActions + """ + + @classmethod + def _nested_expression_parser_class(cls): + return UpdateExpressionAddActionParser + + @classmethod + def _nestable_class(cls): + return UpdateExpressionAddActions + + +@six.add_metaclass(abc.ABCMeta) +class UpdateExpressionPathValueParser(ExpressionParser): + def _parse_path_and_value(self): + """ + UpdateExpressionAddActionParser only gets called when expecting an AddAction. So we should be aggressive on + raising invalid Tokens. We can thus do the following: + 1) Process path + 2) skip whitespace if there are any + 3) Process a value + 4) skip whitespace if there are any + + Returns: + [path, value]: A list containing the Path node and the AttributeValue nodes + """ + path, self.token_pos = UpdateExpressionPathParser( + **self._initializer_args() + )._parse_with_pos() + self.skip_white_space() + value, self.token_pos = UpdateExpressionAttributeValueParser( + **self._initializer_args() + )._parse_with_pos() + self.skip_white_space() + return [path, value] + + +class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): + @classmethod + def _is_possible_start(cls, token): + return UpdateExpressionPathParser.is_possible_start(token) + + def _parse(self): + return UpdateExpressionAddAction(children=self._parse_path_and_value()) + + +class UpdateExpressionDeleteClauseParser(ExpressionParser): + def _parse(self): + assert self.is_possible_start(self.get_next_token()) + self.goto_next_significant_token() + ast, self.token_pos = UpdateExpressionDeleteActionsParser( + **self._initializer_args() + )._parse_with_pos() + # noinspection PyProtectedMember + return UpdateExpressionDeleteClause(children=[ast]) + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE and token.value.upper() == "DELETE" + + +class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): + """ + UpdateExpressionSetActions + """ + + @classmethod + def _nested_expression_parser_class(cls): + return UpdateExpressionDeleteActionParser + + @classmethod + def _nestable_class(cls): + return UpdateExpressionDeleteActions + + +class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): + @classmethod + def _is_possible_start(cls, token): + return UpdateExpressionPathParser.is_possible_start(token) + + def _parse(self): + return UpdateExpressionDeleteAction(children=self._parse_path_and_value()) diff --git a/moto/dynamodb2/parsing/reserved_keywords.py b/moto/dynamodb2/parsing/reserved_keywords.py new file mode 100644 index 000000000..d82b16e98 --- /dev/null +++ b/moto/dynamodb2/parsing/reserved_keywords.py @@ -0,0 +1,29 @@ +class ReservedKeywords(list): + """ + 
DynamoDB has an extensive list of keywords. Keywords are considered when validating the expression Tree. + Not earlier since an update expression like "SET path = VALUE 1" fails with: + 'Invalid UpdateExpression: Syntax error; token: "1", near: "VALUE 1"' + """ + + KEYWORDS = None + + @classmethod + def get_reserved_keywords(cls): + if cls.KEYWORDS is None: + cls.KEYWORDS = cls._get_reserved_keywords() + return cls.KEYWORDS + + @classmethod + def _get_reserved_keywords(cls): + """ + Get a list of reserved keywords of DynamoDB + """ + try: + import importlib.resources as pkg_resources + except ImportError: + import importlib_resources as pkg_resources + + reserved_keywords = pkg_resources.read_text( + "moto.dynamodb2.parsing", "reserved_keywords.txt" + ) + return reserved_keywords.split() diff --git a/moto/dynamodb2/parsing/reserved_keywords.txt b/moto/dynamodb2/parsing/reserved_keywords.txt new file mode 100644 index 000000000..7c0106127 --- /dev/null +++ b/moto/dynamodb2/parsing/reserved_keywords.txt @@ -0,0 +1,573 @@ +ABORT +ABSOLUTE +ACTION +ADD +AFTER +AGENT +AGGREGATE +ALL +ALLOCATE +ALTER +ANALYZE +AND +ANY +ARCHIVE +ARE +ARRAY +AS +ASC +ASCII +ASENSITIVE +ASSERTION +ASYMMETRIC +AT +ATOMIC +ATTACH +ATTRIBUTE +AUTH +AUTHORIZATION +AUTHORIZE +AUTO +AVG +BACK +BACKUP +BASE +BATCH +BEFORE +BEGIN +BETWEEN +BIGINT +BINARY +BIT +BLOB +BLOCK +BOOLEAN +BOTH +BREADTH +BUCKET +BULK +BY +BYTE +CALL +CALLED +CALLING +CAPACITY +CASCADE +CASCADED +CASE +CAST +CATALOG +CHAR +CHARACTER +CHECK +CLASS +CLOB +CLOSE +CLUSTER +CLUSTERED +CLUSTERING +CLUSTERS +COALESCE +COLLATE +COLLATION +COLLECTION +COLUMN +COLUMNS +COMBINE +COMMENT +COMMIT +COMPACT +COMPILE +COMPRESS +CONDITION +CONFLICT +CONNECT +CONNECTION +CONSISTENCY +CONSISTENT +CONSTRAINT +CONSTRAINTS +CONSTRUCTOR +CONSUMED +CONTINUE +CONVERT +COPY +CORRESPONDING +COUNT +COUNTER +CREATE +CROSS +CUBE +CURRENT +CURSOR +CYCLE +DATA +DATABASE +DATE +DATETIME +DAY +DEALLOCATE +DEC +DECIMAL +DECLARE +DEFAULT +DEFERRABLE +DEFERRED +DEFINE +DEFINED +DEFINITION +DELETE +DELIMITED +DEPTH +DEREF +DESC +DESCRIBE +DESCRIPTOR +DETACH +DETERMINISTIC +DIAGNOSTICS +DIRECTORIES +DISABLE +DISCONNECT +DISTINCT +DISTRIBUTE +DO +DOMAIN +DOUBLE +DROP +DUMP +DURATION +DYNAMIC +EACH +ELEMENT +ELSE +ELSEIF +EMPTY +ENABLE +END +EQUAL +EQUALS +ERROR +ESCAPE +ESCAPED +EVAL +EVALUATE +EXCEEDED +EXCEPT +EXCEPTION +EXCEPTIONS +EXCLUSIVE +EXEC +EXECUTE +EXISTS +EXIT +EXPLAIN +EXPLODE +EXPORT +EXPRESSION +EXTENDED +EXTERNAL +EXTRACT +FAIL +FALSE +FAMILY +FETCH +FIELDS +FILE +FILTER +FILTERING +FINAL +FINISH +FIRST +FIXED +FLATTERN +FLOAT +FOR +FORCE +FOREIGN +FORMAT +FORWARD +FOUND +FREE +FROM +FULL +FUNCTION +FUNCTIONS +GENERAL +GENERATE +GET +GLOB +GLOBAL +GO +GOTO +GRANT +GREATER +GROUP +GROUPING +HANDLER +HASH +HAVE +HAVING +HEAP +HIDDEN +HOLD +HOUR +IDENTIFIED +IDENTITY +IF +IGNORE +IMMEDIATE +IMPORT +IN +INCLUDING +INCLUSIVE +INCREMENT +INCREMENTAL +INDEX +INDEXED +INDEXES +INDICATOR +INFINITE +INITIALLY +INLINE +INNER +INNTER +INOUT +INPUT +INSENSITIVE +INSERT +INSTEAD +INT +INTEGER +INTERSECT +INTERVAL +INTO +INVALIDATE +IS +ISOLATION +ITEM +ITEMS +ITERATE +JOIN +KEY +KEYS +LAG +LANGUAGE +LARGE +LAST +LATERAL +LEAD +LEADING +LEAVE +LEFT +LENGTH +LESS +LEVEL +LIKE +LIMIT +LIMITED +LINES +LIST +LOAD +LOCAL +LOCALTIME +LOCALTIMESTAMP +LOCATION +LOCATOR +LOCK +LOCKS +LOG +LOGED +LONG +LOOP +LOWER +MAP +MATCH +MATERIALIZED +MAX +MAXLEN +MEMBER +MERGE +METHOD +METRICS +MIN +MINUS +MINUTE +MISSING +MOD +MODE +MODIFIES +MODIFY +MODULE +MONTH +MULTI +MULTISET +NAME +NAMES +NATIONAL 
+NATURAL +NCHAR +NCLOB +NEW +NEXT +NO +NONE +NOT +NULL +NULLIF +NUMBER +NUMERIC +OBJECT +OF +OFFLINE +OFFSET +OLD +ON +ONLINE +ONLY +OPAQUE +OPEN +OPERATOR +OPTION +OR +ORDER +ORDINALITY +OTHER +OTHERS +OUT +OUTER +OUTPUT +OVER +OVERLAPS +OVERRIDE +OWNER +PAD +PARALLEL +PARAMETER +PARAMETERS +PARTIAL +PARTITION +PARTITIONED +PARTITIONS +PATH +PERCENT +PERCENTILE +PERMISSION +PERMISSIONS +PIPE +PIPELINED +PLAN +POOL +POSITION +PRECISION +PREPARE +PRESERVE +PRIMARY +PRIOR +PRIVATE +PRIVILEGES +PROCEDURE +PROCESSED +PROJECT +PROJECTION +PROPERTY +PROVISIONING +PUBLIC +PUT +QUERY +QUIT +QUORUM +RAISE +RANDOM +RANGE +RANK +RAW +READ +READS +REAL +REBUILD +RECORD +RECURSIVE +REDUCE +REF +REFERENCE +REFERENCES +REFERENCING +REGEXP +REGION +REINDEX +RELATIVE +RELEASE +REMAINDER +RENAME +REPEAT +REPLACE +REQUEST +RESET +RESIGNAL +RESOURCE +RESPONSE +RESTORE +RESTRICT +RESULT +RETURN +RETURNING +RETURNS +REVERSE +REVOKE +RIGHT +ROLE +ROLES +ROLLBACK +ROLLUP +ROUTINE +ROW +ROWS +RULE +RULES +SAMPLE +SATISFIES +SAVE +SAVEPOINT +SCAN +SCHEMA +SCOPE +SCROLL +SEARCH +SECOND +SECTION +SEGMENT +SEGMENTS +SELECT +SELF +SEMI +SENSITIVE +SEPARATE +SEQUENCE +SERIALIZABLE +SESSION +SET +SETS +SHARD +SHARE +SHARED +SHORT +SHOW +SIGNAL +SIMILAR +SIZE +SKEWED +SMALLINT +SNAPSHOT +SOME +SOURCE +SPACE +SPACES +SPARSE +SPECIFIC +SPECIFICTYPE +SPLIT +SQL +SQLCODE +SQLERROR +SQLEXCEPTION +SQLSTATE +SQLWARNING +START +STATE +STATIC +STATUS +STORAGE +STORE +STORED +STREAM +STRING +STRUCT +STYLE +SUB +SUBMULTISET +SUBPARTITION +SUBSTRING +SUBTYPE +SUM +SUPER +SYMMETRIC +SYNONYM +SYSTEM +TABLE +TABLESAMPLE +TEMP +TEMPORARY +TERMINATED +TEXT +THAN +THEN +THROUGHPUT +TIME +TIMESTAMP +TIMEZONE +TINYINT +TO +TOKEN +TOTAL +TOUCH +TRAILING +TRANSACTION +TRANSFORM +TRANSLATE +TRANSLATION +TREAT +TRIGGER +TRIM +TRUE +TRUNCATE +TTL +TUPLE +TYPE +UNDER +UNDO +UNION +UNIQUE +UNIT +UNKNOWN +UNLOGGED +UNNEST +UNPROCESSED +UNSIGNED +UNTIL +UPDATE +UPPER +URL +USAGE +USE +USER +USERS +USING +UUID +VACUUM +VALUE +VALUED +VALUES +VARCHAR +VARIABLE +VARIANCE +VARINT +VARYING +VIEW +VIEWS +VIRTUAL +VOID +WAIT +WHEN +WHENEVER +WHERE +WHILE +WINDOW +WITH +WITHIN +WITHOUT +WORK +WRAPPED +WRITE +YEAR +ZONE diff --git a/moto/dynamodb2/parsing/tokens.py b/moto/dynamodb2/parsing/tokens.py new file mode 100644 index 000000000..34c3151ef --- /dev/null +++ b/moto/dynamodb2/parsing/tokens.py @@ -0,0 +1,223 @@ +import re +import sys + +from moto.dynamodb2.exceptions import ( + InvalidTokenException, + InvalidExpressionAttributeNameKey, +) + + +class Token(object): + _TOKEN_INSTANCE = None + MINUS_SIGN = "-" + PLUS_SIGN = "+" + SPACE_SIGN = " " + EQUAL_SIGN = "=" + OPEN_ROUND_BRACKET = "(" + CLOSE_ROUND_BRACKET = ")" + COMMA = "," + SPACE = " " + DOT = "." 
+ OPEN_SQUARE_BRACKET = "[" + CLOSE_SQUARE_BRACKET = "]" + + SPECIAL_CHARACTERS = [ + MINUS_SIGN, + PLUS_SIGN, + SPACE_SIGN, + EQUAL_SIGN, + OPEN_ROUND_BRACKET, + CLOSE_ROUND_BRACKET, + COMMA, + SPACE, + DOT, + OPEN_SQUARE_BRACKET, + CLOSE_SQUARE_BRACKET, + ] + + # Attribute: an identifier that is an attribute + ATTRIBUTE = 0 + # Place holder for attribute name + ATTRIBUTE_NAME = 1 + # Placeholder for attribute value starts with : + ATTRIBUTE_VALUE = 2 + # WhiteSpace shall be grouped together + WHITESPACE = 3 + # Placeholder for a number + NUMBER = 4 + + PLACEHOLDER_NAMES = { + ATTRIBUTE: "Attribute", + ATTRIBUTE_NAME: "AttributeName", + ATTRIBUTE_VALUE: "AttributeValue", + WHITESPACE: "Whitespace", + NUMBER: "Number", + } + + def __init__(self, token_type, value): + assert ( + token_type in self.SPECIAL_CHARACTERS + or token_type in self.PLACEHOLDER_NAMES + ) + self.type = token_type + self.value = value + + def __repr__(self): + if isinstance(self.type, int): + return 'Token("{tt}", "{tv}")'.format( + tt=self.PLACEHOLDER_NAMES[self.type], tv=self.value + ) + else: + return 'Token("{tt}", "{tv}")'.format(tt=self.type, tv=self.value) + + def __eq__(self, other): + return self.type == other.type and self.value == other.value + + +class ExpressionTokenizer(object): + """ + Takes a string and returns a list of tokens. While attribute names in DynamoDB must be between 1 and 255 characters + long there are no other restrictions for attribute names. For expressions however there are additional rules. If an + attribute name does not adhere then it must be passed via an ExpressionAttributeName. This tokenizer is aware of the + rules of Expression attributes. + + We consider a Token as a tuple which has the tokenType + + From https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html + 1) If an attribute name begins with a number or contains a space, a special character, or a reserved word, you + must use an expression attribute name to replace that attribute's name in the expression. + => So spaces,+,- or other special characters do identify tokens in update expressions + + 2) When using a dot (.) in an attribute name you must use expression-attribute-names. A dot in an expression + will be interpreted as a separator in a document path + + 3) For a nested structure if you want to use expression_attribute_names you must specify one per part of the + path. Since for members of expression_attribute_names the . is part of the name + + """ + + @classmethod + def is_simple_token_character(cls, character): + return character.isalnum() or character in ("_", ":", "#") + + @classmethod + def is_possible_token_boundary(cls, character): + return ( + character in Token.SPECIAL_CHARACTERS + or not cls.is_simple_token_character(character) + ) + + @classmethod + def is_expression_attribute(cls, input_string): + return re.compile("^[a-zA-Z0-9][a-zA-Z0-9_]*$").match(input_string) is not None + + @classmethod + def is_expression_attribute_name(cls, input_string): + """ + https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html + An expression attribute name must begin with a pound sign (#), and be followed by one or more alphanumeric + characters. 
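+
+        For example, "#attr" and "#attr0" are valid expression attribute names, whereas "attr",
+        "#", and "#attr.path" are not.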
+ """ + return input_string.startswith("#") and cls.is_expression_attribute( + input_string[1:] + ) + + @classmethod + def is_expression_attribute_value(cls, input_string): + return re.compile("^:[a-zA-Z0-9_]*$").match(input_string) is not None + + def raise_unexpected_token(self): + """If during parsing an unexpected token is encountered""" + if len(self.token_list) == 0: + near = "" + else: + if len(self.token_list) == 1: + near = self.token_list[-1].value + else: + if self.token_list[-1].type == Token.WHITESPACE: + # Last token was whitespace take 2nd last token value as well to help User orientate + near = self.token_list[-2].value + self.token_list[-1].value + else: + near = self.token_list[-1].value + + problematic_token = self.staged_characters[0] + raise InvalidTokenException(problematic_token, near + self.staged_characters) + + def __init__(self, input_expression_str): + self.input_expression_str = input_expression_str + self.token_list = [] + self.staged_characters = "" + + @classmethod + def is_py2(cls): + return sys.version_info[0] == 2 + + @classmethod + def make_list(cls, input_expression_str): + if cls.is_py2(): + pass + else: + assert isinstance(input_expression_str, str) + + return ExpressionTokenizer(input_expression_str)._make_list() + + def add_token(self, token_type, token_value): + self.token_list.append(Token(token_type, token_value)) + + def add_token_from_stage(self, token_type): + self.add_token(token_type, self.staged_characters) + self.staged_characters = "" + + @classmethod + def is_numeric(cls, input_str): + return re.compile("[0-9]+").match(input_str) is not None + + def process_staged_characters(self): + if len(self.staged_characters) == 0: + return + if self.staged_characters.startswith("#"): + if self.is_expression_attribute_name(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE_NAME) + else: + raise InvalidExpressionAttributeNameKey(self.staged_characters) + elif self.is_numeric(self.staged_characters): + self.add_token_from_stage(Token.NUMBER) + elif self.is_expression_attribute(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE) + elif self.is_expression_attribute_value(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE_VALUE) + else: + self.raise_unexpected_token() + + def _make_list(self): + """ + Just go through characters if a character is not a token boundary stage it for adding it as a grouped token + later if it is a tokenboundary process staged characters and then process the token boundary as well. 
+ """ + for character in self.input_expression_str: + if not self.is_possible_token_boundary(character): + self.staged_characters += character + else: + self.process_staged_characters() + + if character == Token.SPACE: + if ( + len(self.token_list) > 0 + and self.token_list[-1].type == Token.WHITESPACE + ): + self.token_list[-1].value = ( + self.token_list[-1].value + character + ) + else: + self.add_token(Token.WHITESPACE, character) + elif character in Token.SPECIAL_CHARACTERS: + self.add_token(character, character) + elif not self.is_simple_token_character(character): + self.staged_characters += character + self.raise_unexpected_token() + else: + raise NotImplementedError( + "Encountered character which was not implemented : " + character + ) + self.process_staged_characters() + return self.token_list diff --git a/moto/dynamodb2/parsing/validators.py b/moto/dynamodb2/parsing/validators.py new file mode 100644 index 000000000..79849e538 --- /dev/null +++ b/moto/dynamodb2/parsing/validators.py @@ -0,0 +1,394 @@ +""" +See docstring class Validator below for more details on validation +""" +from abc import abstractmethod +from copy import deepcopy + +from moto.dynamodb2.exceptions import ( + AttributeIsReservedKeyword, + ExpressionAttributeValueNotDefined, + AttributeDoesNotExist, + ExpressionAttributeNameNotDefined, + IncorrectOperandType, + InvalidUpdateExpressionInvalidDocumentPath, + ProvidedKeyDoesNotExist, + EmptyKeyAttributeException, +) +from moto.dynamodb2.models import DynamoType +from moto.dynamodb2.parsing.ast_nodes import ( + ExpressionAttribute, + UpdateExpressionPath, + UpdateExpressionSetAction, + UpdateExpressionAddAction, + UpdateExpressionDeleteAction, + UpdateExpressionRemoveAction, + DDBTypedValue, + ExpressionAttributeValue, + ExpressionAttributeName, + DepthFirstTraverser, + NoneExistingPath, + UpdateExpressionFunction, + ExpressionPathDescender, + UpdateExpressionValue, + ExpressionValueOperator, + ExpressionSelector, +) +from moto.dynamodb2.parsing.reserved_keywords import ReservedKeywords + + +class ExpressionAttributeValueProcessor(DepthFirstTraverser): + def __init__(self, expression_attribute_values): + self.expression_attribute_values = expression_attribute_values + + def _processing_map(self): + return { + ExpressionAttributeValue: self.replace_expression_attribute_value_with_value + } + + def replace_expression_attribute_value_with_value(self, node): + """A node representing an Expression Attribute Value. 
Resolve it and replace the node with the resolved value."""
+        assert isinstance(node, ExpressionAttributeValue)
+        attribute_value_name = node.get_value_name()
+        try:
+            target = self.expression_attribute_values[attribute_value_name]
+        except KeyError:
+            raise ExpressionAttributeValueNotDefined(
+                attribute_value=attribute_value_name
+            )
+        return DDBTypedValue(DynamoType(target))
+
+
+class ExpressionPathResolver(object):
+    def __init__(self, expression_attribute_names):
+        self.expression_attribute_names = expression_attribute_names
+
+    @classmethod
+    def raise_exception_if_keyword(cls, attribute):
+        if attribute.upper() in ReservedKeywords.get_reserved_keywords():
+            raise AttributeIsReservedKeyword(attribute)
+
+    def resolve_expression_path(self, item, update_expression_path):
+        assert isinstance(update_expression_path, UpdateExpressionPath)
+        return self.resolve_expression_path_nodes(item, update_expression_path.children)
+
+    def resolve_expression_path_nodes(self, item, update_expression_path_nodes):
+        target = item.attrs
+
+        for child in update_expression_path_nodes:
+            # First, replace a name placeholder with the actual attribute name
+            attr_name = None
+            if isinstance(child, ExpressionAttributeName):
+                attr_placeholder = child.get_attribute_name_placeholder()
+                try:
+                    attr_name = self.expression_attribute_names[attr_placeholder]
+                except KeyError:
+                    raise ExpressionAttributeNameNotDefined(attr_placeholder)
+            elif isinstance(child, ExpressionAttribute):
+                attr_name = child.get_attribute_name()
+                self.raise_exception_if_keyword(attr_name)
+            if attr_name is not None:
+                # Resolve the attribute name against the current target
+                try:
+                    target = target[attr_name]
+                except (KeyError, TypeError):
+                    if child == update_expression_path_nodes[-1]:
+                        return NoneExistingPath(creatable=True)
+                    return NoneExistingPath()
+            else:
+                if isinstance(child, ExpressionPathDescender):
+                    continue
+                elif isinstance(child, ExpressionSelector):
+                    index = child.get_index()
+                    if target.is_list():
+                        try:
+                            target = target[index]
+                        except IndexError:
+                            # An out-of-bounds list index is not a problem on the
+                            # assignment side: the value is simply appended to the list.
+                            if child == update_expression_path_nodes[-1]:
+                                return NoneExistingPath(creatable=True)
+                            return NoneExistingPath()
+                    else:
+                        raise InvalidUpdateExpressionInvalidDocumentPath
+                else:
+                    raise NotImplementedError(
+                        "Path resolution for {t}".format(t=type(child))
+                    )
+        return DDBTypedValue(target)
+
+    def resolve_expression_path_nodes_to_dynamo_type(
+        self, item, update_expression_path_nodes
+    ):
+        node = self.resolve_expression_path_nodes(item, update_expression_path_nodes)
+        if isinstance(node, NoneExistingPath):
+            raise ProvidedKeyDoesNotExist()
+        assert isinstance(node, DDBTypedValue)
+        return node.get_value()
+
+
+class ExpressionAttributeResolvingProcessor(DepthFirstTraverser):
+    def _processing_map(self):
+        return {
+            UpdateExpressionSetAction: self.disable_resolving,
+            UpdateExpressionPath: self.process_expression_path_node,
+        }
+
+    def __init__(self, expression_attribute_names, item):
+        self.expression_attribute_names = expression_attribute_names
+        self.item = item
+        self.resolving = False
+
+    def pre_processing_of_child(self, parent_node, child_id):
+        """
+        Resolving must be enabled when processing any child of an update action (SET, REMOVE, DELETE, ADD) other
+        than the first, because the first argument is the path to assign to and the later arguments hold the value.
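+
+        For example (illustrative): in "SET #a = :v" the UpdateExpressionSetAction
+        has the path (#a) as child 0, which must stay unresolved, and the value
+        (:v) as child 1, which must be resolved before it can be assigned.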
+ """ + if isinstance( + parent_node, + ( + UpdateExpressionSetAction, + UpdateExpressionRemoveAction, + UpdateExpressionDeleteAction, + UpdateExpressionAddAction, + ), + ): + if child_id == 0: + self.resolving = False + else: + self.resolving = True + + def disable_resolving(self, node=None): + self.resolving = False + return node + + def process_expression_path_node(self, node): + """Resolve ExpressionAttribute if not part of a path and resolving is enabled.""" + if self.resolving: + return self.resolve_expression_path(node) + else: + # Still resolve but return original note to make sure path is correct Just make sure nodes are creatable. + result_node = self.resolve_expression_path(node) + if ( + isinstance(result_node, NoneExistingPath) + and not result_node.is_creatable() + ): + raise InvalidUpdateExpressionInvalidDocumentPath() + + return node + + def resolve_expression_path(self, node): + return ExpressionPathResolver( + self.expression_attribute_names + ).resolve_expression_path(self.item, node) + + +class UpdateExpressionFunctionEvaluator(DepthFirstTraverser): + """ + At time of writing there are only 2 functions for DDB UpdateExpressions. They both are specific to the SET + expression as per the official AWS docs: + https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ + Expressions.UpdateExpressions.html#Expressions.UpdateExpressions.SET + """ + + def _processing_map(self): + return {UpdateExpressionFunction: self.process_function} + + def process_function(self, node): + assert isinstance(node, UpdateExpressionFunction) + function_name = node.get_function_name() + first_arg = node.get_nth_argument(1) + second_arg = node.get_nth_argument(2) + + if function_name == "if_not_exists": + if isinstance(first_arg, NoneExistingPath): + result = second_arg + else: + result = first_arg + assert isinstance(result, (DDBTypedValue, NoneExistingPath)) + return result + elif function_name == "list_append": + first_arg = deepcopy( + self.get_list_from_ddb_typed_value(first_arg, function_name) + ) + second_arg = self.get_list_from_ddb_typed_value(second_arg, function_name) + for list_element in second_arg.value: + first_arg.value.append(list_element) + return DDBTypedValue(first_arg) + else: + raise NotImplementedError( + "Unsupported function for moto {name}".format(name=function_name) + ) + + @classmethod + def get_list_from_ddb_typed_value(cls, node, function_name): + assert isinstance(node, DDBTypedValue) + dynamo_value = node.get_value() + assert isinstance(dynamo_value, DynamoType) + if not dynamo_value.is_list(): + raise IncorrectOperandType(function_name, dynamo_value.type) + return dynamo_value + + +class NoneExistingPathChecker(DepthFirstTraverser): + """ + Pass through the AST and make sure there are no none-existing paths. + """ + + def _processing_map(self): + return {NoneExistingPath: self.raise_none_existing_path} + + def raise_none_existing_path(self, node): + raise AttributeDoesNotExist + + +class ExecuteOperations(DepthFirstTraverser): + def _processing_map(self): + return {UpdateExpressionValue: self.process_update_expression_value} + + def process_update_expression_value(self, node): + """ + If an UpdateExpressionValue only has a single child the node will be replaced with the childe. + Otherwise it has 3 children and the middle one is an ExpressionValueOperator which details how to combine them + Args: + node(Node): + + Returns: + Node: The resulting node of the operation if present or the child. 
+ """ + assert isinstance(node, UpdateExpressionValue) + if len(node.children) == 1: + return node.children[0] + elif len(node.children) == 3: + operator_node = node.children[1] + assert isinstance(operator_node, ExpressionValueOperator) + operator = operator_node.get_operator() + left_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[0]) + right_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[2]) + if operator == "+": + return self.get_sum(left_operand, right_operand) + elif operator == "-": + return self.get_subtraction(left_operand, right_operand) + else: + raise NotImplementedError( + "Moto does not support operator {operator}".format( + operator=operator + ) + ) + else: + raise NotImplementedError( + "UpdateExpressionValue only has implementations for 1 or 3 children." + ) + + @classmethod + def get_dynamo_value_from_ddb_typed_value(cls, node): + assert isinstance(node, DDBTypedValue) + dynamo_value = node.get_value() + assert isinstance(dynamo_value, DynamoType) + return dynamo_value + + @classmethod + def get_sum(cls, left_operand, right_operand): + """ + Args: + left_operand(DynamoType): + right_operand(DynamoType): + + Returns: + DDBTypedValue: + """ + try: + return DDBTypedValue(left_operand + right_operand) + except TypeError: + raise IncorrectOperandType("+", left_operand.type) + + @classmethod + def get_subtraction(cls, left_operand, right_operand): + """ + Args: + left_operand(DynamoType): + right_operand(DynamoType): + + Returns: + DDBTypedValue: + """ + try: + return DDBTypedValue(left_operand - right_operand) + except TypeError: + raise IncorrectOperandType("-", left_operand.type) + + +class EmptyStringKeyValueValidator(DepthFirstTraverser): + def __init__(self, key_attributes): + self.key_attributes = key_attributes + + def _processing_map(self): + return {UpdateExpressionSetAction: self.check_for_empty_string_key_value} + + def check_for_empty_string_key_value(self, node): + """A node representing a SET action. Check that keys are not being assigned empty strings""" + assert isinstance(node, UpdateExpressionSetAction) + assert len(node.children) == 2 + key = node.children[0].children[0].children[0] + val_node = node.children[1].children[0] + if val_node.type in ["S", "B"] and key in self.key_attributes: + raise EmptyKeyAttributeException + return node + + +class Validator(object): + """ + A validator is used to validate expressions which are passed in as an AST. + """ + + def __init__( + self, + expression, + expression_attribute_names, + expression_attribute_values, + item, + table, + ): + """ + Besides validation the Validator should also replace referenced parts of an item which is cheapest upon + validation. 
+ + Args: + expression(Node): The root node of the AST representing the expression to be validated + expression_attribute_names(ExpressionAttributeNames): + expression_attribute_values(ExpressionAttributeValues): + item(Item): The item which will be updated (pointed to by Key of update_item) + """ + self.expression_attribute_names = expression_attribute_names + self.expression_attribute_values = expression_attribute_values + self.item = item + self.table = table + self.processors = self.get_ast_processors() + self.node_to_validate = deepcopy(expression) + + @abstractmethod + def get_ast_processors(self): + """Get the different processors that go through the AST tree and processes the nodes.""" + + def validate(self): + n = self.node_to_validate + for processor in self.processors: + n = processor.traverse(n) + return n + + +class UpdateExpressionValidator(Validator): + def get_ast_processors(self): + """Get the different processors that go through the AST tree and processes the nodes.""" + processors = [ + ExpressionAttributeValueProcessor(self.expression_attribute_values), + ExpressionAttributeResolvingProcessor( + self.expression_attribute_names, self.item + ), + UpdateExpressionFunctionEvaluator(), + NoneExistingPathChecker(), + ExecuteOperations(), + EmptyStringKeyValueValidator(self.table.key_attributes), + ] + return processors diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d3767c3fd..85d265f6d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -1,24 +1,38 @@ from __future__ import unicode_literals -import itertools + +import copy import json -import six import re +import itertools +import six + from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge -from .models import dynamodb_backends, dynamo_json_dump +from .exceptions import ( + InvalidIndexNameError, + ItemSizeTooLarge, + MockValidationException, + TransactionCanceledException, +) +from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump -def has_empty_keys_or_values(_dict): - if _dict == "": - return True - if not isinstance(_dict, dict): - return False - return any( - key == "" or value == "" or has_empty_keys_or_values(value) - for key, value in _dict.items() - ) +TRANSACTION_MAX_ITEMS = 25 + + +def put_has_empty_keys(field_updates, table): + if table: + key_names = table.key_attributes + + # string/binary fields with empty string as value + empty_str_fields = [ + key + for (key, val) in field_updates.items() + if next(iter(val.keys())) in ["S", "B"] and next(iter(val.values())) == "" + ] + return any([keyname in empty_str_fields for keyname in key_names]) + return False def get_empty_str_error(): @@ -86,19 +100,14 @@ class DynamoHandler(BaseResponse): def list_tables(self): body = self.body limit = body.get("Limit", 100) - if body.get("ExclusiveStartTableName"): - last = body.get("ExclusiveStartTableName") - start = list(self.dynamodb_backend.tables.keys()).index(last) + 1 - else: - start = 0 - all_tables = list(self.dynamodb_backend.tables.keys()) - if limit: - tables = all_tables[start : start + limit] - else: - tables = all_tables[start:] + exclusive_start_table_name = body.get("ExclusiveStartTableName") + tables, last_eval = self.dynamodb_backend.list_tables( + limit, exclusive_start_table_name + ) + response = {"TableNames": tables} - if limit and len(all_tables) > start + limit: - 
response["LastEvaluatedTableName"] = tables[-1] + if last_eval: + response["LastEvaluatedTableName"] = last_eval return dynamo_json_dump(response) @@ -218,33 +227,29 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body["TableName"] - table = self.dynamodb_backend.get_table(name) - if "GlobalSecondaryIndexUpdates" in self.body: - table = self.dynamodb_backend.update_table_global_indexes( - name, self.body["GlobalSecondaryIndexUpdates"] + global_index = self.body.get("GlobalSecondaryIndexUpdates", None) + throughput = self.body.get("ProvisionedThroughput", None) + stream_spec = self.body.get("StreamSpecification", None) + try: + table = self.dynamodb_backend.update_table( + name=name, + global_index=global_index, + throughput=throughput, + stream_spec=stream_spec, ) - if "ProvisionedThroughput" in self.body: - throughput = self.body["ProvisionedThroughput"] - table = self.dynamodb_backend.update_table_throughput(name, throughput) - if "StreamSpecification" in self.body: - try: - table = self.dynamodb_backend.update_table_streams( - name, self.body["StreamSpecification"] - ) - except ValueError: - er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" - return self.error(er, "Cannot enable stream") - - return dynamo_json_dump(table.describe()) + return dynamo_json_dump(table.describe()) + except ValueError: + er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" + return self.error(er, "Cannot enable stream") def describe_table(self): name = self.body["TableName"] try: - table = self.dynamodb_backend.tables[name] + table = self.dynamodb_backend.describe_table(name) + return dynamo_json_dump(table) except KeyError: er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException" return self.error(er, "Requested resource not found") - return dynamo_json_dump(table.describe(base_key="Table")) def put_item(self): name = self.body["TableName"] @@ -255,7 +260,7 @@ class DynamoHandler(BaseResponse): er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, "Return values set to invalid value") - if has_empty_keys_or_values(item): + if put_has_empty_keys(item, self.dynamodb_backend.get_table(name)): return get_empty_str_error() overwrite = "Expected" not in self.body @@ -292,12 +297,13 @@ class DynamoHandler(BaseResponse): ) except ItemSizeTooLarge: er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error(er, ItemSizeTooLarge.message) - except ValueError: + return self.error(er, ItemSizeTooLarge.item_size_too_large_msg) + except KeyError as ke: + er = "com.amazonaws.dynamodb.v20111205#ValidationException" + return self.error(er, ke.args[0]) + except ValueError as ve: er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" - return self.error( - er, "A condition specified in the operation could not be evaluated." - ) + return self.error(er, str(ve)) if result: item_dict = result.to_json() @@ -368,6 +374,26 @@ class DynamoHandler(BaseResponse): results = {"ConsumedCapacity": [], "Responses": {}, "UnprocessedKeys": {}} + # Validation: Can only request up to 100 items at the same time + # Scenario 1: We're requesting more than a 100 keys from a single table + for table_name, table_request in table_batches.items(): + if len(table_request["Keys"]) > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "1 validation error detected: Value at 'requestItems." 
+ + table_name + + ".member.keys' failed to satisfy constraint: Member must have length less than or equal to 100", + ) + # Scenario 2: We're requesting more than a 100 keys across all tables + nr_of_keys_across_all_tables = sum( + [len(req["Keys"]) for _, req in table_batches.items()] + ) + if nr_of_keys_across_all_tables > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "Too many items requested for the BatchGetItem call", + ) + for table_name, table_request in table_batches.items(): keys = table_request["Keys"] if self._contains_duplicates(keys): @@ -408,7 +434,6 @@ class DynamoHandler(BaseResponse): def query(self): name = self.body["TableName"] - # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get("KeyConditionExpression") projection_expression = self.body.get("ProjectionExpression") expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) @@ -436,7 +461,7 @@ class DynamoHandler(BaseResponse): index_name = self.body.get("IndexName") if index_name: all_indexes = (table.global_indexes or []) + (table.indexes or []) - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: er = "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException" return self.error( @@ -446,7 +471,7 @@ class DynamoHandler(BaseResponse): ), ) - index = indexes_by_name[index_name]["KeySchema"] + index = indexes_by_name[index_name].schema else: index = table.schema @@ -455,8 +480,10 @@ class DynamoHandler(BaseResponse): for k, v in six.iteritems(self.body.get("ExpressionAttributeNames", {})) ) - if " AND " in key_condition_expression: - expressions = key_condition_expression.split(" AND ", 1) + if " and " in key_condition_expression.lower(): + expressions = re.split( + " AND ", key_condition_expression, maxsplit=1, flags=re.IGNORECASE + ) index_hash_key = [key for key in index if key["KeyType"] == "HASH"][0] hash_key_var = reverse_attribute_lookup.get( @@ -710,7 +737,8 @@ class DynamoHandler(BaseResponse): attribute_updates = self.body.get("AttributeUpdates") expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) expression_attribute_values = self.body.get("ExpressionAttributeValues", {}) - existing_item = self.dynamodb_backend.get_item(name, key) + # We need to copy the item in order to avoid it being modified by the update_item operation + existing_item = copy.deepcopy(self.dynamodb_backend.get_item(name, key)) if existing_item: existing_attributes = existing_item.to_json()["Attributes"] else: @@ -726,9 +754,6 @@ class DynamoHandler(BaseResponse): er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, "Return values set to invalid value") - if has_empty_keys_or_values(expression_attribute_values): - return get_empty_str_error() - if "Expected" in self.body: expected = self.body["Expected"] else: @@ -740,31 +765,20 @@ class DynamoHandler(BaseResponse): expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) expression_attribute_values = self.body.get("ExpressionAttributeValues", {}) - # Support spaces between operators in an update expression - # E.g. 
`a = b + c` -> `a=b+c` - if update_expression: - update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression) - try: item = self.dynamodb_backend.update_item( name, key, - update_expression, - attribute_updates, - expression_attribute_names, - expression_attribute_values, - expected, - condition_expression, + update_expression=update_expression, + attribute_updates=attribute_updates, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + expected=expected, + condition_expression=condition_expression, ) - except InvalidUpdateExpression: + except MockValidationException as mve: er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error( - er, - "The document path provided in the update expression is invalid for update", - ) - except ItemSizeTooLarge: - er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error(er, ItemSizeTooLarge.message) + return self.error(er, mve.exception_msg) except ValueError: er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" return self.error( @@ -796,14 +810,36 @@ class DynamoHandler(BaseResponse): k: v for k, v in existing_attributes.items() if k in changed_attributes } elif return_values == "UPDATED_NEW": - item_dict["Attributes"] = { - k: v - for k, v in item_dict["Attributes"].items() - if k in changed_attributes - } - + item_dict["Attributes"] = self._build_updated_new_attributes( + existing_attributes, item_dict["Attributes"] + ) return dynamo_json_dump(item_dict) + def _build_updated_new_attributes(self, original, changed): + if type(changed) != type(original): + return changed + else: + if type(changed) is dict: + return { + key: self._build_updated_new_attributes( + original.get(key, None), changed[key] + ) + for key in changed.keys() + if key not in original or changed[key] != original[key] + } + elif type(changed) in (set, list): + if len(changed) != len(original): + return changed + else: + return [ + self._build_updated_new_attributes( + original[index], changed[index] + ) + for index in range(len(changed)) + ] + else: + return changed + def describe_limits(self): return json.dumps( { @@ -818,13 +854,117 @@ class DynamoHandler(BaseResponse): name = self.body["TableName"] ttl_spec = self.body["TimeToLiveSpecification"] - self.dynamodb_backend.update_ttl(name, ttl_spec) + self.dynamodb_backend.update_time_to_live(name, ttl_spec) return json.dumps({"TimeToLiveSpecification": ttl_spec}) def describe_time_to_live(self): name = self.body["TableName"] - ttl_spec = self.dynamodb_backend.describe_ttl(name) + ttl_spec = self.dynamodb_backend.describe_time_to_live(name) return json.dumps({"TimeToLiveDescription": ttl_spec}) + + def transact_get_items(self): + transact_items = self.body["TransactItems"] + responses = list() + + if len(transact_items) > TRANSACTION_MAX_ITEMS: + msg = "1 validation error detected: Value '[" + err_list = list() + request_id = 268435456 + for _ in transact_items: + request_id += 1 + hex_request_id = format(request_id, "x") + err_list.append( + "com.amazonaws.dynamodb.v20120810.TransactGetItem@%s" + % hex_request_id + ) + msg += ", ".join(err_list) + msg += ( + "'] at 'transactItems' failed to satisfy constraint: " + "Member must have length less than or equal to %s" + % TRANSACTION_MAX_ITEMS + ) + + return self.error("ValidationException", msg) + + ret_consumed_capacity = self.body.get("ReturnConsumedCapacity", "NONE") + consumed_capacity = dict() + + for transact_item in transact_items: + + 
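+            # Each entry is expected to follow the AWS API shape, e.g.
+            #   {"Get": {"TableName": "t", "Key": {"pk": {"S": "v"}}}}
+            # (table and key names here are illustrative).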
table_name = transact_item["Get"]["TableName"] + key = transact_item["Get"]["Key"] + try: + item = self.dynamodb_backend.get_item(table_name, key) + except ValueError: + er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException" + return self.error(er, "Requested resource not found") + + if not item: + responses.append({}) + continue + + item_describe = item.describe_attrs(False) + responses.append(item_describe) + + table_capacity = consumed_capacity.get(table_name, {}) + table_capacity["TableName"] = table_name + capacity_units = table_capacity.get("CapacityUnits", 0) + 2.0 + table_capacity["CapacityUnits"] = capacity_units + read_capacity_units = table_capacity.get("ReadCapacityUnits", 0) + 2.0 + table_capacity["ReadCapacityUnits"] = read_capacity_units + consumed_capacity[table_name] = table_capacity + + if ret_consumed_capacity == "INDEXES": + table_capacity["Table"] = { + "CapacityUnits": capacity_units, + "ReadCapacityUnits": read_capacity_units, + } + + result = dict() + result.update({"Responses": responses}) + if ret_consumed_capacity != "NONE": + result.update({"ConsumedCapacity": [v for v in consumed_capacity.values()]}) + + return dynamo_json_dump(result) + + def transact_write_items(self): + transact_items = self.body["TransactItems"] + try: + self.dynamodb_backend.transact_write_items(transact_items) + except TransactionCanceledException as e: + er = "com.amazonaws.dynamodb.v20111205#TransactionCanceledException" + return self.error(er, str(e)) + response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}} + return dynamo_json_dump(response) + + def describe_continuous_backups(self): + name = self.body["TableName"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.describe_continuous_backups(name) + + return json.dumps({"ContinuousBackupsDescription": response}) + + def update_continuous_backups(self): + name = self.body["TableName"] + point_in_time_spec = self.body["PointInTimeRecoverySpecification"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.update_continuous_backups( + name, point_in_time_spec + ) + + return json.dumps({"ContinuousBackupsDescription": response}) diff --git a/moto/dynamodbstreams/models.py b/moto/dynamodbstreams/models.py index dc6f0e0d3..f62c49877 100644 --- a/moto/dynamodbstreams/models.py +++ b/moto/dynamodbstreams/models.py @@ -7,7 +7,7 @@ import base64 from boto3 import Session from moto.core import BaseBackend, BaseModel -from moto.dynamodb2.models import dynamodb_backends +from moto.dynamodb2.models import dynamodb_backends, DynamoJsonEncoder class ShardIterator(BaseModel): @@ -137,7 +137,7 @@ class DynamoDBStreamsBackend(BaseBackend): def get_records(self, iterator_arn, limit): shard_iterator = self.shard_iterators[iterator_arn] - return json.dumps(shard_iterator.get(limit)) + return json.dumps(shard_iterator.get(limit), cls=DynamoJsonEncoder) dynamodbstreams_backends = {} diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index b2c1792f2..348c3f723 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -71,6 +71,24 @@ class InvalidSubnetIdError(EC2ClientError): ) +class InvalidFlowLogIdError(EC2ClientError): + def __init__(self, count, flow_log_ids): + super(InvalidFlowLogIdError, 
self).__init__( + "InvalidFlowLogId.NotFound", + "These flow log ids in the input list are not found: [TotalCount: {0}] {1}".format( + count, flow_log_ids + ), + ) + + +class FlowLogAlreadyExists(EC2ClientError): + def __init__(self): + super(FlowLogAlreadyExists, self).__init__( + "FlowLogAlreadyExists", + "Error. There is an existing Flow Log with the same configuration and log destination.", + ) + + class InvalidNetworkAclIdError(EC2ClientError): def __init__(self, network_acl_id): super(InvalidNetworkAclIdError, self).__init__( @@ -231,6 +249,24 @@ class InvalidVolumeAttachmentError(EC2ClientError): ) +class InvalidVolumeDetachmentError(EC2ClientError): + def __init__(self, volume_id, instance_id, device): + super(InvalidVolumeDetachmentError, self).__init__( + "InvalidAttachment.NotFound", + "The volume {0} is not attached to instance {1} as device {2}".format( + volume_id, instance_id, device + ), + ) + + +class VolumeInUseError(EC2ClientError): + def __init__(self, volume_id, instance_id): + super(VolumeInUseError, self).__init__( + "VolumeInUse", + "Volume {0} is currently attached to {1}".format(volume_id, instance_id), + ) + + class InvalidDomainError(EC2ClientError): def __init__(self, domain): super(InvalidDomainError, self).__init__( @@ -245,6 +281,14 @@ class InvalidAddressError(EC2ClientError): ) +class LogDestinationNotFoundError(EC2ClientError): + def __init__(self, bucket_name): + super(LogDestinationNotFoundError, self).__init__( + "LogDestinationNotFoundException", + "LogDestination: '{0}' does not exist.".format(bucket_name), + ) + + class InvalidAllocationIdError(EC2ClientError): def __init__(self, allocation_id): super(InvalidAllocationIdError, self).__init__( @@ -291,6 +335,33 @@ class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError): ) +class InvalidDependantParameterError(EC2ClientError): + def __init__(self, dependant_parameter, parameter, parameter_value): + super(InvalidDependantParameterError, self).__init__( + "InvalidParameter", + "{0} can't be empty if {1} is {2}.".format( + dependant_parameter, parameter, parameter_value, + ), + ) + + +class InvalidDependantParameterTypeError(EC2ClientError): + def __init__(self, dependant_parameter, parameter_value, parameter): + super(InvalidDependantParameterTypeError, self).__init__( + "InvalidParameter", + "{0} type must be {1} if {2} is provided.".format( + dependant_parameter, parameter_value, parameter, + ), + ) + + +class InvalidAggregationIntervalParameterError(EC2ClientError): + def __init__(self, parameter): + super(InvalidAggregationIntervalParameterError, self).__init__( + "InvalidParameter", "Invalid {0}".format(parameter), + ) + + class InvalidParameterValueError(EC2ClientError): def __init__(self, parameter_value): super(InvalidParameterValueError, self).__init__( @@ -502,3 +573,29 @@ class InvalidLaunchTemplateNameError(EC2ClientError): "InvalidLaunchTemplateName.AlreadyExistsException", "Launch template name already in use.", ) + + +class InvalidParameterDependency(EC2ClientError): + def __init__(self, param, param_needed): + super(InvalidParameterDependency, self).__init__( + "InvalidParameterDependency", + "The parameter [{0}] requires the parameter {1} to be set.".format( + param, param_needed + ), + ) + + +class IncorrectStateIamProfileAssociationError(EC2ClientError): + def __init__(self, instance_id): + super(IncorrectStateIamProfileAssociationError, self).__init__( + "IncorrectState", + "There is an existing association for instance {0}".format(instance_id), + ) + + +class 
InvalidAssociationIDIamProfileAssociationError(EC2ClientError): + def __init__(self, association_id): + super(InvalidAssociationIDIamProfileAssociationError, self).__init__( + "InvalidAssociationID.NotFound", + "An invalid association-id of '{0}' was given".format(association_id), + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a0c886087..7676bffb4 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -15,23 +15,32 @@ from pkg_resources import resource_filename from collections import defaultdict import weakref from datetime import datetime -from boto.ec2.instance import Instance as BotoInstance, Reservation -from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType -from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest -from boto.ec2.launchspecification import LaunchSpecification +from moto.packages.boto.ec2.instance import Instance as BotoInstance, Reservation +from moto.packages.boto.ec2.blockdevicemapping import ( + BlockDeviceMapping, + BlockDeviceType, +) +from moto.packages.boto.ec2.spotinstancerequest import ( + SpotInstanceRequest as BotoSpotRequest, +) +from moto.packages.boto.ec2.launchspecification import LaunchSpecification from moto.compat import OrderedDict from moto.core import BaseBackend -from moto.core.models import Model, BaseModel +from moto.core.models import Model, BaseModel, CloudFormationModel from moto.core.utils import ( iso_8601_datetime_with_milliseconds, camelcase_to_underscores, ) +from moto.core import ACCOUNT_ID +from moto.kms import kms_backends + from .exceptions import ( CidrLimitExceeded, DependencyViolationError, EC2ClientError, FilterNotImplementedError, + FlowLogAlreadyExists, GatewayNotAttachedError, InvalidAddressError, InvalidAllocationIdError, @@ -51,6 +60,10 @@ from .exceptions import ( InvalidKeyPairDuplicateError, InvalidKeyPairFormatError, InvalidKeyPairNameError, + InvalidAggregationIntervalParameterError, + InvalidDependantParameterError, + InvalidDependantParameterTypeError, + InvalidFlowLogIdError, InvalidLaunchTemplateNameError, InvalidNetworkAclIdError, InvalidNetworkAttachmentIdError, @@ -69,7 +82,9 @@ from .exceptions import ( InvalidSubnetIdError, InvalidSubnetRangeError, InvalidVolumeIdError, + VolumeInUseError, InvalidVolumeAttachmentError, + InvalidVolumeDetachmentError, InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, @@ -88,6 +103,9 @@ from .exceptions import ( ResourceAlreadyAssociatedError, RulesPerSecurityGroupLimitExceededError, TagLimitExceeded, + InvalidParameterDependency, + IncorrectStateIamProfileAssociationError, + InvalidAssociationIDIamProfileAssociationError, ) from .utils import ( EC2_RESOURCE_TO_PREFIX, @@ -102,6 +120,7 @@ from .utils import ( random_internet_gateway_id, random_ip, random_ipv6_cidr, + randor_ipv4_cidr, random_launch_template_id, random_nat_gateway_id, random_key_pair, @@ -110,6 +129,8 @@ from .utils import ( random_reservation_id, random_route_table_id, generate_route_id, + generate_vpc_end_point_id, + create_dns_entries, split_route_id, random_security_group_id, random_snapshot_id, @@ -117,10 +138,12 @@ from .utils import ( random_spot_request_id, random_subnet_id, random_subnet_association_id, + random_flow_log_id, random_volume_id, random_vpc_id, random_vpc_cidr_association_id, random_vpc_peering_connection_id, + random_iam_instance_profile_association_id, generic_filter, is_valid_resource_id, get_prefix, @@ -128,6 +151,8 @@ from .utils import ( 
is_valid_cidr, filter_internet_gateways, filter_reservations, + filter_iam_instance_profile_associations, + filter_iam_instance_profiles, random_network_acl_id, random_network_acl_subnet_association_id, random_vpn_gateway_id, @@ -154,8 +179,7 @@ AMIS = _load_resource( or resource_filename(__name__, "resources/amis.json"), ) - -OWNER_ID = "111122223333" +OWNER_ID = ACCOUNT_ID def utc_date_and_time(): @@ -213,7 +237,7 @@ class TaggedEC2Resource(BaseModel): raise FilterNotImplementedError(filter_name, method_name) -class NetworkInterface(TaggedEC2Resource): +class NetworkInterface(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -262,6 +286,15 @@ class NetworkInterface(TaggedEC2Resource): if group: self._group_set.append(group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-networkinterface.html + return "AWS::EC2::NetworkInterface" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -448,7 +481,7 @@ class NetworkInterfaceBackend(object): return generic_filter(filters, enis) -class Instance(TaggedEC2Resource, BotoInstance): +class Instance(TaggedEC2Resource, BotoInstance, CloudFormationModel): VALID_ATTRIBUTES = { "instanceType", "kernel", @@ -555,16 +588,34 @@ class Instance(TaggedEC2Resource, BotoInstance): # worst case we'll get IP address exaustion... rarely pass + def add_block_device( + self, + size, + device_path, + snapshot_id=None, + encrypted=False, + delete_on_termination=False, + ): + volume = self.ec2_backend.create_volume( + size, self.region_name, snapshot_id, encrypted + ) + self.ec2_backend.attach_volume( + volume.id, self.id, device_path, delete_on_termination + ) + def setup_defaults(self): # Default have an instance with root volume should you not wish to # override with attach volume cmd. 
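        # (8 GiB at /dev/sda1 is simply moto's stand-in default; real EC2
        # derives the root device from the AMI's block device mapping.)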
volume = self.ec2_backend.create_volume(8, "us-east-1a") - self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1") + self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1", True) def teardown_defaults(self): - volume_id = self.block_device_mapping["/dev/sda1"].volume_id - self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1") - self.ec2_backend.delete_volume(volume_id) + for device_path in list(self.block_device_mapping.keys()): + volume = self.block_device_mapping[device_path] + volume_id = volume.volume_id + self.ec2_backend.detach_volume(volume_id, self.id, device_path) + if volume.delete_on_termination: + self.ec2_backend.delete_volume(volume_id) @property def get_block_device_mapping(self): @@ -597,6 +648,15 @@ class Instance(TaggedEC2Resource, BotoInstance): formatted_ip, self.region_name ) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-instance.html + return "AWS::EC2::Instance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -619,10 +679,21 @@ class Instance(TaggedEC2Resource, BotoInstance): subnet_id=properties.get("SubnetId"), key_name=properties.get("KeyName"), private_ip=properties.get("PrivateIpAddress"), + block_device_mappings=properties.get("BlockDeviceMappings", {}), ) instance = reservation.instances[0] for tag in properties.get("Tags", []): instance.add_tag(tag["Key"], tag["Value"]) + + # Associating iam instance profile. + # TODO: Don't forget to implement replace_iam_instance_profile_association once update_from_cloudformation_json + # for ec2 instance will be implemented. + if properties.get("IamInstanceProfile"): + ec2_backend.associate_iam_instance_profile( + instance_id=instance.id, + iam_instance_profile_name=properties.get("IamInstanceProfile"), + ) + return instance @classmethod @@ -708,6 +779,15 @@ class Instance(TaggedEC2Resource, BotoInstance): "Client.UserInitiatedShutdown", ) + # Disassociate iam instance profile if associated, otherwise iam_instance_profile_associations will + # be pointing to None. 
+ if self.ec2_backend.iam_instance_profile_associations.get(self.id): + self.ec2_backend.disassociate_iam_instance_profile( + association_id=self.ec2_backend.iam_instance_profile_associations[ + self.id + ].id + ) + def reboot(self, *args, **kwargs): self._state.name = "running" self._state.code = 16 @@ -774,7 +854,14 @@ class Instance(TaggedEC2Resource, BotoInstance): if "SubnetId" in nic: subnet = self.ec2_backend.get_subnet(nic["SubnetId"]) else: - subnet = None + # Get default Subnet + subnet = [ + subnet + for subnet in self.ec2_backend.get_all_subnets( + filters={"availabilityZone": self._placement.zone} + ) + if subnet.default_for_az + ][0] group_id = nic.get("SecurityGroupId") group_ids = [group_id] if group_id else [] @@ -821,6 +908,21 @@ class Instance(TaggedEC2Resource, BotoInstance): return self.public_ip raise UnformattedGetAttTemplateException() + def applies(self, filters): + if filters: + applicable = False + for f in filters: + acceptable_values = f["values"] + if f["name"] == "instance-state-name": + if self._state.name in acceptable_values: + applicable = True + if f["name"] == "instance-state-code": + if str(self._state.code) in acceptable_values: + applicable = True + return applicable + # If there are no filters, all instances are valid + return True + class InstanceBackend(object): def __init__(self): @@ -856,7 +958,25 @@ class InstanceBackend(object): ) new_reservation.instances.append(new_instance) new_instance.add_tags(instance_tags) - new_instance.setup_defaults() + if "block_device_mappings" in kwargs: + for block_device in kwargs["block_device_mappings"]: + device_name = block_device["DeviceName"] + volume_size = block_device["Ebs"].get("VolumeSize") + snapshot_id = block_device["Ebs"].get("SnapshotId") + encrypted = block_device["Ebs"].get("Encrypted", False) + delete_on_termination = block_device["Ebs"].get( + "DeleteOnTermination", False + ) + new_instance.add_block_device( + volume_size, + device_name, + snapshot_id, + encrypted, + delete_on_termination, + ) + else: + new_instance.setup_defaults() + return new_reservation def start_instances(self, instance_ids): @@ -920,22 +1040,29 @@ class InstanceBackend(object): value = getattr(instance, key) return instance, value - def all_instances(self): - instances = [] - for reservation in self.all_reservations(): - for instance in reservation.instances: - instances.append(instance) - return instances + def describe_instance_credit_specifications(self, instance_ids): + queried_instances = [] + for instance in self.get_multi_instances_by_id(instance_ids): + queried_instances.append(instance) + return queried_instances - def all_running_instances(self): + def all_instances(self, filters=None): instances = [] for reservation in self.all_reservations(): for instance in reservation.instances: - if instance.state_code == 16: + if instance.applies(filters): instances.append(instance) return instances - def get_multi_instances_by_id(self, instance_ids): + def all_running_instances(self, filters=None): + instances = [] + for reservation in self.all_reservations(): + for instance in reservation.instances: + if instance.state_code == 16 and instance.applies(filters): + instances.append(instance) + return instances + + def get_multi_instances_by_id(self, instance_ids, filters=None): """ :param instance_ids: A string list with instance ids :return: A list with instance objects @@ -945,7 +1072,8 @@ class InstanceBackend(object): for reservation in self.all_reservations(): for instance in reservation.instances: if instance.id in 
instance_ids: - result.append(instance) + if instance.applies(filters): + result.append(instance) # TODO: Trim error message down to specific invalid id. if instance_ids and len(instance_ids) > len(result): @@ -960,7 +1088,7 @@ class InstanceBackend(object): return instance def get_reservations_by_instance_ids(self, instance_ids, filters=None): - """ Go through all of the reservations and filter to only return those + """Go through all of the reservations and filter to only return those associated with the given instance_ids. """ reservations = [] @@ -1086,6 +1214,7 @@ class TagBackend(object): "subnet", "volume", "vpc", + "vpc-flow-log", "vpc-peering-connection" "vpn-connection", "vpn-gateway", ] @@ -1258,9 +1387,9 @@ class Ami(TaggedEC2Resource): elif source_ami: """ - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html - "We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI." - ~ 2014.09.29 + http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html + "We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI." + ~ 2014.09.29 """ self.virtualization_type = source_ami.virtualization_type self.architecture = source_ami.architecture @@ -1314,7 +1443,6 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): - AMI_REGEX = re.compile("ami-[a-z0-9]+") def __init__(self): @@ -1341,7 +1469,7 @@ class AmiBackend(object): source_ami=None, name=name, description=description, - owner_id=context.get_current_user() if context else OWNER_ID, + owner_id=OWNER_ID, ) self.amis[ami_id] = ami return ami @@ -1392,14 +1520,7 @@ class AmiBackend(object): # Limit by owner ids if owners: # support filtering by Owners=['self'] - owners = list( - map( - lambda o: context.get_current_user() - if context and o == "self" - else o, - owners, - ) - ) + owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners,)) images = [ami for ami in images if ami.owner_id in owners] # Generic filters @@ -1426,9 +1547,9 @@ class AmiBackend(object): # If anything is invalid, nothing is added. (No partial success.) if user_ids: """ - AWS docs: - "The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)." - http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html + AWS docs: + "The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)." 
+ http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html """ for user_id in user_ids: if len(user_id) != 12 or not user_id.isdigit(): @@ -1450,6 +1571,19 @@ class AmiBackend(object): return True + def register_image(self, name=None, description=None): + ami_id = random_ami_id() + ami = Ami( + self, + ami_id, + instance=None, + source_ami=None, + name=name, + description=description, + ) + self.amis[ami_id] = ami + return ami + def remove_launch_permission(self, ami_id, user_ids=None, group=None): ami = self.describe_images(ami_ids=[ami_id])[0] self.validate_permission_targets(user_ids=user_ids, group=group) @@ -1465,9 +1599,10 @@ class AmiBackend(object): class Region(object): - def __init__(self, name, endpoint): + def __init__(self, name, endpoint, opt_in_status): self.name = name self.endpoint = endpoint + self.opt_in_status = opt_in_status class Zone(object): @@ -1478,15 +1613,57 @@ class Zone(object): class RegionsAndZonesBackend(object): + regions_opt_in_not_required = [ + "af-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ] + regions = [] for region in Session().get_available_regions("ec2"): - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region))) + if region in regions_opt_in_not_required: + regions.append( + Region( + region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required" + ) + ) + else: + regions.append( + Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in") + ) for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region))) + regions.append( + Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required") + ) for region in Session().get_available_regions("ec2", partition_name="aws-cn"): - regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region))) + regions.append( + Region( + region, "ec2.{}.amazonaws.com.cn".format(region), "opt-in-not-required" + ) + ) zones = { + "af-south-1": [ + Zone(region_name="af-south-1", name="af-south-1a", zone_id="afs1-az1"), + Zone(region_name="af-south-1", name="af-south-1b", zone_id="afs1-az2"), + Zone(region_name="af-south-1", name="af-south-1c", zone_id="afs1-az3"), + ], "ap-south-1": [ Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"), Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3"), @@ -1599,6 +1776,11 @@ class RegionsAndZonesBackend(object): Zone(region_name="eu-central-1", name="eu-central-1b", zone_id="euc1-az3"), Zone(region_name="eu-central-1", name="eu-central-1c", zone_id="euc1-az1"), ], + "eu-south-1": [ + Zone(region_name="eu-south-1", name="eu-south-1a", zone_id="eus1-az1"), + Zone(region_name="eu-south-1", name="eu-south-1b", zone_id="eus1-az2"), + Zone(region_name="eu-south-1", name="eu-south-1c", zone_id="eus1-az3"), + ], "us-east-1": [ Zone(region_name="us-east-1", name="us-east-1a", zone_id="use1-az6"), Zone(region_name="us-east-1", name="us-east-1b", zone_id="use1-az1"), @@ -1716,18 +1898,35 @@ class SecurityRule(object): return True -class SecurityGroup(TaggedEC2Resource): +class SecurityGroup(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): self.ec2_backend = ec2_backend self.id 
= group_id self.name = name self.description = description self.ingress_rules = [] - self.egress_rules = [SecurityRule("-1", None, None, ["0.0.0.0/0"], [])] + self.egress_rules = [ + SecurityRule("-1", None, None, [{"CidrIp": "0.0.0.0/0"}], []) + ] self.enis = {} self.vpc_id = vpc_id self.owner_id = OWNER_ID + # Append default IPv6 egress rule for VPCs with IPv6 support + if vpc_id: + vpc = self.ec2_backend.vpcs.get(vpc_id) + if vpc and len(vpc.get_cidr_block_association_set(ipv6=True)) > 0: + self.egress_rules.append(SecurityRule("-1", None, None, [], [])) + + @staticmethod + def cloudformation_name_type(): + return "GroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-securitygroup.html + return "AWS::EC2::SecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -1958,11 +2157,16 @@ class SecurityGroupBackend(object): vpc_id=None, ): group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) - if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + if ip_ranges: + if isinstance(ip_ranges, str) or ( + six.PY2 and isinstance(ip_ranges, unicode) # noqa + ): + ip_ranges = [{"CidrIp": str(ip_ranges)}] + elif not isinstance(ip_ranges, list): + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2040,10 +2244,14 @@ class SecurityGroupBackend(object): group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + + if isinstance(ip_ranges, str) and "CidrIp" not in ip_ranges: + ip_ranges = [{"CidrIp": ip_ranges}] + else: + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2099,6 +2307,13 @@ class SecurityGroupBackend(object): if source_group: source_groups.append(source_group) + # I don't believe this is required after changing the default egress rule + # to be {'CidrIp': '0.0.0.0/0'} instead of just '0.0.0.0/0' + # Not sure why this would return only the IP if it was 0.0.0.0/0 instead of + # the ip_range? 
+ # for ip in ip_ranges: + # ip_ranges = [ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip] + security_rule = SecurityRule( ip_protocol, from_port, to_port, ip_ranges, source_groups ) @@ -2127,11 +2342,20 @@ class SecurityGroupBackend(object): raise RulesPerSecurityGroupLimitExceededError -class SecurityGroupIngress(object): +class SecurityGroupIngress(CloudFormationModel): def __init__(self, security_group, properties): self.security_group = security_group self.properties = properties + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-securitygroupingress.html + return "AWS::EC2::SecurityGroupIngress" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2195,7 +2419,7 @@ class SecurityGroupIngress(object): return cls(security_group, properties) -class VolumeAttachment(object): +class VolumeAttachment(CloudFormationModel): def __init__(self, volume, instance, device, status): self.volume = volume self.attach_time = utc_date_and_time() @@ -2203,6 +2427,15 @@ class VolumeAttachment(object): self.device = device self.status = status + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-volumeattachment.html + return "AWS::EC2::VolumeAttachment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2221,9 +2454,16 @@ class VolumeAttachment(object): return attachment -class Volume(TaggedEC2Resource): +class Volume(TaggedEC2Resource, CloudFormationModel): def __init__( - self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False + self, + ec2_backend, + volume_id, + size, + zone, + snapshot_id=None, + encrypted=False, + kms_key_id=None, ): self.id = volume_id self.size = size @@ -2233,6 +2473,16 @@ class Volume(TaggedEC2Resource): self.snapshot_id = snapshot_id self.ec2_backend = ec2_backend self.encrypted = encrypted + self.kms_key_id = kms_key_id + + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-volume.html + return "AWS::EC2::Volume" @classmethod def create_from_cloudformation_json( @@ -2301,6 +2551,7 @@ class Snapshot(TaggedEC2Resource): self.description = description self.start_time = utc_date_and_time() self.create_volume_permission_groups = set() + self.create_volume_permission_userids = set() self.ec2_backend = ec2_backend self.status = "completed" self.encrypted = encrypted @@ -2336,7 +2587,13 @@ class EBSBackend(object): self.snapshots = {} super(EBSBackend, self).__init__() - def create_volume(self, size, zone_name, snapshot_id=None, encrypted=False): + def create_volume( + self, size, zone_name, snapshot_id=None, encrypted=False, kms_key_id=None + ): + if kms_key_id and not encrypted: + raise InvalidParameterDependency("KmsKeyId", "Encrypted") + if encrypted and not kms_key_id: + kms_key_id = self._get_default_encryption_key() volume_id = random_volume_id() zone = self.get_zone_by_name(zone_name) if snapshot_id: @@ -2345,7 +2602,7 @@ class EBSBackend(object): size = snapshot.volume.size if snapshot.encrypted: encrypted = snapshot.encrypted - volume = Volume(self, volume_id, size, zone, snapshot_id, encrypted) + volume = 
Volume(self, volume_id, size, zone, snapshot_id, encrypted, kms_key_id) self.volumes[volume_id] = volume return volume @@ -2368,10 +2625,15 @@ class EBSBackend(object): def delete_volume(self, volume_id): if volume_id in self.volumes: + volume = self.volumes[volume_id] + if volume.attachment: + raise VolumeInUseError(volume_id, volume.attachment.instance.id) return self.volumes.pop(volume_id) raise InvalidVolumeIdError(volume_id) - def attach_volume(self, volume_id, instance_id, device_path): + def attach_volume( + self, volume_id, instance_id, device_path, delete_on_termination=False + ): volume = self.get_volume(volume_id) instance = self.get_instance(instance_id) @@ -2385,17 +2647,25 @@ class EBSBackend(object): status=volume.status, size=volume.size, attach_time=utc_date_and_time(), + delete_on_termination=delete_on_termination, ) instance.block_device_mapping[device_path] = bdt return volume.attachment def detach_volume(self, volume_id, instance_id, device_path): volume = self.get_volume(volume_id) - self.get_instance(instance_id) + instance = self.get_instance(instance_id) old_attachment = volume.attachment if not old_attachment: raise InvalidVolumeAttachmentError(volume_id, instance_id) + device_path = device_path or old_attachment.device + + try: + del instance.block_device_mapping[device_path] + except KeyError: + raise InvalidVolumeDetachmentError(volume_id, instance_id, device_path) + old_attachment.status = "detached" volume.attachment = None @@ -2452,32 +2722,55 @@ class EBSBackend(object): snapshot = self.get_snapshot(snapshot_id) return snapshot.create_volume_permission_groups - def add_create_volume_permission(self, snapshot_id, user_id=None, group=None): - if user_id: - self.raise_not_implemented_error( - "The UserId parameter for ModifySnapshotAttribute" - ) - - if group != "all": - raise InvalidAMIAttributeItemValueError("UserGroup", group) + def get_create_volume_permission_userids(self, snapshot_id): snapshot = self.get_snapshot(snapshot_id) - snapshot.create_volume_permission_groups.add(group) + return snapshot.create_volume_permission_userids + + def add_create_volume_permission(self, snapshot_id, user_ids=None, groups=None): + snapshot = self.get_snapshot(snapshot_id) + if user_ids: + snapshot.create_volume_permission_userids.update(user_ids) + + if groups and groups != ["all"]: + raise InvalidAMIAttributeItemValueError("UserGroup", groups) + else: + snapshot.create_volume_permission_groups.update(groups) + return True - def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None): - if user_id: - self.raise_not_implemented_error( - "The UserId parameter for ModifySnapshotAttribute" - ) - - if group != "all": - raise InvalidAMIAttributeItemValueError("UserGroup", group) + def remove_create_volume_permission(self, snapshot_id, user_ids=None, groups=None): snapshot = self.get_snapshot(snapshot_id) - snapshot.create_volume_permission_groups.discard(group) + if user_ids: + snapshot.create_volume_permission_userids.difference_update(user_ids) + + if groups and groups != ["all"]: + raise InvalidAMIAttributeItemValueError("UserGroup", groups) + else: + snapshot.create_volume_permission_groups.difference_update(groups) + return True + def _get_default_encryption_key(self): + # https://aws.amazon.com/kms/features/#AWS_Service_Integration + # An AWS managed CMK is created automatically when you first create + # an encrypted resource using an AWS service integrated with KMS. 
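+        # Flow below: fetch this region's KMS backend, create the aws/ebs
+        # managed key on first use, then return the key's ARN.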
+ kms = kms_backends[self.region_name] + ebs_alias = "alias/aws/ebs" + if not kms.alias_exists(ebs_alias): + key = kms.create_key( + policy="", + key_usage="ENCRYPT_DECRYPT", + customer_master_key_spec="SYMMETRIC_DEFAULT", + description="Default master key that protects my EBS volumes when no other key is defined", + tags=None, + region=self.region_name, + ) + kms.add_alias(key.id, ebs_alias) + ebs_key = kms.describe_key(ebs_alias) + return ebs_key.arn -class VPC(TaggedEC2Resource): + +class VPC(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -2510,6 +2803,15 @@ class VPC(TaggedEC2Resource): amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block, ) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpc.html + return "AWS::EC2::VPC" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2646,6 +2948,7 @@ class VPCBackend(object): def __init__(self): self.vpcs = {} + self.vpc_end_points = {} self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() @@ -2661,6 +2964,7 @@ cidr_block, instance_tenancy="default", amazon_provided_ipv6_cidr_block=False, + tags=[], ): vpc_id = random_vpc_id() try: @@ -2679,6 +2983,12 @@ instance_tenancy, amazon_provided_ipv6_cidr_block, ) + + for tag in tags: + tag_key = tag.get("Key") + tag_value = tag.get("Value") + vpc.add_tag(tag_key, tag_value) + self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. @@ -2788,6 +3098,81 @@ class VPCBackend(object): vpc = self.get_vpc(vpc_id) return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block) + def create_vpc_endpoint( + self, + vpc_id, + service_name, + type=None, + policy_document=False, + route_table_ids=None, + subnet_ids=[], + network_interface_ids=[], + dns_entries=None, + client_token=None, + security_group=None, + tag_specifications=None, + private_dns_enabled=None, + ): + + vpc_endpoint_id = generate_vpc_end_point_id(vpc_id) + + # Validate that the VPC exists. + self.get_vpc(vpc_id) + + if type and type.lower() == "interface": + + network_interface_ids = [] + for subnet_id in subnet_ids: + self.get_subnet(subnet_id) + eni = self.create_network_interface(subnet_id, random_private_ip()) + network_interface_ids.append(eni.id) + + dns_entries = create_dns_entries(service_name, vpc_endpoint_id) + + else: + # Treat the endpoint as a Gateway endpoint if no type is given.
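+ # A random CIDR stands in for the service's real prefix list here; the + # mocked route entry only needs a syntactically valid destination.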
+ service_destination_cidr = randor_ipv4_cidr() + + for route_table_id in route_table_ids: + self.create_route(route_table_id, service_destination_cidr) + if dns_entries: + dns_entries = [dns_entries] + + vpc_end_point = VPCEndPoint( + vpc_endpoint_id, + vpc_id, + service_name, + type, + policy_document, + route_table_ids, + subnet_ids, + network_interface_ids, + dns_entries, + client_token, + security_group, + tag_specifications, + private_dns_enabled, + ) + + self.vpc_end_points[vpc_endpoint_id] = vpc_end_point + + return vpc_end_point + + def get_vpc_end_point_services(self): + vpc_end_point_services = self.vpc_end_points.values() + + services = [] + for value in vpc_end_point_services: + services.append(value.service_name) + + availability_zones = EC2Backend.describe_availability_zones(self) + + return { + "servicesDetails": vpc_end_point_services, + "services": services, + "availability_zones": availability_zones, + } + class VPCPeeringConnectionStatus(object): def __init__(self, code="initiating-request", message=""): @@ -2815,13 +3200,22 @@ class VPCPeeringConnectionStatus(object): self.message = "Inactive" -class VPCPeeringConnection(TaggedEC2Resource): +class VPCPeeringConnection(TaggedEC2Resource, CloudFormationModel): def __init__(self, vpc_pcx_id, vpc, peer_vpc): self.id = vpc_pcx_id self.vpc = vpc self.peer_vpc = peer_vpc self._status = VPCPeeringConnectionStatus() + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpcpeeringconnection.html + return "AWS::EC2::VPCPeeringConnection" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2907,7 +3301,7 @@ class VPCPeeringConnectionBackend(object): return vpc_pcx -class Subnet(TaggedEC2Resource): +class Subnet(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -2942,6 +3336,16 @@ class Subnet(TaggedEC2Resource): ] # Reserved by AWS self._unused_ips = set() # if instance is destroyed hold IP here for reuse self._subnet_ips = {} # has IP: instance + self.state = "available" + + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-subnet.html + return "AWS::EC2::Subnet" @classmethod def create_from_cloudformation_json( @@ -3013,6 +3417,8 @@ class Subnet(TaggedEC2Resource): return self.availability_zone elif filter_name in ("defaultForAz", "default-for-az"): return self.default_for_az + elif filter_name == "state": + return self.state else: return super(Subnet, self).get_filter_value(filter_name, "DescribeSubnets") @@ -3078,24 +3484,43 @@ class SubnetBackend(object): return subnets[subnet_id] raise InvalidSubnetIdError(subnet_id) - def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None): + def create_subnet( + self, + vpc_id, + cidr_block, + availability_zone=None, + availability_zone_id=None, + context=None, + tags=[], + ): subnet_id = random_subnet_id() vpc = self.get_vpc( vpc_id ) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's - vpc_cidr_block = ipaddress.IPv4Network( - six.text_type(vpc.cidr_block), strict=False - ) + vpc_cidr_blocks = [ + ipaddress.IPv4Network( + six.text_type(cidr_block_association["cidr_block"]), strict=False + ) + for cidr_block_association in vpc.get_cidr_block_association_set() + ] try: 
subnet_cidr_block = ipaddress.IPv4Network( six.text_type(cidr_block), strict=False ) except ValueError: raise InvalidCIDRBlockParameterError(cidr_block) - if not ( - vpc_cidr_block.network_address <= subnet_cidr_block.network_address - and vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address - ): + + subnet_in_vpc_cidr_range = False + for vpc_cidr_block in vpc_cidr_blocks: + if ( + vpc_cidr_block.network_address <= subnet_cidr_block.network_address + and vpc_cidr_block.broadcast_address + >= subnet_cidr_block.broadcast_address + ): + subnet_in_vpc_cidr_range = True + break + + if not subnet_in_vpc_cidr_range: raise InvalidSubnetRangeError(cidr_block) for subnet in self.get_all_subnets(filters={"vpc-id": vpc_id}): @@ -3106,15 +3531,25 @@ class SubnetBackend(object): # consider it the default default_for_az = str(availability_zone not in self.subnets).lower() map_public_ip_on_launch = default_for_az - if availability_zone is None: + + if availability_zone is None and not availability_zone_id: availability_zone = "us-east-1a" try: - availability_zone_data = next( - zone - for zones in RegionsAndZonesBackend.zones.values() - for zone in zones - if zone.name == availability_zone - ) + if availability_zone: + availability_zone_data = next( + zone + for zones in RegionsAndZonesBackend.zones.values() + for zone in zones + if zone.name == availability_zone + ) + elif availability_zone_id: + availability_zone_data = next( + zone + for zones in RegionsAndZonesBackend.zones.values() + for zone in zones + if zone.zone_id == availability_zone_id + ) + except StopIteration: raise InvalidAvailabilityZoneError( availability_zone, @@ -3138,6 +3573,11 @@ class SubnetBackend(object): assign_ipv6_address_on_creation=False, ) + for tag in tags: + tag_key = tag.get("Key") + tag_value = tag.get("Value") + subnet.add_tag(tag_key, tag_value) + # AWS associates a new subnet with the default Network ACL self.associate_default_network_acl_with_subnet(subnet_id, vpc_id) self.subnets[availability_zone][subnet_id] = subnet @@ -3170,11 +3610,306 @@ class SubnetBackend(object): raise InvalidParameterValueError(attr_name) -class SubnetRouteTableAssociation(object): +class FlowLogs(TaggedEC2Resource, CloudFormationModel): + def __init__( + self, + ec2_backend, + flow_log_id, + resource_id, + traffic_type, + log_destination, + log_group_name, + deliver_logs_permission_arn, + max_aggregation_interval, + log_destination_type, + log_format, + deliver_logs_status="SUCCESS", + deliver_logs_error_message=None, + ): + self.ec2_backend = ec2_backend + self.id = flow_log_id + self.resource_id = resource_id + self.traffic_type = traffic_type + self.log_destination = log_destination + self.log_group_name = log_group_name + self.deliver_logs_permission_arn = deliver_logs_permission_arn + self.deliver_logs_status = deliver_logs_status + self.deliver_logs_error_message = deliver_logs_error_message + self.max_aggregation_interval = max_aggregation_interval + self.log_destination_type = log_destination_type + self.log_format = log_format + + self.created_at = utc_date_and_time() + + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-flowlog.html + return "AWS::EC2::FlowLog" + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + + resource_type = 
properties.get("ResourceType") + resource_id = [properties.get("ResourceId")] + traffic_type = properties.get("TrafficType") + deliver_logs_permission_arn = properties.get("DeliverLogsPermissionArn") + log_destination_type = properties.get("LogDestinationType") + log_destination = properties.get("LogDestination") + log_group_name = properties.get("LogGroupName") + log_format = properties.get("LogFormat") + max_aggregation_interval = properties.get("MaxAggregationInterval") + + ec2_backend = ec2_backends[region_name] + flow_log, _ = ec2_backend.create_flow_logs( + resource_type, + resource_id, + traffic_type, + deliver_logs_permission_arn, + log_destination_type, + log_destination, + log_group_name, + log_format, + max_aggregation_interval, + ) + for tag in properties.get("Tags", []): + tag_key = tag["Key"] + tag_value = tag["Value"] + flow_log[0].add_tag(tag_key, tag_value) + + return flow_log[0] + + @property + def physical_resource_id(self): + return self.id + + def get_filter_value(self, filter_name): + """ + API Version 2016-11-15 defines the following filters for DescribeFlowLogs: + + * deliver-log-status + * log-destination-type + * flow-log-id + * log-group-name + * resource-id + * traffic-type + * tag:key=value + * tag-key + + Taken from: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeFlowLogs.html + """ + if filter_name == "resource-id": + return self.resource_id + elif filter_name == "traffic-type": + return self.traffic_type + elif filter_name == "log-destination-type": + return self.log_destination_type + elif filter_name == "flow-log-id": + return self.id + elif filter_name == "log-group-name": + return self.log_group_name + elif filter_name == "deliver-log-status": + return "SUCCESS" + else: + return super(FlowLogs, self).get_filter_value( + filter_name, "DescribeFlowLogs" + ) + + +class FlowLogsBackend(object): + def __init__(self): + self.flow_logs = defaultdict(dict) + super(FlowLogsBackend, self).__init__() + + def _validate_request( + self, + log_group_name, + log_destination, + log_destination_type, + max_aggregation_interval, + deliver_logs_permission_arn, + ): + if log_group_name is None and log_destination is None: + raise InvalidDependantParameterError( + "LogDestination", "LogGroupName", "not provided", + ) + + if log_destination_type == "s3": + if log_group_name is not None: + raise InvalidDependantParameterTypeError( + "LogDestination", "cloud-watch-logs", "LogGroupName", + ) + elif log_destination_type == "cloud-watch-logs": + if deliver_logs_permission_arn is None: + raise InvalidDependantParameterError( + "DeliverLogsPermissionArn", + "LogDestinationType", + "cloud-watch-logs", + ) + + if max_aggregation_interval not in ["60", "600"]: + raise InvalidAggregationIntervalParameterError( + "Flow Log Max Aggregation Interval" + ) + + def create_flow_logs( + self, + resource_type, + resource_ids, + traffic_type, + deliver_logs_permission_arn, + log_destination_type, + log_destination, + log_group_name, + log_format, + max_aggregation_interval, + ): + # Apply defaults here, since these properties + # may be missing from the CloudFormation template + max_aggregation_interval = ( + "600" if max_aggregation_interval is None else max_aggregation_interval + ) + log_destination_type = ( + "cloud-watch-logs" if log_destination_type is None else log_destination_type + ) + log_format = ( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + if 
log_format is None + else log_format + ) + + # Validate the request parameters + self._validate_request( + log_group_name, + log_destination, + log_destination_type, + max_aggregation_interval, + deliver_logs_permission_arn, + ) + + flow_logs_set = [] + unsuccessful = [] + + for resource_id in resource_ids: + deliver_logs_status = "SUCCESS" + deliver_logs_error_message = None + flow_log_id = random_flow_log_id() + if resource_type == "VPC": + # Validate VPCs exist + self.get_vpc(resource_id) + elif resource_type == "Subnet": + # Validate Subnets exist + self.get_subnet(resource_id) + elif resource_type == "NetworkInterface": + # Validate NetworkInterfaces exist + self.get_network_interface(resource_id) + + if log_destination_type == "s3": + from moto.s3.models import s3_backend + from moto.s3.exceptions import MissingBucket + + arn = log_destination.split(":", 5)[5] + try: + s3_backend.get_bucket(arn) + except MissingBucket: + # Instead of creating the FlowLog, report + # an unsuccessful status for the + # given resource_id + unsuccessful.append( + ( + resource_id, + "400", + "LogDestination: {0} does not exist.".format(arn), + ) + ) + continue + elif log_destination_type == "cloud-watch-logs": + from moto.logs.models import logs_backends + from moto.logs.exceptions import ResourceNotFoundException + + # The API allows a FlowLog to be created with a + # non-existent LogGroup; the FAILED delivery + # status is then reported later on. + try: + # We need a cheap way to check that the group exists; + # list_tags_log_group does the trick. + logs_backends[self.region_name].list_tags_log_group(log_group_name) + except ResourceNotFoundException: + deliver_logs_status = "FAILED" + deliver_logs_error_message = "Access error" + + all_flow_logs = self.describe_flow_logs() + if any( + fl.resource_id == resource_id + and ( + fl.log_group_name == log_group_name + or fl.log_destination == log_destination + ) + for fl in all_flow_logs + ): + raise FlowLogAlreadyExists() + flow_logs = FlowLogs( + self, + flow_log_id, + resource_id, + traffic_type, + log_destination, + log_group_name, + deliver_logs_permission_arn, + max_aggregation_interval, + log_destination_type, + log_format, + deliver_logs_status, + deliver_logs_error_message, + ) + self.flow_logs[flow_log_id] = flow_logs + flow_logs_set.append(flow_logs) + + return flow_logs_set, unsuccessful + + def describe_flow_logs(self, flow_log_ids=None, filters=None): + matches = list(self.flow_logs.values()) + if flow_log_ids: + matches = [flow_log for flow_log in matches if flow_log.id in flow_log_ids] + if filters: + matches = generic_filter(filters, matches) + return matches + + def delete_flow_logs(self, flow_log_ids): + # Validate up front so an invalid id does not leave a partial delete behind. + non_existing = [fl_id for fl_id in flow_log_ids if fl_id not in self.flow_logs] + if non_existing: + raise InvalidFlowLogIdError( + len(flow_log_ids), " ".join(flow_log_ids), + ) + for fl_id in flow_log_ids: + self.flow_logs.pop(fl_id) + return True + + +class SubnetRouteTableAssociation(CloudFormationModel): def __init__(self, route_table_id, subnet_id): self.route_table_id = route_table_id self.subnet_id = subnet_id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-subnetroutetableassociation.html + return "AWS::EC2::SubnetRouteTableAssociation" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, 
region_name @@ -3204,7 +3939,7 @@ class SubnetRouteTableAssociationBackend(object): return subnet_association -class RouteTable(TaggedEC2Resource): +class RouteTable(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend, route_table_id, vpc_id, main=False): self.ec2_backend = ec2_backend self.id = route_table_id @@ -3213,6 +3948,15 @@ class RouteTable(TaggedEC2Resource): self.associations = {} self.routes = {} + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-routetable.html + return "AWS::EC2::RouteTable" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3257,10 +4001,12 @@ class RouteTableBackend(object): self.route_tables = {} super(RouteTableBackend, self).__init__() - def create_route_table(self, vpc_id, main=False): + def create_route_table(self, vpc_id, tags=[], main=False): route_table_id = random_route_table_id() vpc = self.get_vpc(vpc_id) # Validate VPC exists route_table = RouteTable(self, route_table_id, vpc_id, main=main) + for tag in tags: + route_table.add_tag(tag.get("Key"), tag.get("Value")) self.route_tables[route_table_id] = route_table # AWS creates a default local route. @@ -3348,11 +4094,12 @@ class RouteTableBackend(object): return self.associate_route_table(route_table_id, subnet_id) -class Route(object): +class Route(CloudFormationModel): def __init__( self, route_table, destination_cidr_block, + destination_ipv6_cidr_block, local=False, gateway=None, instance=None, @@ -3360,9 +4107,12 @@ class Route(object): interface=None, vpc_pcx=None, ): - self.id = generate_route_id(route_table.id, destination_cidr_block) + self.id = generate_route_id( + route_table.id, destination_cidr_block, destination_ipv6_cidr_block + ) self.route_table = route_table self.destination_cidr_block = destination_cidr_block + self.destination_ipv6_cidr_block = destination_ipv6_cidr_block self.local = local self.gateway = gateway self.instance = instance @@ -3370,6 +4120,15 @@ class Route(object): self.interface = interface self.vpc_pcx = vpc_pcx + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-route.html + return "AWS::EC2::Route" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3396,6 +4155,39 @@ class Route(object): return route_table +class VPCEndPoint(TaggedEC2Resource): + def __init__( + self, + id, + vpc_id, + service_name, + type=None, + policy_document=False, + route_table_ids=None, + subnet_ids=None, + network_interface_ids=None, + dns_entries=None, + client_token=None, + security_group=None, + tag_specifications=None, + private_dns_enabled=None, + ): + self.id = id + self.vpc_id = vpc_id + self.service_name = service_name + self.type = type + self.policy_document = policy_document + self.route_table_ids = route_table_ids + self.network_interface_ids = network_interface_ids + self.subnet_ids = subnet_ids + self.client_token = client_token + self.security_group = security_group + self.tag_specifications = tag_specifications + self.private_dns_enabled = private_dns_enabled + self.created_at = datetime.utcnow() + self.dns_entries = dns_entries + + class RouteBackend(object): def __init__(self): super(RouteBackend, self).__init__() @@ -3404,6 +4196,7 @@ class 
RouteBackend(object): self, route_table_id, destination_cidr_block, + destination_ipv6_cidr_block=None, local=False, gateway_id=None, instance_id=None, @@ -3411,30 +4204,37 @@ class RouteBackend(object): interface_id=None, vpc_peering_connection_id=None, ): + gateway = None + nat_gateway = None + route_table = self.get_route_table(route_table_id) if interface_id: - self.raise_not_implemented_error("CreateRoute to NetworkInterfaceId") + # Validate that the referenced network interface exists. + self.get_network_interface(interface_id) - gateway = None - if gateway_id: - if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: - gateway = self.get_vpn_gateway(gateway_id) - elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: - gateway = self.get_internet_gateway(gateway_id) + else: + if gateway_id: + if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: + gateway = self.get_vpn_gateway(gateway_id) + elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: + gateway = self.get_internet_gateway(gateway_id) - try: - ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) - except ValueError: - raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + try: + if destination_cidr_block: + ipaddress.IPv4Network( + six.text_type(destination_cidr_block), strict=False + ) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) - nat_gateway = None - if nat_gateway_id is not None: - nat_gateway = self.nat_gateways.get(nat_gateway_id) + if nat_gateway_id is not None: + nat_gateway = self.nat_gateways.get(nat_gateway_id) route = Route( route_table, destination_cidr_block, + destination_ipv6_cidr_block, local=local, gateway=gateway, instance=self.get_instance(instance_id) if instance_id else None, @@ -3495,12 +4295,21 @@ class RouteBackend(object): return deleted -class InternetGateway(TaggedEC2Resource): +class InternetGateway(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend): self.ec2_backend = ec2_backend self.id = random_internet_gateway_id() self.vpc = None + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-internetgateway.html + return "AWS::EC2::InternetGateway" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3525,8 +4334,10 @@ class InternetGatewayBackend(object): self.internet_gateways = {} super(InternetGatewayBackend, self).__init__() - def create_internet_gateway(self): + def create_internet_gateway(self, tags=[]): igw = InternetGateway(self) + for tag in tags: + igw.add_tag(tag.get("Key"), tag.get("Value")) self.internet_gateways[igw.id] = igw return igw @@ -3573,11 +4384,20 @@ class InternetGatewayBackend(object): return self.describe_internet_gateways(internet_gateway_ids=igw_ids)[0] -class VPCGatewayAttachment(BaseModel): +class VPCGatewayAttachment(CloudFormationModel): def __init__(self, gateway_id, vpc_id): self.gateway_id = gateway_id self.vpc_id = vpc_id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpcgatewayattachment.html + return "AWS::EC2::VPCGatewayAttachment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3798,7 +4618,7 @@ class SpotFleetLaunchSpec(object): 
self.weighted_capacity = float(weighted_capacity) -class SpotFleetRequest(TaggedEC2Resource): +class SpotFleetRequest(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -3847,6 +4667,15 @@ class SpotFleetRequest(TaggedEC2Resource): def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-spotfleet.html + return "AWS::EC2::SpotFleet" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -4070,18 +4899,29 @@ class SpotFleetBackend(object): return True -class ElasticAddress(object): - def __init__(self, domain, address=None): +class ElasticAddress(TaggedEC2Resource, CloudFormationModel): + def __init__(self, ec2_backend, domain, address=None): + self.ec2_backend = ec2_backend if address: self.public_ip = address else: self.public_ip = random_ip() self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None + self.id = self.allocation_id self.domain = domain self.instance = None self.eni = None self.association_id = None + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-eip.html + return "AWS::EC2::EIP" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -4129,9 +4969,13 @@ class ElasticAddress(object): return self.eni.private_ip_address elif filter_name == "public-ip": return self.public_ip - else: + elif filter_name == "network-interface-owner-id": # TODO: implement network-interface-owner-id raise FilterNotImplementedError(filter_name, "DescribeAddresses") + else: + return super(ElasticAddress, self).get_filter_value( + filter_name, "DescribeAddresses" + ) class ElasticAddressBackend(object): @@ -4143,9 +4987,9 @@ class ElasticAddressBackend(object): if domain not in ["standard", "vpc"]: raise InvalidDomainError(domain) if address: - address = ElasticAddress(domain, address) + address = ElasticAddress(self, domain=domain, address=address) else: - address = ElasticAddress(domain) + address = ElasticAddress(self, domain=domain) self.addresses.append(address) return address @@ -4491,10 +5335,12 @@ class NetworkAclBackend(object): raise InvalidNetworkAclIdError(network_acl_id) return network_acl - def create_network_acl(self, vpc_id, default=False): + def create_network_acl(self, vpc_id, tags=[], default=False): network_acl_id = random_network_acl_id() self.get_vpc(vpc_id) network_acl = NetworkAcl(self, network_acl_id, vpc_id, default) + for tag in tags: + network_acl.add_tag(tag.get("Key"), tag.get("Value")) self.network_acls[network_acl_id] = network_acl if default: self.add_default_entries(network_acl_id) @@ -4522,23 +5368,7 @@ class NetworkAclBackend(object): ) def get_all_network_acls(self, network_acl_ids=None, filters=None): - network_acls = self.network_acls.values() - - if network_acl_ids: - network_acls = [ - network_acl - for network_acl in network_acls - if network_acl.id in network_acl_ids - ] - if len(network_acls) != len(network_acl_ids): - invalid_id = list( - set(network_acl_ids).difference( - set([network_acl.id for network_acl in network_acls]) - ) - )[0] - raise InvalidRouteTableIdError(invalid_id) - - return generic_filter(filters, network_acls) + return self.describe_network_acls(network_acl_ids, filters) 
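+    # A minimal usage sketch (assuming a boto3 EC2 client bound to a mocked
+    # region, and `vpc_id` taken from a previously created VPC; the names are
+    # illustrative only):
+    #
+    #     acl = client.create_network_acl(VpcId=vpc_id)["NetworkAcl"]
+    #     resp = client.describe_network_acls(NetworkAclIds=[acl["NetworkAclId"]])
+    #     assert resp["NetworkAcls"][0]["NetworkAclId"] == acl["NetworkAclId"]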
def delete_network_acl(self, network_acl_id): deleted = self.network_acls.pop(network_acl_id, None) @@ -4658,6 +5488,25 @@ class NetworkAclBackend(object): self, association_id, subnet_id, acl.id ) + def describe_network_acls(self, network_acl_ids=None, filters=None): + network_acls = self.network_acls.values() + + if network_acl_ids: + network_acls = [ + network_acl + for network_acl in network_acls + if network_acl.id in network_acl_ids + ] + if len(network_acls) != len(network_acl_ids): + invalid_id = list( + set(network_acl_ids).difference( + set([network_acl.id for network_acl in network_acls]) + ) + )[0] + raise InvalidRouteTableIdError(invalid_id) + + return generic_filter(filters, network_acls) + class NetworkAclAssociation(object): def __init__(self, ec2_backend, new_association_id, subnet_id, network_acl_id): @@ -4730,6 +5579,14 @@ class VpnGateway(TaggedEC2Resource): super(VpnGateway, self).__init__() def get_filter_value(self, filter_name): + if filter_name == "attachment.vpc-id": + return self.attachments.keys() + elif filter_name == "attachment.state": + return [attachment.state for attachment in self.attachments.values()] + elif filter_name == "vpn-gateway-id": + return self.id + elif filter_name == "type": + return self.type return super(VpnGateway, self).get_filter_value( filter_name, "DescribeVpnGateways" ) @@ -4831,7 +5688,7 @@ class CustomerGatewayBackend(object): return deleted -class NatGateway(object): +class NatGateway(CloudFormationModel): def __init__(self, backend, subnet_id, allocation_id): # public properties self.id = random_nat_gateway_id() @@ -4869,6 +5726,15 @@ class NatGateway(object): eips = self._backend.address_by_allocation([self.allocation_id]) return eips[0].public_ip + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-natgateway.html + return "AWS::EC2::NatGateway" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -4934,6 +5800,22 @@ class LaunchTemplateVersion(object): self.description = description self.create_time = utc_date_and_time() + @property + def image_id(self): + return self.data.get("ImageId", "") + + @property + def instance_type(self): + return self.data.get("InstanceType", "") + + @property + def security_groups(self): + return self.data.get("SecurityGroups", []) + + @property + def user_data(self): + return self.data.get("UserData", "") + class LaunchTemplate(TaggedEC2Resource): def __init__(self, backend, name, template_data, version_description): @@ -5015,6 +5897,121 @@ class LaunchTemplateBackend(object): return generic_filter(filters, templates) +class IamInstanceProfileAssociation(CloudFormationModel): + def __init__(self, ec2_backend, association_id, instance, iam_instance_profile): + self.ec2_backend = ec2_backend + self.id = association_id + self.instance = instance + self.iam_instance_profile = iam_instance_profile + self.state = "associated" + + +class IamInstanceProfileAssociationBackend(object): + def __init__(self): + self.iam_instance_profile_associations = {} + super(IamInstanceProfileAssociationBackend, self).__init__() + + def associate_iam_instance_profile( + self, + instance_id, + iam_instance_profile_name=None, + iam_instance_profile_arn=None, + ): + iam_association_id = random_iam_instance_profile_association_id() + + instance_profile = filter_iam_instance_profiles( + iam_instance_profile_arn, 
iam_instance_profile_name + ) + + if instance_id in self.iam_instance_profile_associations.keys(): + raise IncorrectStateIamProfileAssociationError(instance_id) + + iam_instance_profile_associations = IamInstanceProfileAssociation( + self, + iam_association_id, + self.get_instance(instance_id) if instance_id else None, + instance_profile, + ) + # According to AWS, there can be only one such association per EC2 instance. + self.iam_instance_profile_associations[ + instance_id + ] = iam_instance_profile_associations + return iam_instance_profile_associations + + def describe_iam_instance_profile_associations( + self, association_ids, filters=None, max_results=100, next_token=None + ): + associations_list = [] + if association_ids: + for association in self.iam_instance_profile_associations.values(): + if association.id in association_ids: + associations_list.append(association) + else: + # No association ids were given, so show all associations. + associations_list.extend(self.iam_instance_profile_associations.values()) + + associations_list = filter_iam_instance_profile_associations( + associations_list, filters + ) + + starting_point = int(next_token or 0) + ending_point = starting_point + int(max_results or 100) + associations_page = associations_list[starting_point:ending_point] + new_next_token = ( + str(ending_point) if ending_point < len(associations_list) else None + ) + + return associations_page, new_next_token + + def disassociate_iam_instance_profile(self, association_id): + iam_instance_profile_associations = None + for association_key in self.iam_instance_profile_associations.keys(): + if ( + self.iam_instance_profile_associations[association_key].id + == association_id + ): + iam_instance_profile_associations = self.iam_instance_profile_associations[ + association_key + ] + del self.iam_instance_profile_associations[association_key] + # Delete once and break to avoid `RuntimeError: dictionary changed size during iteration` + break + + if not iam_instance_profile_associations: + raise InvalidAssociationIDIamProfileAssociationError(association_id) + + return iam_instance_profile_associations + + def replace_iam_instance_profile_association( + self, + association_id, + iam_instance_profile_name=None, + iam_instance_profile_arn=None, + ): + instance_profile = filter_iam_instance_profiles( + iam_instance_profile_arn, iam_instance_profile_name + ) + + iam_instance_profile_association = None + for association_key in self.iam_instance_profile_associations.keys(): + if ( + self.iam_instance_profile_associations[association_key].id + == association_id + ): + self.iam_instance_profile_associations[ + association_key + ].iam_instance_profile = instance_profile + iam_instance_profile_association = self.iam_instance_profile_associations[ + association_key + ] + break + + if not iam_instance_profile_association: + raise InvalidAssociationIDIamProfileAssociationError(association_id) + + return iam_instance_profile_association + + class EC2Backend( BaseBackend, InstanceBackend, @@ -5026,6 +6023,7 @@ class EC2Backend( VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend, + FlowLogsBackend, NetworkInterfaceBackend, VPNConnectionBackend, VPCPeeringConnectionBackend, @@ -5043,6 +6041,7 @@ class EC2Backend( CustomerGatewayBackend, NatGatewayBackend, LaunchTemplateBackend, + IamInstanceProfileAssociationBackend, ): def __init__(self, region_name): self.region_name = region_name @@ -5129,6 +6128,13 @@ class EC2Backend( self.describe_vpn_connections(vpn_connection_ids=[resource_id]) elif resource_prefix == 
EC2_RESOURCE_TO_PREFIX["vpn-gateway"]: self.get_vpn_gateway(vpn_gateway_id=resource_id) + elif ( + resource_prefix + == EC2_RESOURCE_TO_PREFIX["iam-instance-profile-association"] + ): + self.describe_iam_instance_profile_associations( + association_ids=[resource_id] + ) return True diff --git a/moto/ec2/resources/instance_types.json b/moto/ec2/resources/instance_types.json index 2fa2e4e93..a1b55ba21 100644 --- a/moto/ec2/resources/instance_types.json +++ b/moto/ec2/resources/instance_types.json @@ -1 +1 @@ -{"m1.xlarge": {"ecu_per_vcpu": 2.0, "network_perf": 9.0, "intel_avx": "", "name": "M1 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.xlarge", "computeunits": 8.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "i3.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3800.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.4xlarge", "computeunits": 53.0, "ebs_throughput": 400.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "i2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "", "name": "I2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 800.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "hs1.8xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "High Storage Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 48000.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hs1.8xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 117.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.micro": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Micro", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.micro", "computeunits": 0.1, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.4xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": 
"HVM", "storage": 24000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.4xlarge", "computeunits": 56.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "m2.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "", "name": "M2 High Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 420.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.xlarge", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 17.1, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "p2.xlarge": {"ecu_per_vcpu": 3.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "General Purpose GPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.xlarge", "computeunits": 12.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 61.0, "ebs_max_bandwidth": 750.0, "gpus": 1, "ipv6_support": true}, "i2.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 9.0, "intel_avx": "", "name": "I2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3200.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.4xlarge", "computeunits": 53.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t1.micro": {"ecu_per_vcpu": 0.0, "network_perf": 0.0, "intel_avx": "", "name": "T1 Micro", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "t1.micro", "computeunits": 0.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 4, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.613, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "d2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "D2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.xlarge", "computeunits": 14.0, "ebs_throughput": 93.75, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "r3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": 
"Yes", "apiname": "r3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "I3 High I/O Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 7600.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.8xlarge", "computeunits": 99.0, "ebs_throughput": 850.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "c3.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "g2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.medium": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Medium", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.medium", "computeunits": 0.4, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 18, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 4.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.xlarge", "computeunits": 13.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "x1.16xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 13.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1920.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": 
"x1.16xlarge", "computeunits": 174.5, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "p2.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "General Purpose GPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.8xlarge", "computeunits": 94.0, "ebs_throughput": 625.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 488.0, "ebs_max_bandwidth": 5000.0, "gpus": 8, "ipv6_support": true}, "f1.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "F1 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3760.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 400, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 8, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r4.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "R4 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.8xlarge", "computeunits": 99.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 37500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "g3.4xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 11.0, "intel_avx": "Yes", "name": "G3 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.4xlarge", "computeunits": 47.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 20000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 1, "ipv6_support": true}, "cg1.4xlarge": {"ecu_per_vcpu": 2.09375, "network_perf": 12.0, "intel_avx": "", "name": "Cluster GPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cg1.4xlarge", "computeunits": 33.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 22.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.large": {"ecu_per_vcpu": 4.0, "network_perf": 7.0, "intel_avx": "Yes", "name": "C4 High-CPU Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", 
"clock_speed_ghz": "Yes", "apiname": "c4.large", "computeunits": 8.0, "ebs_throughput": 62.5, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "m4.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "M4 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 256.0, "ebs_max_bandwidth": 10000.0, "gpus": 0, "ipv6_support": true}, "r4.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.4xlarge", "computeunits": 53.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 18750.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "r4.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.2xlarge", "computeunits": 27.0, "ebs_throughput": 218.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1750.0, "gpus": 0, "ipv6_support": true}, "c3.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "C3 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "i3.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 475.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.large", "computeunits": 7.0, "ebs_throughput": 50.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 425.0, "gpus": 0, "ipv6_support": true}, "r4.xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": 
true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.xlarge", "computeunits": 13.5, "ebs_throughput": 109.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 875.0, "gpus": 0, "ipv6_support": true}, "m2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "", "name": "M2 High Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 850.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.2xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 120, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 34.2, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "m3.medium": {"ecu_per_vcpu": 3.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Medium", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 4.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.medium", "computeunits": 3.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "r3.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.4xlarge", "computeunits": 52.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.small": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Small", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.small", "computeunits": 0.2, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 8, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 2.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "R3 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i3.16xlarge": {"ecu_per_vcpu": 3.125, "network_perf": 17.0, "intel_avx": "Yes", "name": "I3 High I/O 16xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 15200.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": 
"Yes", "apiname": "i3.16xlarge", "computeunits": 200.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "c3.large": {"ecu_per_vcpu": 3.5, "network_perf": 6.0, "intel_avx": "Yes", "name": "C3 High-CPU Large", "architecture": "32/64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.large", "computeunits": 7.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i2.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 7.0, "intel_avx": "", "name": "I2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1600.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.2xlarge", "computeunits": 27.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 950.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.xlarge", "computeunits": 13.0, "ebs_throughput": 100.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 850.0, "gpus": 0, "ipv6_support": true}, "i2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 13.0, "intel_avx": "", "name": "I2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6400.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r4.16xlarge": {"ecu_per_vcpu": 3.046875, "network_perf": 17.0, "intel_avx": "Yes", "name": "R4 High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.16xlarge", "computeunits": 195.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "g3.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "G3 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.8xlarge", "computeunits": 
94.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 2, "ipv6_support": true}, "c3.4xlarge": {"ecu_per_vcpu": 3.4375, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.4xlarge", "computeunits": 55.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "r4.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.large", "computeunits": 7.0, "ebs_throughput": 54.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 437.0, "gpus": 0, "ipv6_support": true}, "f1.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "F1 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 470.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.2xlarge", "computeunits": 26.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 1, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 122.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "m4.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "m3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 120, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 30.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c3.8xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 12.0, "intel_avx": "Yes", "name": "C3 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", 
"clock_speed_ghz": "Yes", "apiname": "c3.8xlarge", "computeunits": 108.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "cr1.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "High Memory Cluster Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cr1.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "cc2.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "Cluster Compute Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3360.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cc2.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m1.large": {"ecu_per_vcpu": 2.0, "network_perf": 7.0, "intel_avx": "", "name": "M1 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 840.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.large", "computeunits": 4.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 30, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "r3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "R3 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "g3.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "G3 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 4, "ipv6_support": true}, "m1.medium": {"ecu_per_vcpu": 2.0, "network_perf": 6.0, "intel_avx": "", "name": "M1 General Purpose Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 410.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.medium", "computeunits": 2.0, 
"ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "i3.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 1900.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.2xlarge", "computeunits": 27.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "t2.xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.xlarge", "computeunits": 0.9, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "g2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 60.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c1.medium": {"ecu_per_vcpu": 2.5, "network_perf": 6.0, "intel_avx": "", "name": "C1 High-CPU Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 350.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.medium", "computeunits": 5.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.large": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.large", "computeunits": 0.6, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 36, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 12000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": 
"Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "c4.8xlarge": {"ecu_per_vcpu": 3.66666666667, "network_perf": 13.0, "intel_avx": "Yes", "name": "C4 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.8xlarge", "computeunits": 132.0, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 60.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "c4.2xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.2xlarge", "computeunits": 31.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "x1e.32xlarge": {"ecu_per_vcpu": 2.65625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1E 32xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3840.0, "placement_group_support": false, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1e.32xlarge", "computeunits": 340.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 3904.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": false}, "m4.10xlarge": {"ecu_per_vcpu": 3.1125, "network_perf": 13.0, "intel_avx": "Yes", "name": "M4 Deca Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.10xlarge", "computeunits": 124.5, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 40.0, "memory": 160.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "t2.2xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.2xlarge", "computeunits": 1.35, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.4xlarge": {"ecu_per_vcpu": 3.34375, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.4xlarge", "computeunits": 53.5, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel 
Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 64.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.nano": {"ecu_per_vcpu": 0.0, "network_perf": 2.0, "intel_avx": "Yes", "name": "T2 Nano", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.nano", "computeunits": 0.05, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.8xlarge": {"ecu_per_vcpu": 3.22222222222, "network_perf": 13.0, "intel_avx": "Yes", "name": "D2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 48000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.8xlarge", "computeunits": 116.0, "ebs_throughput": 500.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 244.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "m3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m2.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "", "name": "M2 High Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.4xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 68.4, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "m1.small": {"ecu_per_vcpu": 1.0, "network_perf": 2.0, "intel_avx": "", "name": "M1 General Purpose Small", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.small", "computeunits": 1.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 8, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c1.xlarge": {"ecu_per_vcpu": 2.5, "network_perf": 9.0, "intel_avx": "", "name": "C1 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.xlarge", "computeunits": 20.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, 
"vcpus": 8.0, "memory": 7.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "x1.32xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 32xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3840.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.32xlarge", "computeunits": 349.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 1952.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r3.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 12.0, "intel_avx": "Yes", "name": "R3 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.large": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "M4 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.large", "computeunits": 6.5, "ebs_throughput": 56.25, "vpc_only": true, "max_ips": 20, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3600.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 450.0, "gpus": 0, "ipv6_support": true}, "p2.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "General Purpose GPU 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 732.0, "ebs_max_bandwidth": 10000.0, "gpus": 16, "ipv6_support": true}, "hi1.4xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "HI1. 
High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 2048.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hi1.4xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.4xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.4xlarge", "computeunits": 62.0, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "c4.xlarge": {"ecu_per_vcpu": 4.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.xlarge", "computeunits": 16.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "m3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}}
\ No newline at end of file
+{"a1.2xlarge": {"apiname": "a1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "A1 Double Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "a1.4xlarge": {"apiname": "a1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 32.0, "name": "A1 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true},
"a1.large": {"apiname": "a1.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 4.0, "name": "A1 Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "a1.medium": {"apiname": "a1.medium", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 8, "memory": 2.0, "name": "A1 Medium", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "a1.metal": {"apiname": "a1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 32.0, "name": "A1 Metal", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "a1.xlarge": {"apiname": "a1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 8.0, "name": "A1 Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c1.medium": {"apiname": "c1.medium", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 5.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 12, "memory": 1.7, "name": "C1 High-CPU Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 350.0, "vcpus": 2.0, "vpc_only": false}, "c1.xlarge": {"apiname": "c1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 20.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 7.0, "name": "C1 High-CPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 
1680.0, "vcpus": 8.0, "vpc_only": false}, "c3.2xlarge": {"apiname": "c3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 28.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 15.0, "name": "C3 High-CPU Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "c3.4xlarge": {"apiname": "c3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 55.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.4375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 30.0, "name": "C3 High-CPU Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 320.0, "vcpus": 16.0, "vpc_only": false}, "c3.8xlarge": {"apiname": "c3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 108.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 60.0, "name": "C3 High-CPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 640.0, "vcpus": 32.0, "vpc_only": false}, "c3.large": {"apiname": "c3.large", "architecture": "32/64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 7.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 30, "memory": 3.75, "name": "C3 High-CPU Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "c3.xlarge": {"apiname": "c3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 14.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 7.5, "name": "C3 High-CPU Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "c4.2xlarge": {"apiname": "c4.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 31.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, 
"memory": 15.0, "name": "C4 High-CPU Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c4.4xlarge": {"apiname": "c4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 62.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 30.0, "name": "C4 High-CPU Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c4.8xlarge": {"apiname": "c4.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 132.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.6666666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 60.0, "name": "C4 High-CPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c4.large": {"apiname": "c4.large", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 8.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 3.75, "name": "C4 High-CPU Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c4.xlarge": {"apiname": "c4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 16.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 7.5, "name": "C4 High-CPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c5.12xlarge": {"apiname": "c5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 188.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "C5 High-CPU 12xlarge", "network_perf": 14.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "c5.18xlarge": {"apiname": "c5.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 281.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.9027777777777777, "enhanced_networking": true, 
"fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 144.0, "name": "C5 High-CPU 18xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5.24xlarge": {"apiname": "c5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "c5.2xlarge": {"apiname": "c5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 34.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "C5 High-CPU Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c5.4xlarge": {"apiname": "c5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 68.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 32.0, "name": "C5 High-CPU Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c5.9xlarge": {"apiname": "c5.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 141.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 72.0, "name": "C5 High-CPU 9xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c5.large": {"apiname": "c5.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 9.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 4.0, "name": "C5 High-CPU Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c5.metal": {"apiname": "c5.metal", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, 
"ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "c5.xlarge": {"apiname": "c5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 17.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 8.0, "name": "C5 High-CPU Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c5d.12xlarge": {"apiname": "c5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 188.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "C5 High-CPU 12xlarge", "network_perf": 14.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "c5d.18xlarge": {"apiname": "c5d.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 281.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.9027777777777777, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 144.0, "name": "C5 High-CPU 18xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 1800.0, "vcpus": 72.0, "vpc_only": true}, "c5d.24xlarge": {"apiname": "c5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "c5d.2xlarge": {"apiname": "c5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 34.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "C5 High-CPU Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 200.0, "vcpus": 8.0, "vpc_only": true}, 
"c5d.4xlarge": {"apiname": "c5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 68.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 32.0, "name": "C5 High-CPU Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 400.0, "vcpus": 16.0, "vpc_only": true}, "c5d.9xlarge": {"apiname": "c5d.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 141.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 72.0, "name": "C5 High-CPU 9xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 900.0, "vcpus": 36.0, "vpc_only": true}, "c5d.large": {"apiname": "c5d.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 9.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 4.0, "name": "C5 High-CPU Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 50.0, "vcpus": 2.0, "vpc_only": true}, "c5d.metal": {"apiname": "c5d.metal", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "c5d.xlarge": {"apiname": "c5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 17.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 8.0, "name": "C5 High-CPU Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 100.0, "vcpus": 4.0, "vpc_only": true}, "c5n.18xlarge": {"apiname": "c5n.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 192.0, "name": "C5N 18xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon 
Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5n.2xlarge": {"apiname": "c5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 21.0, "name": "C5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c5n.4xlarge": {"apiname": "c5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 42.0, "name": "C5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c5n.9xlarge": {"apiname": "c5n.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 96.0, "name": "C5N 9xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c5n.large": {"apiname": "c5n.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 5.25, "name": "C5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c5n.metal": {"apiname": "c5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 192.0, "name": "C5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5n.xlarge": {"apiname": "c5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 10.5, "name": "C5N Extra 
Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "cc2.8xlarge": {"apiname": "cc2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 88.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "HVM", "max_ips": 240, "memory": 60.5, "name": "Cluster Compute Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670", "placement_group_support": false, "storage": 3360.0, "vcpus": 32.0, "vpc_only": false}, "cr1.8xlarge": {"apiname": "cr1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 88.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 244.0, "name": "High Memory Cluster Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670", "placement_group_support": false, "storage": 240.0, "vcpus": 32.0, "vpc_only": false}, "d2.2xlarge": {"apiname": "d2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 28.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "D2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 12000.0, "vcpus": 8.0, "vpc_only": false}, "d2.4xlarge": {"apiname": "d2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 56.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "D2 Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 24000.0, "vcpus": 16.0, "vpc_only": false}, "d2.8xlarge": {"apiname": "d2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 116.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.2222222222222223, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "D2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 48000.0, "vcpus": 36.0, "vpc_only": false}, "d2.xlarge": {"apiname": "d2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 14.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", 
"intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "D2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 6000.0, "vcpus": 4.0, "vpc_only": false}, "f1.16xlarge": {"apiname": "f1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 188.0, "ebs_iops": 75000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": false, "fpga": 8, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 400, "memory": 976.0, "name": "F1 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 3760.0, "vcpus": 64.0, "vpc_only": true}, "f1.2xlarge": {"apiname": "f1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 26.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 1, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 122.0, "name": "F1 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 470.0, "vcpus": 8.0, "vpc_only": true}, "f1.4xlarge": {"apiname": "f1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 52.0, "ebs_iops": 44000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 400.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 2, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "F1 Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 940.0, "vcpus": 16.0, "vpc_only": true}, "g2.2xlarge": {"apiname": "g2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 15.0, "name": "G2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 (Sandy Bridge)", "placement_group_support": false, "storage": 60.0, "vcpus": 8.0, "vpc_only": false}, "g2.8xlarge": {"apiname": "g2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 60.0, "name": "G2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 (Sandy Bridge)", "placement_group_support": false, "storage": 240.0, "vcpus": 32.0, "vpc_only": false}, "g3.16xlarge": {"apiname": "g3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 
188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "G3 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "g3.4xlarge": {"apiname": "g3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 47.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "G3 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "g3.8xlarge": {"apiname": "g3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 2, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "G3 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "g3s.xlarge": {"apiname": "g3s.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 13.0, "ebs_iops": 5000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 100.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 30.5, "name": "G3S Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "g4dn.12xlarge": {"apiname": "g4dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 4, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "G4DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 48.0, "vpc_only": true}, "g4dn.16xlarge": {"apiname": "g4dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "G4DN 16xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 64.0, 
"vpc_only": true}, "g4dn.2xlarge": {"apiname": "g4dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 32.0, "name": "G4DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 225.0, "vcpus": 8.0, "vpc_only": true}, "g4dn.4xlarge": {"apiname": "g4dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 64.0, "name": "G4DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 225.0, "vcpus": 16.0, "vpc_only": true}, "g4dn.8xlarge": {"apiname": "g4dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 128.0, "name": "G4DN Eight Extra Large", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 32.0, "vpc_only": true}, "g4dn.metal": {"apiname": "g4dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 8, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "G4DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "g4dn.xlarge": {"apiname": "g4dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "G4DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 125.0, "vcpus": 4.0, "vpc_only": true}, "h1.16xlarge": {"apiname": "h1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "H1 16xlarge", "network_perf": 20.0, "physical_processor": "Intel 
Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 16000.0, "vcpus": 64.0, "vpc_only": true}, "h1.2xlarge": {"apiname": "h1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 26.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.75, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "H1 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 2000.0, "vcpus": 8.0, "vpc_only": true}, "h1.4xlarge": {"apiname": "h1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.5, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.34375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "H1 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 4000.0, "vcpus": 16.0, "vpc_only": true}, "h1.8xlarge": {"apiname": "h1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "H1 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 8000.0, "vcpus": 32.0, "vpc_only": true}, "hs1.8xlarge": {"apiname": "hs1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2 GHz", "computeunits": 35.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.0588235294117645, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 117.0, "name": "High Storage Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2650", "placement_group_support": false, "storage": 48000.0, "vcpus": 17.0, "vpc_only": false}, "i2.2xlarge": {"apiname": "i2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 27.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "I2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 1600.0, "vcpus": 8.0, "vpc_only": false}, "i2.4xlarge": {"apiname": "i2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 53.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": 
"Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "I2 Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 3200.0, "vcpus": 16.0, "vpc_only": false}, "i2.8xlarge": {"apiname": "i2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "I2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 6400.0, "vcpus": 32.0, "vpc_only": false}, "i2.xlarge": {"apiname": "i2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 14.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "I2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 800.0, "vcpus": 4.0, "vpc_only": false}, "i3.16xlarge": {"apiname": "i3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 200.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "I3 High I/O 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 15200.0, "vcpus": 64.0, "vpc_only": true}, "i3.2xlarge": {"apiname": "i3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 27.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "I3 High I/O Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 1900.0, "vcpus": 8.0, "vpc_only": true}, "i3.4xlarge": {"apiname": "i3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "I3 High I/O Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 3800.0, "vcpus": 16.0, "vpc_only": true}, "i3.8xlarge": {"apiname": "i3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 32500.0, "ebs_max_bandwidth": 
7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "I3 High I/O Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 7600.0, "vcpus": 32.0, "vpc_only": true}, "i3.large": {"apiname": "i3.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 7.0, "ebs_iops": 3000.0, "ebs_max_bandwidth": 425.0, "ebs_throughput": 53.13, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "I3 High I/O Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 475.0, "vcpus": 2.0, "vpc_only": true}, "i3.metal": {"apiname": "i3.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 208.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.888888888888889, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "I3 High I/O Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 15200.0, "vcpus": 72.0, "vpc_only": true}, "i3.xlarge": {"apiname": "i3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 13.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 106.25, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "I3 High I/O Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 950.0, "vcpus": 4.0, "vpc_only": true}, "i3en.12xlarge": {"apiname": "i3en.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "I3EN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 30000.0, "vcpus": 48.0, "vpc_only": true}, "i3en.24xlarge": {"apiname": "i3en.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "I3EN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 60000.0, "vcpus": 96.0, "vpc_only": 
true}, "i3en.2xlarge": {"apiname": "i3en.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "I3EN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 5000.0, "vcpus": 8.0, "vpc_only": true}, "i3en.3xlarge": {"apiname": "i3en.3xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 96.0, "name": "I3EN 3xlarge", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 7500.0, "vcpus": 12.0, "vpc_only": true}, "i3en.6xlarge": {"apiname": "i3en.6xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "I3EN 6xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 15000.0, "vcpus": 24.0, "vpc_only": true}, "i3en.large": {"apiname": "i3en.large", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "I3EN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1250.0, "vcpus": 2.0, "vpc_only": true}, "i3en.metal": {"apiname": "i3en.metal", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "I3EN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 60000.0, "vcpus": 96.0, "vpc_only": true}, "i3en.xlarge": {"apiname": "i3en.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "I3EN Extra Large", "network_perf": 
18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2500.0, "vcpus": 4.0, "vpc_only": true}, "m1.large": {"apiname": "m1.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 4.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 30, "memory": 7.5, "name": "M1 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 840.0, "vcpus": 2.0, "vpc_only": false}, "m1.medium": {"apiname": "m1.medium", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 2.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 12, "memory": 3.75, "name": "M1 General Purpose Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 410.0, "vcpus": 1.0, "vpc_only": false}, "m1.small": {"apiname": "m1.small", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 1.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 1.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 8, "memory": 1.7, "name": "M1 General Purpose Small", "network_perf": 2.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 160.0, "vcpus": 1.0, "vpc_only": false}, "m1.xlarge": {"apiname": "m1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 8.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 15.0, "name": "M1 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 1680.0, "vcpus": 4.0, "vpc_only": false}, "m2.2xlarge": {"apiname": "m2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 120, "memory": 34.2, "name": "M2 High Memory Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 850.0, "vcpus": 4.0, "vpc_only": false}, "m2.4xlarge": {"apiname": "m2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, 
"linux_virtualization": "PV", "max_ips": 240, "memory": 68.4, "name": "M2 High Memory Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 1680.0, "vcpus": 8.0, "vpc_only": false}, "m2.xlarge": {"apiname": "m2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 17.1, "name": "M2 High Memory Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 420.0, "vcpus": 2.0, "vpc_only": false}, "m3.2xlarge": {"apiname": "m3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 120, "memory": 30.0, "name": "M3 General Purpose Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "m3.large": {"apiname": "m3.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 30, "memory": 7.5, "name": "M3 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "m3.medium": {"apiname": "m3.medium", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 3.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 12, "memory": 3.75, "name": "M3 General Purpose Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 4.0, "vcpus": 1.0, "vpc_only": false}, "m3.xlarge": {"apiname": "m3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 15.0, "name": "M3 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "m4.10xlarge": {"apiname": "m4.10xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 124.5, "ebs_iops": 32000.0, 
"ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.1125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 160.0, "name": "M4 General Purpose Deca Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 40.0, "vpc_only": true}, "m4.16xlarge": {"apiname": "m4.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "M4 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m4.2xlarge": {"apiname": "m4.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M4 General Purpose Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m4.4xlarge": {"apiname": "m4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 53.5, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.34375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M4 General Purpose Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m4.large": {"apiname": "m4.large", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 6.5, "ebs_iops": 3600.0, "ebs_max_bandwidth": 450.0, "ebs_throughput": 56.25, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 20, "memory": 8.0, "name": "M4 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m4.xlarge": {"apiname": "m4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 13.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M4 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, 
"vcpus": 4.0, "vpc_only": true}, "m5.12xlarge": {"apiname": "m5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "M5 General Purpose 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5.16xlarge": {"apiname": "m5.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "M5 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5.24xlarge": {"apiname": "m5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5.2xlarge": {"apiname": "m5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 31.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M5 General Purpose Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5.4xlarge": {"apiname": "m5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 60.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.75, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M5 General Purpose Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5.8xlarge": {"apiname": "m5.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, 
"name": "M5 General Purpose Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5.large": {"apiname": "m5.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 8.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 8.0, "name": "M5 General Purpose Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5.metal": {"apiname": "m5.metal", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5.xlarge": {"apiname": "m5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 16.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M5 General Purpose Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "m5a.12xlarge": {"apiname": "m5a.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5A 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5a.16xlarge": {"apiname": "m5a.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5A 16xlarge", "network_perf": 14.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5a.24xlarge": {"apiname": "m5a.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": 
"unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5A 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5a.2xlarge": {"apiname": "m5a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5A Double Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5a.4xlarge": {"apiname": "m5a.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5A Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5a.8xlarge": {"apiname": "m5a.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5A Eight Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5a.large": {"apiname": "m5a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5A Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5a.xlarge": {"apiname": "m5a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5A Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "m5ad.12xlarge": {"apiname": "m5ad.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 675.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, 
"intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5AD 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5ad.24xlarge": {"apiname": "m5ad.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5AD 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5ad.2xlarge": {"apiname": "m5ad.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5AD Double Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5ad.4xlarge": {"apiname": "m5ad.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5AD Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5ad.large": {"apiname": "m5ad.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5AD Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5ad.xlarge": {"apiname": "m5ad.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5AD Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5d.12xlarge": {"apiname": "m5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, 
"ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "M5 General Purpose 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5d.16xlarge": {"apiname": "m5d.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "M5 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "m5d.24xlarge": {"apiname": "m5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5d.2xlarge": {"apiname": "m5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 31.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M5 General Purpose Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5d.4xlarge": {"apiname": "m5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 60.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.75, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M5 General Purpose Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5d.8xlarge": {"apiname": "m5d.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "M5 General Purpose Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1200.0, 
"vcpus": 32.0, "vpc_only": true}, "m5d.large": {"apiname": "m5d.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 8.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 8.0, "name": "M5 General Purpose Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5d.metal": {"apiname": "m5d.metal", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5d.xlarge": {"apiname": "m5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 16.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M5 General Purpose Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5dn.12xlarge": {"apiname": "m5dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5dn.16xlarge": {"apiname": "m5dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5DN 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "m5dn.24xlarge": {"apiname": "m5dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, 
"memory": 384.0, "name": "M5DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5dn.2xlarge": {"apiname": "m5dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5dn.4xlarge": {"apiname": "m5dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5dn.8xlarge": {"apiname": "m5dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5DN Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "m5dn.large": {"apiname": "m5dn.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5DN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5dn.metal": {"apiname": "m5dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "M5DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5dn.xlarge": {"apiname": "m5dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, 
"ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5n.12xlarge": {"apiname": "m5n.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5N 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5n.16xlarge": {"apiname": "m5n.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5N 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5n.24xlarge": {"apiname": "m5n.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5N 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5n.2xlarge": {"apiname": "m5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5n.4xlarge": {"apiname": "m5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, 
"storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5n.8xlarge": {"apiname": "m5n.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5N Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5n.large": {"apiname": "m5n.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5n.metal": {"apiname": "m5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "M5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5n.xlarge": {"apiname": "m5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5N Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "p2.16xlarge": {"apiname": "p2.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 16, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 768.0, "name": "General Purpose GPU 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "p2.8xlarge": {"apiname": "p2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 32500.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, 
"memory": 488.0, "name": "General Purpose GPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "p2.xlarge": {"apiname": "p2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 12.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.0, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "General Purpose GPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "p3.16xlarge": {"apiname": "p3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 488.0, "name": "P3 16xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "p3.2xlarge": {"apiname": "p3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 26.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "P3 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "p3.8xlarge": {"apiname": "p3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "P3 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "p3dn.24xlarge": {"apiname": "p3dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "P3DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175 (Skylake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 96.0, "vpc_only": true}, "r3.2xlarge": {"apiname": "r3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 
0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "R3 High-Memory Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "r3.4xlarge": {"apiname": "r3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 52.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "R3 High-Memory Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 320.0, "vcpus": 16.0, "vpc_only": false}, "r3.8xlarge": {"apiname": "r3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "R3 High-Memory Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 640.0, "vcpus": 32.0, "vpc_only": false}, "r3.large": {"apiname": "r3.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "R3 High-Memory Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "r3.xlarge": {"apiname": "r3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "R3 High-Memory Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "r4.16xlarge": {"apiname": "r4.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 195.0, "ebs_iops": 75000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.046875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "R4 High-Memory 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r4.2xlarge": {"apiname": "r4.2xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "2.3 GHz", "computeunits": 27.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "R4 High-Memory Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r4.4xlarge": {"apiname": "r4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "R4 High-Memory Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r4.8xlarge": {"apiname": "r4.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 37500.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "R4 High-Memory Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r4.large": {"apiname": "r4.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 7.0, "ebs_iops": 3000.0, "ebs_max_bandwidth": 425.0, "ebs_throughput": 53.13, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "R4 High-Memory Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r4.xlarge": {"apiname": "r4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 13.5, "ebs_iops": 6000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 106.25, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "R4 High-Memory Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5.12xlarge": {"apiname": "r5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 384.0, "name": "R5 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon 
Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5.16xlarge": {"apiname": "r5.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "R5 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5.24xlarge": {"apiname": "r5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5.2xlarge": {"apiname": "r5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 38.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "R5 Double Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5.4xlarge": {"apiname": "r5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 71.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.4375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "R5 Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5.8xlarge": {"apiname": "r5.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "R5 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5.large": {"apiname": "r5.large", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 9.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, 
"linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "R5 Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5.metal": {"apiname": "r5.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5.xlarge": {"apiname": "r5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 19.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "R5 Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5a.12xlarge": {"apiname": "r5a.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5A 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5a.16xlarge": {"apiname": "r5a.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5A 16xlarge", "network_perf": 14.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5a.24xlarge": {"apiname": "r5a.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5A 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5a.2xlarge": {"apiname": "r5a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": 
"unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5A Double Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5a.4xlarge": {"apiname": "r5a.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5A Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5a.8xlarge": {"apiname": "r5a.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5A Eight Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5a.large": {"apiname": "r5a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5A Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5a.xlarge": {"apiname": "r5a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5A Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5ad.12xlarge": {"apiname": "r5ad.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5AD 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5ad.24xlarge": {"apiname": "r5ad.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, 
"ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5AD 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5ad.2xlarge": {"apiname": "r5ad.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5AD Double Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5ad.4xlarge": {"apiname": "r5ad.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5AD Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5ad.large": {"apiname": "r5ad.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5AD Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5ad.xlarge": {"apiname": "r5ad.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5AD Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5d.12xlarge": {"apiname": "r5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 384.0, "name": "R5D 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5d.16xlarge": {"apiname": "r5d.16xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "unknown", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "R5D 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "r5d.24xlarge": {"apiname": "r5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5D 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5d.2xlarge": {"apiname": "r5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 38.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "R5D Double Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5d.4xlarge": {"apiname": "r5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 71.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.4375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "R5D Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5d.8xlarge": {"apiname": "r5d.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "R5D Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "r5d.large": {"apiname": "r5d.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 10.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 5.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "R5D Large", "network_perf": 12.0, "physical_processor": "Intel Xeon 
Platinum 8175", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5d.metal": {"apiname": "r5d.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5D Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5d.xlarge": {"apiname": "r5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 19.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "R5D Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5dn.12xlarge": {"apiname": "r5dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5dn.16xlarge": {"apiname": "r5dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5DN 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "r5dn.24xlarge": {"apiname": "r5dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5dn.2xlarge": {"apiname": "r5dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": 
"unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5dn.4xlarge": {"apiname": "r5dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5dn.8xlarge": {"apiname": "r5dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5DN Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "r5dn.large": {"apiname": "r5dn.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5DN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5dn.metal": {"apiname": "r5dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 768.0, "name": "R5DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5dn.xlarge": {"apiname": "r5dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5n.12xlarge": {"apiname": "r5n.12xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5N 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5n.16xlarge": {"apiname": "r5n.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5N 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5n.24xlarge": {"apiname": "r5n.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5N 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5n.2xlarge": {"apiname": "r5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5n.4xlarge": {"apiname": "r5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5n.8xlarge": {"apiname": "r5n.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5N Eight Extra 
Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5n.large": {"apiname": "r5n.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5n.metal": {"apiname": "r5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 768.0, "name": "R5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5n.xlarge": {"apiname": "r5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5N Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t1.micro": {"apiname": "t1.micro", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 4, "memory": 0.613, "name": "T1 Micro", "network_perf": 0.0, "physical_processor": "Variable", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": false}, "t2.2xlarge": {"apiname": "t2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 1.3599999999999999, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 45, "memory": 32.0, "name": "T2 Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t2.large": {"apiname": "t2.large", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 0.6, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, 
"linux_virtualization": "HVM", "max_ips": 36, "memory": 8.0, "name": "T2 Large", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t2.medium": {"apiname": "t2.medium", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.4, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 18, "memory": 4.0, "name": "T2 Medium", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t2.micro": {"apiname": "t2.micro", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.1, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 4, "memory": 1.0, "name": "T2 Micro", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.nano": {"apiname": "t2.nano", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.05, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 4, "memory": 0.5, "name": "T2 Nano", "network_perf": 2.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.small": {"apiname": "t2.small", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.2, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 12, "memory": 2.0, "name": "T2 Small", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.xlarge": {"apiname": "t2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 0.9, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 45, "memory": 16.0, "name": "T2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t3.2xlarge": {"apiname": "t3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 3.2, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, 
"name": "T3 Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t3.large": {"apiname": "t3.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 36, "memory": 8.0, "name": "T3 Large", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.medium": {"apiname": "t3.medium", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 18, "memory": 4.0, "name": "T3 Medium", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.micro": {"apiname": "t3.micro", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.2, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 1.0, "name": "T3 Micro", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.nano": {"apiname": "t3.nano", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.1, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 0.5, "name": "T3 Nano", "network_perf": 2.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.small": {"apiname": "t3.small", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 12, "memory": 2.0, "name": "T3 Small", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.xlarge": {"apiname": "t3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 1.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", 
"intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "T3 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t3a.2xlarge": {"apiname": "t3a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 3.2, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "T3A Double Extra Large", "network_perf": 6.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t3a.large": {"apiname": "t3a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 36, "memory": 8.0, "name": "T3A Large", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.medium": {"apiname": "t3a.medium", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 18, "memory": 4.0, "name": "T3A Medium", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.micro": {"apiname": "t3a.micro", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.2, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 1.0, "name": "T3A Micro", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.nano": {"apiname": "t3a.nano", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.1, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 0.5, "name": "T3A Nano", "network_perf": 2.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.small": {"apiname": "t3a.small", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", 
"intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 8, "memory": 2.0, "name": "T3A Small", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.xlarge": {"apiname": "t3a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 1.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "T3A Extra Large", "network_perf": 6.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "u-12tb1.metal": {"apiname": "u-12tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 12288.0, "name": "U-12TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-18tb1.metal": {"apiname": "u-18tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 160000.0, "ebs_max_bandwidth": 28000.0, "ebs_throughput": 3500.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 18432.0, "name": "U-18TB1 Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8280L (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-24tb1.metal": {"apiname": "u-24tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 160000.0, "ebs_max_bandwidth": 28000.0, "ebs_throughput": 3500.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 24576.0, "name": "U-24TB1 Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8280L (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-6tb1.metal": {"apiname": "u-6tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 6144.0, "name": "U-6TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-9tb1.metal": {"apiname": "u-9tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", 
"computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 9216.0, "name": "U-9TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "x1.16xlarge": {"apiname": "x1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 174.5, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.7265625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 976.0, "name": "X1 Extra High-Memory 16xlarge", "network_perf": 8.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 1920.0, "vcpus": 64.0, "vpc_only": true}, "x1.32xlarge": {"apiname": "x1.32xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 349.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.7265625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 1952.0, "name": "X1 Extra High-Memory 32xlarge", "network_perf": 8.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 3840.0, "vcpus": 128.0, "vpc_only": true}, "x1e.16xlarge": {"apiname": "x1e.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 179.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.796875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 1952.0, "name": "X1E 16xlarge", "network_perf": 12.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 1920.0, "vcpus": 64.0, "vpc_only": true}, "x1e.2xlarge": {"apiname": "x1e.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 23.0, "ebs_iops": 7400.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 244.0, "name": "X1E Double Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 240.0, "vcpus": 8.0, "vpc_only": true}, "x1e.32xlarge": {"apiname": "x1e.32xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 340.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.65625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 3904.0, "name": "X1E 
32xlarge", "network_perf": 20.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 3840.0, "vcpus": 128.0, "vpc_only": true}, "x1e.4xlarge": {"apiname": "x1e.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 47.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.75, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 488.0, "name": "X1E Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 480.0, "vcpus": 16.0, "vpc_only": true}, "x1e.8xlarge": {"apiname": "x1e.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 91.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 2.84375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 976.0, "name": "X1E Eight Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 960.0, "vcpus": 32.0, "vpc_only": true}, "x1e.xlarge": {"apiname": "x1e.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 12.0, "ebs_iops": 3700.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 122.0, "name": "X1E Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 120.0, "vcpus": 4.0, "vpc_only": true}, "z1d.12xlarge": {"apiname": "z1d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 271.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 5.645833333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "Z1D 12xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "z1d.2xlarge": {"apiname": "z1d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 53.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 292.0, "ecu_per_vcpu": 6.625, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "Z1D Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "z1d.3xlarge": {"apiname": "z1d.3xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 75.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 438.0, "ecu_per_vcpu": 6.25, 
"enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "Z1D 3xlarge", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 450.0, "vcpus": 12.0, "vpc_only": true}, "z1d.6xlarge": {"apiname": "z1d.6xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 134.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 5.583333333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "Z1D 6xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 900.0, "vcpus": 24.0, "vpc_only": true}, "z1d.large": {"apiname": "z1d.large", "architecture": "64-bit", "clock_speed_ghz": "4.0 GHz", "computeunits": 15.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 291.0, "ecu_per_vcpu": 7.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "Z1D Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "z1d.metal": {"apiname": "z1d.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 271.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 5.645833333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "Z1D Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "z1d.xlarge": {"apiname": "z1d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 28.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 291.0, "ecu_per_vcpu": 7.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "Z1D Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}} \ No newline at end of file diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 21cbf8249..515ae1f31 100644 --- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -24,6 +24,7 @@ from .security_groups import SecurityGroups from .spot_fleets import SpotFleets from .spot_instances import SpotInstances from .subnets import Subnets +from .flow_logs import FlowLogs from .tags import TagResponse from .virtual_private_gateways import VirtualPrivateGateways from .vm_export import VMExport @@ -33,6 +34,7 @@ from .vpc_peering_connections import VPCPeeringConnections from .vpn_connections import VPNConnections from .windows import Windows from 
.nat_gateways import NatGateways +from .iam_instance_profiles import IamInstanceProfiles class EC2Response( @@ -60,6 +62,7 @@ class EC2Response( SpotFleets, SpotInstances, Subnets, + FlowLogs, TagResponse, VirtualPrivateGateways, VMExport, @@ -69,6 +72,7 @@ class EC2Response( VPNConnections, Windows, NatGateways, + IamInstanceProfiles, ): @property def ec2_backend(self): diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 6736a7175..178d583e0 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -73,8 +73,12 @@ class AmisResponse(BaseResponse): return MODIFY_IMAGE_ATTRIBUTE_RESPONSE def register_image(self): + name = self.querystring.get("Name")[0] + description = self._get_param("Description", if_none="") if self.is_not_dryrun("RegisterImage"): - raise NotImplementedError("AMIs.register_image is not yet implemented") + image = self.ec2_backend.register_image(name, description) + template = self.response_template(REGISTER_IMAGE_RESPONSE) + return template.render(image=image) def reset_image_attribute(self): if self.is_not_dryrun("ResetImageAttribute"): @@ -125,7 +129,7 @@ DESCRIBE_IMAGES_RESPONSE = """true """ + +REGISTER_IMAGE_RESPONSE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + {{ image.id }} +""" diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py index d63e2f4ad..61d4eb1ae 100644 --- a/moto/ec2/responses/availability_zones_and_regions.py +++ b/moto/ec2/responses/availability_zones_and_regions.py @@ -22,6 +22,7 @@ DESCRIBE_REGIONS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE {{ snapshot.id }} + + {% for tag in snapshot.get_tags() %} + + {{ tag.resource_id }} + {{ tag.resource_type }} + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + """ DESCRIBE_SNAPSHOTS_RESPONSE = """ @@ -311,18 +334,18 @@ DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE = """ a9540c9f-161a-45d8-9cc1-1182b89ad69f snap-a0332ee0 - {% if not groups %} - - {% endif %} - {% if groups %} - - {% for group in groups %} - - {{ group }} - - {% endfor %} - - {% endif %} + + {% for group in groups %} + + {{ group }} + + {% endfor %} + {% for userId in userIds %} + + {{ userId }} + + {% endfor %} + """ diff --git a/moto/ec2/responses/flow_logs.py b/moto/ec2/responses/flow_logs.py new file mode 100644 index 000000000..74930f291 --- /dev/null +++ b/moto/ec2/responses/flow_logs.py @@ -0,0 +1,122 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from moto.ec2.models import validate_resource_ids +from moto.ec2.utils import filters_from_querystring + + +class FlowLogs(BaseResponse): + def create_flow_logs(self): + resource_type = self._get_param("ResourceType") + resource_ids = self._get_multi_param("ResourceId") + traffic_type = self._get_param("TrafficType") + deliver_logs_permission_arn = self._get_param("DeliverLogsPermissionArn") + log_destination_type = self._get_param("LogDestinationType") + log_destination = self._get_param("LogDestination") + log_group_name = self._get_param("LogGroupName") + log_format = self._get_param("LogFormat") + max_aggregation_interval = self._get_param("MaxAggregationInterval") + validate_resource_ids(resource_ids) + + tags = self._parse_tag_specification("TagSpecification") + tags = tags.get("vpc-flow-log", {}) + if self.is_not_dryrun("CreateFlowLogs"): + flow_logs, errors = self.ec2_backend.create_flow_logs( + resource_type=resource_type, + resource_ids=resource_ids, + traffic_type=traffic_type, + 
deliver_logs_permission_arn=deliver_logs_permission_arn, + log_destination_type=log_destination_type, + log_destination=log_destination, + log_group_name=log_group_name, + log_format=log_format, + max_aggregation_interval=max_aggregation_interval, + ) + for fl in flow_logs: + fl.add_tags(tags) + template = self.response_template(CREATE_FLOW_LOGS_RESPONSE) + return template.render(flow_logs=flow_logs, errors=errors) + + def describe_flow_logs(self): + flow_log_ids = self._get_multi_param("FlowLogId") + filters = filters_from_querystring(self.querystring) + flow_logs = self.ec2_backend.describe_flow_logs(flow_log_ids, filters) + if self.is_not_dryrun("DescribeFlowLogs"): + template = self.response_template(DESCRIBE_FLOW_LOGS_RESPONSE) + return template.render(flow_logs=flow_logs) + + def delete_flow_logs(self): + flow_log_ids = self._get_multi_param("FlowLogId") + self.ec2_backend.delete_flow_logs(flow_log_ids) + if self.is_not_dryrun("DeleteFlowLogs"): + template = self.response_template(DELETE_FLOW_LOGS_RESPONSE) + return template.render() + + +CREATE_FLOW_LOGS_RESPONSE = """ + + 2d96dae3-504b-4fc4-bf50-266EXAMPLE + + {% for error in errors %} + + + {{ error.1 }} + {{ error.2 }} + + {{ error.0 }} + + {% endfor %} + + + {% for flow_log in flow_logs %} + {{ flow_log.id }} + {% endfor %} + +""" + +DELETE_FLOW_LOGS_RESPONSE = """ + + c5c4f51f-f4e9-42bc-8700-EXAMPLE + +""" + +DESCRIBE_FLOW_LOGS_RESPONSE = """ + + 3cb46f23-099e-4bf0-891c-EXAMPLE + + {% for flow_log in flow_logs %} + + {% if flow_log.log_destination is not none %} + {{ flow_log.log_destination }} + {% endif %} + {{ flow_log.resource_id }} + {{ flow_log.log_destination_type }} + {{ flow_log.created_at }} + {{ flow_log.traffic_type }} + {{ flow_log.deliver_logs_status }} + {% if flow_log.deliver_logs_error_message is not none %} + {{ flow_log.deliver_logs_error_message }} + {% endif %} + {{ flow_log.log_format }} + ACTIVE + {{ flow_log.id }} + {{ flow_log.max_aggregation_interval }} + {% if flow_log.deliver_logs_permission_arn is not none %} + {{ flow_log.deliver_logs_permission_arn }} + {% endif %} + {% if flow_log.log_group_name is not none %} + {{ flow_log.log_group_name }} + {% endif %} + {% if flow_log.get_tags() %} + + {% for tag in flow_log.get_tags() %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} + + {% endfor %} + +""" diff --git a/moto/ec2/responses/iam_instance_profiles.py b/moto/ec2/responses/iam_instance_profiles.py new file mode 100644 index 000000000..3d2525ba7 --- /dev/null +++ b/moto/ec2/responses/iam_instance_profiles.py @@ -0,0 +1,89 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse + + +class IamInstanceProfiles(BaseResponse): + def associate_iam_instance_profile(self): + instance_id = self._get_param("InstanceId") + iam_instance_profile_name = self._get_param("IamInstanceProfile.Name") + iam_instance_profile_arn = self._get_param("IamInstanceProfile.Arn") + iam_association = self.ec2_backend.associate_iam_instance_profile( + instance_id, iam_instance_profile_name, iam_instance_profile_arn + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="associating") + + def describe_iam_instance_profile_associations(self): + association_ids = self._get_multi_param("AssociationId") + filters = self._get_object_map("Filter") + max_items = self._get_param("MaxItems") + next_token = self._get_param("NextToken") + ( + iam_associations, + next_token, + ) = 
self.ec2_backend.describe_iam_instance_profile_associations( + association_ids, filters, max_items, next_token + ) + template = self.response_template(DESCRIBE_IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_associations=iam_associations, next_token=next_token) + + def disassociate_iam_instance_profile(self): + association_id = self._get_param("AssociationId") + iam_association = self.ec2_backend.disassociate_iam_instance_profile( + association_id + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="disassociating") + + def replace_iam_instance_profile_association(self): + association_id = self._get_param("AssociationId") + iam_instance_profile_name = self._get_param("IamInstanceProfile.Name") + iam_instance_profile_arn = self._get_param("IamInstanceProfile.Arn") + iam_association = self.ec2_backend.replace_iam_instance_profile_association( + association_id, iam_instance_profile_name, iam_instance_profile_arn + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="associating") + + +# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html +IAM_INSTANCE_PROFILE_RESPONSE = """ + + e10deeaf-7cda-48e7-950b-example + + {{ iam_association.id }} + {% if iam_association.iam_instance_profile %} + + {{ iam_association.iam_instance_profile.arn }} + {{ iam_association.iam_instance_profile.id }} + + {% endif %} + {{ iam_association.instance.id }} + {{ state }} + + +""" + + +# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeIamInstanceProfileAssociations.html +# Note: this API description page contains an error! The documented `iamInstanceProfileAssociations` element doesn't work; you +# should use `iamInstanceProfileAssociationSet` instead.
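As an illustrative aside (an editorial sketch, not part of this patch; the helper name and region are hypothetical), this is how the mocked endpoint might be exercised through boto3. The call simply returns an empty association list until associate_iam_instance_profile has been used:

import boto3
from moto import mock_ec2

@mock_ec2
def show_associations():
    # boto3 parses the rendered XML against the EC2 service model, which is
    # why the iamInstanceProfileAssociationSet element name matters.
    client = boto3.client("ec2", region_name="us-east-1")
    response = client.describe_iam_instance_profile_associations()
    for assoc in response["IamInstanceProfileAssociations"]:
        print(assoc["AssociationId"], assoc["State"])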
+DESCRIBE_IAM_INSTANCE_PROFILE_RESPONSE = """ + + 84c2d2a6-12dc-491f-a9ee-example + {% if next_token %}{{ next_token }}{% endif %} + + {% for iam_association in iam_associations %} + + {{ iam_association.id }} + + {{ iam_association.iam_instance_profile.arn }} + {{ iam_association.iam_instance_profile.id }} + + {{ iam_association.instance.id }} + {{ iam_association.state }} + + {% endfor %} + + +""" diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index b9e572d29..eb395aa8f 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -1,13 +1,20 @@ from __future__ import unicode_literals -from boto.ec2.instancetype import InstanceType +from moto.packages.boto.ec2.instancetype import InstanceType from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import filters_from_querystring, dict_from_querystring +from moto.ec2.exceptions import MissingParameterError +from moto.ec2.utils import ( + filters_from_querystring, + dict_from_querystring, +) from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID +from copy import deepcopy +import six + class InstanceResponse(BaseResponse): def describe_instances(self): @@ -44,40 +51,31 @@ class InstanceResponse(BaseResponse): owner_id = self._get_param("OwnerId") user_data = self._get_param("UserData") security_group_names = self._get_multi_param("SecurityGroup") - security_group_ids = self._get_multi_param("SecurityGroupId") - nics = dict_from_querystring("NetworkInterface", self.querystring) - instance_type = self._get_param("InstanceType", if_none="m1.small") - placement = self._get_param("Placement.AvailabilityZone") - subnet_id = self._get_param("SubnetId") - private_ip = self._get_param("PrivateIpAddress") - associate_public_ip = self._get_param("AssociatePublicIpAddress") - key_name = self._get_param("KeyName") - ebs_optimized = self._get_param("EbsOptimized") - instance_initiated_shutdown_behavior = self._get_param( - "InstanceInitiatedShutdownBehavior" - ) - tags = self._parse_tag_specification("TagSpecification") - region_name = self.region + kwargs = { + "instance_type": self._get_param("InstanceType", if_none="m1.small"), + "placement": self._get_param("Placement.AvailabilityZone"), + "region_name": self.region, + "subnet_id": self._get_param("SubnetId"), + "owner_id": owner_id, + "key_name": self._get_param("KeyName"), + "security_group_ids": self._get_multi_param("SecurityGroupId"), + "nics": dict_from_querystring("NetworkInterface", self.querystring), + "private_ip": self._get_param("PrivateIpAddress"), + "associate_public_ip": self._get_param("AssociatePublicIpAddress"), + "tags": self._parse_tag_specification("TagSpecification"), + "ebs_optimized": self._get_param("EbsOptimized") or False, + "instance_initiated_shutdown_behavior": self._get_param( + "InstanceInitiatedShutdownBehavior" + ), + } + + mappings = self._parse_block_device_mapping() + if mappings: + kwargs["block_device_mappings"] = mappings if self.is_not_dryrun("RunInstance"): new_reservation = self.ec2_backend.add_instances( - image_id, - min_count, - user_data, - security_group_names, - instance_type=instance_type, - placement=placement, - region_name=region_name, - subnet_id=subnet_id, - owner_id=owner_id, - key_name=key_name, - security_group_ids=security_group_ids, - nics=nics, - private_ip=private_ip, - associate_public_ip=associate_public_ip, - tags=tags, - ebs_optimized=ebs_optimized, - 
instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, + image_id, min_count, user_data, security_group_names, **kwargs ) template = self.response_template(EC2_RUN_INSTANCES) @@ -113,16 +111,34 @@ class InstanceResponse(BaseResponse): template = self.response_template(EC2_START_INSTANCES) return template.render(instances=instances) + def _get_list_of_dict_params(self, param_prefix, _dct): + """ + Simplified version of _get_dict_param + Allows you to pass in a custom dict instead of using self.querystring by default + """ + params = [] + for key, value in _dct.items(): + if key.startswith(param_prefix): + params.append(value) + return params + def describe_instance_status(self): instance_ids = self._get_multi_param("InstanceId") include_all_instances = self._get_param("IncludeAllInstances") == "true" + filters = self._get_list_prefix("Filter") + filters = [ + {"name": f["name"], "values": self._get_list_of_dict_params("value.", f)} + for f in filters + ] if instance_ids: - instances = self.ec2_backend.get_multi_instances_by_id(instance_ids) + instances = self.ec2_backend.get_multi_instances_by_id( + instance_ids, filters + ) elif include_all_instances: - instances = self.ec2_backend.all_instances() + instances = self.ec2_backend.all_instances(filters) else: - instances = self.ec2_backend.all_running_instances() + instances = self.ec2_backend.all_running_instances(filters) template = self.response_template(EC2_INSTANCE_STATUS) return template.render(instances=instances) @@ -150,6 +166,14 @@ class InstanceResponse(BaseResponse): return template.render(instance=instance, attribute=attribute, value=value) + def describe_instance_credit_specifications(self): + instance_ids = self._get_multi_param("InstanceId") + instance = self.ec2_backend.describe_instance_credit_specifications( + instance_ids + ) + template = self.response_template(EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS) + return template.render(instances=instance) + def modify_instance_attribute(self): handlers = [ self._dot_value_instance_attribute_handler, @@ -246,6 +270,68 @@ class InstanceResponse(BaseResponse): ) return EC2_MODIFY_INSTANCE_ATTRIBUTE + def _parse_block_device_mapping(self): + device_mappings = self._get_list_prefix("BlockDeviceMapping") + mappings = [] + for device_mapping in device_mappings: + self._validate_block_device_mapping(device_mapping) + device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE) + device_template["VirtualName"] = device_mapping.get("virtual_name") + device_template["DeviceName"] = device_mapping.get("device_name") + device_template["Ebs"]["SnapshotId"] = device_mapping.get( + "ebs._snapshot_id" + ) + device_template["Ebs"]["VolumeSize"] = device_mapping.get( + "ebs._volume_size" + ) + device_template["Ebs"]["DeleteOnTermination"] = self._convert_to_bool( + device_mapping.get("ebs._delete_on_termination", False) + ) + device_template["Ebs"]["VolumeType"] = device_mapping.get( + "ebs._volume_type" + ) + device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops") + device_template["Ebs"]["Encrypted"] = self._convert_to_bool( + device_mapping.get("ebs._encrypted", False) + ) + mappings.append(device_template) + + return mappings + + @staticmethod + def _validate_block_device_mapping(device_mapping): + + if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")): + raise MissingParameterError("ebs") + if ( + "ebs._volume_size" not in device_mapping + and "ebs._snapshot_id" not in device_mapping + ): + raise MissingParameterError("size or snapshotId") 
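For context, the validator above rejects any block device mapping that carries no Ebs structure, or one that specifies neither a volume size nor a snapshot id. A hedged sketch of a RunInstances request that satisfies it (illustrative only, not part of the patch; the AMI id is a placeholder assumed acceptable to the mock):

import boto3
from moto import mock_ec2

@mock_ec2
def run_with_block_device():
    client = boto3.client("ec2", region_name="us-east-1")
    # The response layer flattens these nested params into keys such as
    # "ebs._volume_size" before _parse_block_device_mapping consumes them.
    client.run_instances(
        ImageId="ami-12345678",  # placeholder id
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[
            {
                "DeviceName": "/dev/sda1",
                # VolumeSize or SnapshotId must be present, per the validator
                "Ebs": {"VolumeSize": 50, "DeleteOnTermination": True},
            }
        ],
    )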
+ + @staticmethod + def _convert_to_bool(bool_str): + if isinstance(bool_str, bool): + return bool_str + + if isinstance(bool_str, six.text_type): + return str(bool_str).lower() == "true" + + return False + + +BLOCK_DEVICE_MAPPING_TEMPLATE = { + "VirtualName": None, + "DeviceName": None, + "Ebs": { + "SnapshotId": None, + "VolumeSize": None, + "DeleteOnTermination": None, + "VolumeType": None, + "Iops": None, + "Encrypted": None, + }, +} EC2_RUN_INSTANCES = ( """ @@ -653,6 +739,18 @@ EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """ + 1b234b5c-d6ef-7gh8-90i1-j2345678901 + + {% for instance in instances %} + + {{ instance.id }} + standard + + {% endfor %} + +""" + EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE {{ instance.id }} @@ -720,13 +818,25 @@ EC2_DESCRIBE_INSTANCE_TYPES = """ {% for instance_type in instance_types %} - {{ instance_type.name }} - {{ instance_type.cores }} - {{ instance_type.memory }} - {{ instance_type.disk }} - {{ instance_type.storageCount }} - {{ instance_type.maxIpAddresses }} - {{ instance_type.ebsOptimizedAvailable }} + {{ instance_type.name }} + + {{ instance_type.cores }} + {{ instance_type.cores }} + 1 + + + {{ instance_type.memory }} + + + {{ instance_type.disk }} + + + + + x86_64 + + + {% endfor %} diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index d232b3b05..cec29849d 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -14,7 +14,10 @@ class InternetGateways(BaseResponse): def create_internet_gateway(self): if self.is_not_dryrun("CreateInternetGateway"): - igw = self.ec2_backend.create_internet_gateway() + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + igw = self.ec2_backend.create_internet_gateway(tags=tags) template = self.response_template(CREATE_INTERNET_GATEWAY_RESPONSE) return template.render(internet_gateway=igw) diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index 8d89e6065..4b1c4c2c5 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -6,7 +6,10 @@ from moto.ec2.utils import filters_from_querystring class NetworkACLs(BaseResponse): def create_network_acl(self): vpc_id = self._get_param("VpcId") - network_acl = self.ec2_backend.create_network_acl(vpc_id) + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + network_acl = self.ec2_backend.create_network_acl(vpc_id, tags=tags) template = self.response_template(CREATE_NETWORK_ACL_RESPONSE) return template.render(network_acl=network_acl) @@ -83,7 +86,7 @@ class NetworkACLs(BaseResponse): def describe_network_acls(self): network_acl_ids = self._get_multi_param("NetworkAclId") filters = filters_from_querystring(self.querystring) - network_acls = self.ec2_backend.get_all_network_acls(network_acl_ids, filters) + network_acls = self.ec2_backend.describe_network_acls(network_acl_ids, filters) template = self.response_template(DESCRIBE_NETWORK_ACL_RESPONSE) return template.render(network_acls=network_acls) @@ -161,7 +164,7 @@ DESCRIBE_NETWORK_ACL_RESPONSE = """ {{ tag.resource_id }} {{ tag.resource_type }} - {{ tag.key }} + {{ tag.key}} {{ tag.value }} {% endfor %} diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index b5d65f831..c929ffb9e 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -16,6 +16,7 @@ class RouteTables(BaseResponse): def 
create_route(self): route_table_id = self._get_param("RouteTableId") destination_cidr_block = self._get_param("DestinationCidrBlock") + destination_ipv6_cidr_block = self._get_param("DestinationIpv6CidrBlock") gateway_id = self._get_param("GatewayId") instance_id = self._get_param("InstanceId") nat_gateway_id = self._get_param("NatGatewayId") @@ -25,6 +26,7 @@ class RouteTables(BaseResponse): self.ec2_backend.create_route( route_table_id, destination_cidr_block, + destination_ipv6_cidr_block, gateway_id=gateway_id, instance_id=instance_id, nat_gateway_id=nat_gateway_id, @@ -37,7 +39,10 @@ class RouteTables(BaseResponse): def create_route_table(self): vpc_id = self._get_param("VpcId") - route_table = self.ec2_backend.create_route_table(vpc_id) + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + route_table = self.ec2_backend.create_route_table(vpc_id, tags) template = self.response_template(CREATE_ROUTE_TABLE_RESPONSE) return template.render(route_table=route_table) diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index f0002d5bd..5c0d1c852 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -20,7 +20,11 @@ def parse_sg_attributes_from_dict(sg_attributes): ip_ranges = [] ip_ranges_tree = sg_attributes.get("IpRanges") or {} for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]["CidrIp"][0]) + ip_range = {"CidrIp": ip_ranges_tree[ip_range_idx]["CidrIp"][0]} + if ip_ranges_tree[ip_range_idx].get("Description"): + ip_range["Description"] = ip_ranges_tree[ip_range_idx].get("Description")[0] + + ip_ranges.append(ip_range) source_groups = [] source_group_ids = [] @@ -61,6 +65,7 @@ class SecurityGroups(BaseResponse): source_groups, source_group_ids, ) = parse_sg_attributes_from_dict(querytree) + yield ( group_name_or_id, ip_protocol, @@ -211,7 +216,10 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = ( {% for ip_range in rule.ip_ranges %} - {{ ip_range }} + {{ ip_range['CidrIp'] }} + {% if ip_range['Description'] %} + {{ ip_range['Description'] }} + {% endif %} {% endfor %} @@ -242,7 +250,10 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = ( {% for ip_range in rule.ip_ranges %} - {{ ip_range }} + {{ ip_range['CidrIp'] }} + {% if ip_range['Description'] %} + {{ ip_range['Description'] }} + {% endif %} {% endfor %} diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index e11984e52..1cfd36993 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -9,12 +9,23 @@ class Subnets(BaseResponse): def create_subnet(self): vpc_id = self._get_param("VpcId") cidr_block = self._get_param("CidrBlock") - availability_zone = self._get_param( - "AvailabilityZone", - if_none=random.choice(self.ec2_backend.describe_availability_zones()).name, - ) + availability_zone = self._get_param("AvailabilityZone") + availability_zone_id = self._get_param("AvailabilityZoneId") + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + + if not availability_zone and not availability_zone_id: + availability_zone = random.choice( + self.ec2_backend.describe_availability_zones() + ).name subnet = self.ec2_backend.create_subnet( - vpc_id, cidr_block, availability_zone, context=self + vpc_id, + cidr_block, + availability_zone, + availability_zone_id, + context=self, + tags=tags, ) template = self.response_template(CREATE_SUBNET_RESPONSE) return template.render(subnet=subnet) @@ -62,6 +73,16 @@ 
CREATE_SUBNET_RESPONSE = """ {{ subnet.assign_ipv6_address_on_creation }} {{ subnet.ipv6_cidr_block_associations }} arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }} + + {% for tag in subnet.get_tags() %} + + {{ tag.resource_id }} + {{ tag.resource_type }} + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + """ @@ -78,7 +99,7 @@ DESCRIBE_SUBNETS_RESPONSE = """ {% for subnet in subnets %} {{ subnet.id }} - available + {{ subnet.state }} {{ subnet.vpc_id }} {{ subnet.cidr_block }} {{ subnet.available_ip_addresses }} diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 5290b7409..0c7d89a41 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -2,7 +2,8 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from moto.ec2.models import validate_resource_ids -from moto.ec2.utils import tags_from_query_string, filters_from_querystring +from moto.ec2.utils import filters_from_querystring +from moto.core.utils import tags_from_query_string class TagResponse(BaseResponse): diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 3bf86af8a..84dbf2bf5 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -86,6 +86,7 @@ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = ( 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} + {{ vpc_pcx.vpc.ec2_backend.region_name }} """ @@ -98,6 +99,7 @@ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = ( true false + {{ vpc_pcx.peer_vpc.ec2_backend.region_name }} {{ vpc_pcx._status.code }} @@ -128,6 +130,7 @@ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = ( 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} + {{ vpc_pcx.vpc.ec2_backend.region_name }} """ @@ -140,6 +143,7 @@ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = ( false false + {{ vpc_pcx.peer_vpc.ec2_backend.region_name }} {{ vpc_pcx._status.code }} diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 0fd198378..de4bb3feb 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -14,14 +14,19 @@ class VPCs(BaseResponse): def create_vpc(self): cidr_block = self._get_param("CidrBlock") + tags = self._get_multi_param("TagSpecification") instance_tenancy = self._get_param("InstanceTenancy", if_none="default") amazon_provided_ipv6_cidr_blocks = self._get_param( "AmazonProvidedIpv6CidrBlock" ) + if tags: + tags = tags[0].get("Tag") + vpc = self.ec2_backend.create_vpc( cidr_block, instance_tenancy, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks, + tags=tags, ) doc_date = self._get_doc_date() template = self.response_template(CREATE_VPC_RESPONSE) @@ -163,6 +168,39 @@ class VPCs(BaseResponse): cidr_block_state="disassociating", ) + def create_vpc_endpoint(self): + vpc_id = self._get_param("VpcId") + service_name = self._get_param("ServiceName") + route_table_ids = self._get_multi_param("RouteTableId") + subnet_ids = self._get_multi_param("SubnetId") + type = self._get_param("VpcEndpointType") + policy_document = self._get_param("PolicyDocument") + client_token = self._get_param("ClientToken") + tag_specifications = self._get_param("TagSpecifications") + private_dns_enabled = self._get_param("PrivateDNSEnabled") + security_group = self._get_param("SecurityGroup") + + vpc_end_point = self.ec2_backend.create_vpc_endpoint( + vpc_id=vpc_id, + service_name=service_name, + type=type, + policy_document=policy_document, + 
route_table_ids=route_table_ids, + subnet_ids=subnet_ids, + client_token=client_token, + security_group=security_group, + tag_specifications=tag_specifications, + private_dns_enabled=private_dns_enabled, + ) + + template = self.response_template(CREATE_VPC_END_POINT) + return template.render(vpc_end_point=vpc_end_point) + + def describe_vpc_endpoint_services(self): + vpc_end_point_services = self.ec2_backend.get_vpc_end_point_services() + template = self.response_template(DESCRIBE_VPC_ENDPOINT_RESPONSE) + return template.render(vpc_end_points=vpc_end_point_services) + CREATE_VPC_RESPONSE = """ @@ -384,3 +422,72 @@ IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ """ + +CREATE_VPC_END_POINT = """ + + {{ vpc_end_point.policy_document }} + available + false + {{ vpc_end_point.service_name }} + {{ vpc_end_point.vpc_id }} + {{ vpc_end_point.id }} + + {% for routeid in vpc_end_point.route_table_ids %} + {{ routeid }} + {% endfor %} + + + {% for network_interface_id in vpc_end_point.network_interface_ids %} + {{ network_interface_id }} + {% endfor %} + + + {% for subnetId in vpc_end_point.subnet_ids %} + {{ subnetId }} + {% endfor %} + + + {% if vpc_end_point.dns_entries %} + {% for entry in vpc_end_point.dns_entries %} + + {{ entry["hosted_zone_id"] }} + {{ entry["dns_name"] }} + + {% endfor %} + {% endif %} + + {{ vpc_end_point.created_at }} + +""" + +DESCRIBE_VPC_ENDPOINT_RESPONSE = """ + 19a9ff46-7df6-49b8-9726-3df27527089d + + {% for serviceName in vpc_end_points.services %} + {{ serviceName }} + {% endfor %} + + + + {% for service in vpc_end_points.servicesDetails %} + amazon + + + {{ service.type }} + + + + {{ ".".join((service.service_name.split(".")[::-1])) }} + + false + + {% for zone in vpc_end_points.availability_zones %} + {{ zone.name }} + {% endfor %} + + {{ service.service_name }} + true + {% endfor %} + + +""" diff --git a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index 9ddd4d7d9..d0e2eead2 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -7,7 +7,7 @@ class VPNConnections(BaseResponse): def create_vpn_connection(self): type = self._get_param("Type") cgw_id = self._get_param("CustomerGatewayId") - vgw_id = self._get_param("VPNGatewayId") + vgw_id = self._get_param("VpnGatewayId") static_routes = self._get_param("StaticRoutesOnly") vpn_connection = self.ec2_backend.create_vpn_connection( type, cgw_id, vgw_id, static_routes_only=static_routes diff --git a/moto/ec2/urls.py b/moto/ec2/urls.py index 4d85b2f56..78f234320 100644 --- a/moto/ec2/urls.py +++ b/moto/ec2/urls.py @@ -2,6 +2,6 @@ from __future__ import unicode_literals from .responses import EC2Response -url_bases = ["https?://ec2\.(.+)\.amazonaws\.com(|\.cn)"] +url_bases = [r"https?://ec2\.(.+)\.amazonaws\.com(|\.cn)"] url_paths = {"{0}/": EC2Response.dispatch} diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 2301248c1..4a101f923 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -10,13 +10,14 @@ import six from cryptography.hazmat.primitives import serialization from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa -import sshpubkeys.exceptions -from sshpubkeys.keys import SSHKey +from moto.core import ACCOUNT_ID +from moto.iam import iam_backends EC2_RESOURCE_TO_PREFIX = { "customer-gateway": "cgw", "dhcp-options": "dopt", + "flow-logs": "fl", "image": "ami", "instance": "i", "internet-gateway": "igw", @@ -43,6 +44,7 @@ EC2_RESOURCE_TO_PREFIX = { 
"vpc-peering-connection": "pcx", "vpn-connection": "vpn", "vpn-gateway": "vgw", + "iam-instance-profile-association": "iip-assoc", } @@ -75,6 +77,10 @@ def random_security_group_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX["security-group"]) +def random_flow_log_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX["flow-logs"]) + + def random_snapshot_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX["snapshot"]) @@ -167,6 +173,10 @@ def random_launch_template_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX["launch-template"], size=17) +def random_iam_instance_profile_association_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX["iam-instance-profile-association"]) + + def random_public_ip(): return "54.214.{0}.{1}".format(random.choice(range(255)), random.choice(range(255))) @@ -183,35 +193,38 @@ def random_ip(): ) +def randor_ipv4_cidr(): + return "10.0.{}.{}/16".format(random.randint(0, 255), random.randint(0, 255)) + + def random_ipv6_cidr(): return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4)) -def generate_route_id(route_table_id, cidr_block): +def generate_route_id(route_table_id, cidr_block, ipv6_cidr_block=None): + if ipv6_cidr_block and not cidr_block: + cidr_block = ipv6_cidr_block return "%s~%s" % (route_table_id, cidr_block) +def generate_vpc_end_point_id(vpc_id): + return "%s-%s" % ("vpce", vpc_id[4:]) + + +def create_dns_entries(service_name, vpc_endpoint_id): + dns_entries = {} + dns_entries["dns_name"] = "{}-{}.{}".format( + vpc_endpoint_id, random_resource_id(8), service_name + ) + dns_entries["hosted_zone_id"] = random_resource_id(13).upper() + return dns_entries + + def split_route_id(route_id): values = route_id.split("~") return values[0], values[1] -def tags_from_query_string(querystring_dict): - prefix = "Tag" - suffix = "Key" - response_values = {} - for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." 
+ suffix, "") - tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] - tag_value_key = "Tag.{0}.Value".format(tag_index) - if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] - else: - response_values[tag_key] = None - return response_values - - def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration"): """ turn: @@ -252,7 +265,8 @@ def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration") def filters_from_querystring(querystring_dict): response_values = {} - for key, value in querystring_dict.items(): + last_tag_key = None + for key, value in sorted(querystring_dict.items()): match = re.search(r"Filter.(\d).Name", key) if match: filter_index = match.groups()[0] @@ -262,6 +276,10 @@ def filters_from_querystring(querystring_dict): for filter_key, filter_value in querystring_dict.items() if filter_key.startswith(value_prefix) ] + if value[0] == "tag-key": + last_tag_key = "tag:" + filter_values[0] + elif last_tag_key and value[0] == "tag-value": + response_values[last_tag_key] = filter_values response_values[value[0]] = filter_values return response_values @@ -285,7 +303,9 @@ def get_object_value(obj, attr): keys = attr.split(".") val = obj for key in keys: - if hasattr(val, key): + if key == "owner_id": + return ACCOUNT_ID + elif hasattr(val, key): val = getattr(val, key) elif isinstance(val, dict): val = val[key] @@ -329,6 +349,8 @@ def tag_filter_matches(obj, filter_name, filter_values): tag_values = get_obj_tag_names(obj) elif filter_name == "tag-value": tag_values = get_obj_tag_values(obj) + elif filter_name.startswith("tag:"): + tag_values = get_obj_tag_values(obj) else: tag_values = [get_obj_tag(obj, filter_name) or ""] @@ -356,6 +378,7 @@ filter_dict_attribute_mapping = { "image-id": "image_id", "network-interface.private-dns-name": "private_dns", "private-dns-name": "private_dns", + "owner-id": "owner_id", } @@ -553,6 +576,10 @@ def generate_instance_identity_document(instance): def rsa_public_key_parse(key_material): + # These imports take ~.5s; let's keep them local + import sshpubkeys.exceptions + from sshpubkeys.keys import SSHKey + try: if not isinstance(key_material, six.binary_type): key_material = key_material.encode("ascii") @@ -576,3 +603,47 @@ def rsa_public_key_fingerprint(rsa_public_key): fingerprint_hex = hashlib.md5(key_data).hexdigest() fingerprint = re.sub(r"([a-f0-9]{2})(?!$)", r"\1:", fingerprint_hex) return fingerprint + + +def filter_iam_instance_profile_associations(iam_instance_associations, filter_dict): + if not filter_dict: + return iam_instance_associations + result = [] + for iam_instance_association in iam_instance_associations: + filter_passed = True + if filter_dict.get("instance-id"): + if ( + iam_instance_association.instance.id + not in filter_dict.get("instance-id").values() + ): + filter_passed = False + if filter_dict.get("state"): + if iam_instance_association.state not in filter_dict.get("state").values(): + filter_passed = False + if filter_passed: + result.append(iam_instance_association) + return result + + +def filter_iam_instance_profiles(iam_instance_profile_arn, iam_instance_profile_name): + instance_profile = None + instance_profile_by_name = None + instance_profile_by_arn = None + if iam_instance_profile_name: + instance_profile_by_name = iam_backends["global"].get_instance_profile( + iam_instance_profile_name + ) + instance_profile = instance_profile_by_name + if iam_instance_profile_arn: + instance_profile_by_arn = 
iam_backends["global"].get_instance_profile_by_arn( + iam_instance_profile_arn + ) + instance_profile = instance_profile_by_arn + # We would prefer instance profile that we found by arn + if iam_instance_profile_arn and iam_instance_profile_name: + if instance_profile_by_name == instance_profile_by_arn: + instance_profile = instance_profile_by_arn + else: + instance_profile = None + + return instance_profile diff --git a/moto/ec2_instance_connect/__init__.py b/moto/ec2_instance_connect/__init__.py deleted file mode 100644 index c20d59cfa..000000000 --- a/moto/ec2_instance_connect/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ..core.models import base_decorator -from .models import ec2_instance_connect_backends - -mock_ec2_instance_connect = base_decorator(ec2_instance_connect_backends) diff --git a/moto/ec2_instance_connect/models.py b/moto/ec2_instance_connect/models.py deleted file mode 100644 index cc8cc3f33..000000000 --- a/moto/ec2_instance_connect/models.py +++ /dev/null @@ -1,11 +0,0 @@ -import boto -from moto.core import BaseBackend - - -class Ec2InstanceConnectBackend(BaseBackend): - pass - - -ec2_instance_connect_backends = {} -for region in boto.ec2.regions(): - ec2_instance_connect_backends[region.name] = Ec2InstanceConnectBackend() diff --git a/moto/ec2_instance_connect/responses.py b/moto/ec2_instance_connect/responses.py deleted file mode 100644 index 462f1fddc..000000000 --- a/moto/ec2_instance_connect/responses.py +++ /dev/null @@ -1,9 +0,0 @@ -import json -from moto.core.responses import BaseResponse - - -class Ec2InstanceConnectResponse(BaseResponse): - def send_ssh_public_key(self): - return json.dumps( - {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} - ) diff --git a/moto/ec2instanceconnect/__init__.py b/moto/ec2instanceconnect/__init__.py new file mode 100644 index 000000000..c53958f7e --- /dev/null +++ b/moto/ec2instanceconnect/__init__.py @@ -0,0 +1,4 @@ +from ..core.models import base_decorator +from .models import ec2instanceconnect_backends + +mock_ec2instanceconnect = base_decorator(ec2instanceconnect_backends) diff --git a/moto/ec2instanceconnect/models.py b/moto/ec2instanceconnect/models.py new file mode 100644 index 000000000..19c4717ec --- /dev/null +++ b/moto/ec2instanceconnect/models.py @@ -0,0 +1,19 @@ +from boto3 import Session +import json +from moto.core import BaseBackend + + +class Ec2InstanceConnectBackend(BaseBackend): + def send_ssh_public_key(self): + return json.dumps( + {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} + ) + + +ec2instanceconnect_backends = {} +for region in Session().get_available_regions("ec2"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() +for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() +for region in Session().get_available_regions("ec2", partition_name="aws-cn"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() diff --git a/moto/ec2instanceconnect/responses.py b/moto/ec2instanceconnect/responses.py new file mode 100644 index 000000000..9fce11aa2 --- /dev/null +++ b/moto/ec2instanceconnect/responses.py @@ -0,0 +1,11 @@ +from moto.core.responses import BaseResponse +from .models import ec2instanceconnect_backends + + +class Ec2InstanceConnectResponse(BaseResponse): + @property + def ec2instanceconnect_backend(self): + return ec2instanceconnect_backends[self.region] + + def send_ssh_public_key(self): + return 
self.ec2instanceconnect_backend.send_ssh_public_key() diff --git a/moto/ec2_instance_connect/urls.py b/moto/ec2instanceconnect/urls.py similarity index 68% rename from moto/ec2_instance_connect/urls.py rename to moto/ec2instanceconnect/urls.py index e7078264f..d7b6b7ce4 100644 --- a/moto/ec2_instance_connect/urls.py +++ b/moto/ec2instanceconnect/urls.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .responses import Ec2InstanceConnectResponse -url_bases = ["https?://ec2-instance-connect\.(.+)\.amazonaws\.com"] +url_bases = [r"https?://ec2-instance-connect\.(.+)\.amazonaws\.com"] url_paths = {"{0}/$": Ec2InstanceConnectResponse.dispatch} diff --git a/moto/ecr/exceptions.py b/moto/ecr/exceptions.py index 9b55f0589..6d1713a6a 100644 --- a/moto/ecr/exceptions.py +++ b/moto/ecr/exceptions.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +from moto.core.exceptions import RESTError, JsonRESTError class RepositoryNotFoundException(RESTError): @@ -13,7 +13,7 @@ class RepositoryNotFoundException(RESTError): ) -class ImageNotFoundException(RESTError): +class ImageNotFoundException(JsonRESTError): code = 400 def __init__(self, image_id, repository_name, registry_id): diff --git a/moto/ecr/models.py b/moto/ecr/models.py index f84df79aa..299ed48a7 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -7,7 +7,7 @@ from random import random from botocore.exceptions import ParamValidationError -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2 import ec2_backends from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException @@ -38,7 +38,7 @@ class BaseObject(BaseModel): return self.gen_response_object() -class Repository(BaseObject): +class Repository(BaseObject, CloudFormationModel): def __init__(self, repository_name): self.registry_id = DEFAULT_REGISTRY_ID self.arn = "arn:aws:ecr:us-east-1:{0}:repository/{1}".format( @@ -67,19 +67,24 @@ class Repository(BaseObject): del response_object["arn"], response_object["name"], response_object["images"] return response_object + @staticmethod + def cloudformation_name_type(): + return "RepositoryName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html + return "AWS::ECR::Repository" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - ecr_backend = ecr_backends[region_name] return ecr_backend.create_repository( # RepositoryName is optional in CloudFormation, thus create a random # name if necessary - repository_name=properties.get( - "RepositoryName", "ecrrepository{0}".format(int(random() * 10 ** 6)) - ) + repository_name=resource_name ) @classmethod @@ -159,7 +164,7 @@ class Image(BaseObject): def response_list_object(self): response_object = self.gen_response_object() response_object["imageTag"] = self.image_tag - response_object["imageDigest"] = "i don't know" + response_object["imageDigest"] = self.get_image_digest() return { k: v for k, v in response_object.items() if v is not None and v != [None] } @@ -403,7 +408,7 @@ class ECRBackend(BaseBackend): # If we have a digest, is it valid? 
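# For reference: a digest in the usual "algorithm:64-hex-characters" shape
# (for example, a hypothetical "sha256:" followed by 64 hex digits) matches
# the pattern checked below; anything shorter or malformed is recorded as a
# failure entry instead of raising.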
if "imageDigest" in image_id: - pattern = re.compile("^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") + pattern = re.compile(r"^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") if not pattern.match(image_id.get("imageDigest")): response["failures"].append( { diff --git a/moto/ecs/exceptions.py b/moto/ecs/exceptions.py index d08066192..72129224e 100644 --- a/moto/ecs/exceptions.py +++ b/moto/ecs/exceptions.py @@ -21,3 +21,22 @@ class TaskDefinitionNotFoundException(JsonRESTError): error_type="ClientException", message="The specified task definition does not exist.", ) + + +class TaskSetNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(TaskSetNotFoundException, self).__init__( + error_type="ClientException", + message="The specified task set does not exist.", + ) + + +class ClusterNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(ClusterNotFoundException, self).__init__( + error_type="ClientException", message="Cluster not found", + ) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 30e4687c4..a4522660e 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -1,19 +1,24 @@ from __future__ import unicode_literals + import re import uuid +from copy import copy from datetime import datetime from random import random, randint import pytz from boto3 import Session +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.exceptions import JsonRESTError -from moto.core import BaseBackend, BaseModel -from moto.core.utils import unix_time +from moto.core.utils import unix_time, pascal_to_camelcase, remap_nested_keys from moto.ec2 import ec2_backends -from copy import copy - -from .exceptions import ServiceNotFoundException, TaskDefinitionNotFoundException +from .exceptions import ( + ServiceNotFoundException, + TaskDefinitionNotFoundException, + TaskSetNotFoundException, + ClusterNotFoundException, +) class BaseObject(BaseModel): @@ -39,7 +44,7 @@ class BaseObject(BaseModel): return self.gen_response_object() -class Cluster(BaseObject): +class Cluster(BaseObject, CloudFormationModel): def __init__(self, cluster_name, region_name): self.active_services_count = 0 self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format( @@ -64,40 +69,37 @@ class Cluster(BaseObject): del response_object["arn"], response_object["name"] return response_object + @staticmethod + def cloudformation_name_type(): + return "ClusterName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html + return "AWS::ECS::Cluster" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - # if properties is not provided, cloudformation will use the default values for all properties - if "Properties" in cloudformation_json: - properties = cloudformation_json["Properties"] - else: - properties = {} - ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( # ClusterName is optional in CloudFormation, thus create a random # name if necessary - cluster_name=properties.get( - "ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6)) - ) + cluster_name=resource_name ) @classmethod def update_from_cloudformation_json( cls, original_resource, new_resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - - if original_resource.name != properties["ClusterName"]: + if original_resource.name != new_resource_name: ecs_backend = ecs_backends[region_name] 
ecs_backend.delete_cluster(original_resource.arn) return ecs_backend.create_cluster( # ClusterName is optional in CloudFormation, thus create a # random name if necessary - cluster_name=properties.get( - "ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6)) - ) + cluster_name=new_resource_name ) else: # no-op when nothing changed between old and new resources @@ -111,7 +113,7 @@ class Cluster(BaseObject): raise UnformattedGetAttTemplateException() -class TaskDefinition(BaseObject): +class TaskDefinition(BaseObject, CloudFormationModel): def __init__( self, family, @@ -121,6 +123,7 @@ class TaskDefinition(BaseObject): network_mode=None, volumes=None, tags=None, + placement_constraints=None, ): self.family = family self.revision = revision @@ -137,6 +140,9 @@ class TaskDefinition(BaseObject): self.network_mode = "bridge" else: self.network_mode = network_mode + self.placement_constraints = ( + placement_constraints if placement_constraints is not None else [] + ) @property def response_object(self): @@ -150,6 +156,15 @@ class TaskDefinition(BaseObject): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-taskdefinition.html + return "AWS::ECS::TaskDefinition" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -159,8 +174,10 @@ class TaskDefinition(BaseObject): family = properties.get( "Family", "task-definition-{0}".format(int(random() * 10 ** 6)) ) - container_definitions = properties["ContainerDefinitions"] - volumes = properties.get("Volumes") + container_definitions = remap_nested_keys( + properties.get("ContainerDefinitions", []), pascal_to_camelcase + ) + volumes = remap_nested_keys(properties.get("Volumes", []), pascal_to_camelcase) ecs_backend = ecs_backends[region_name] return ecs_backend.register_task_definition( @@ -172,7 +189,6 @@ class TaskDefinition(BaseObject): cls, original_resource, new_resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - family = properties.get( "Family", "task-definition-{0}".format(int(random() * 10 ** 6)) ) @@ -227,16 +243,17 @@ class Task(BaseObject): return response_object -class Service(BaseObject): +class Service(BaseObject, CloudFormationModel): def __init__( self, cluster, service_name, - task_definition, desired_count, + task_definition=None, load_balancers=None, scheduling_strategy=None, tags=None, + deployment_controller=None, ): self.cluster_arn = cluster.arn self.arn = "arn:aws:ecs:{0}:012345678910:service/{1}".format( @@ -245,21 +262,29 @@ class Service(BaseObject): self.name = service_name self.status = "ACTIVE" self.running_count = 0 - self.task_definition = task_definition.arn + if task_definition: + self.task_definition = task_definition.arn + else: + self.task_definition = None self.desired_count = desired_count + self.task_sets = [] + self.deployment_controller = deployment_controller or {"type": "ECS"} self.events = [] - self.deployments = [ - { - "createdAt": datetime.now(pytz.utc), - "desiredCount": self.desired_count, - "id": "ecs-svc/{}".format(randint(0, 32 ** 12)), - "pendingCount": self.desired_count, - "runningCount": 0, - "status": "PRIMARY", - "taskDefinition": task_definition.arn, - "updatedAt": datetime.now(pytz.utc), - } - ] + if self.deployment_controller["type"] == "ECS": + self.deployments = [ + { + "createdAt": 
datetime.now(pytz.utc), + "desiredCount": self.desired_count, + "id": "ecs-svc/{}".format(randint(0, 32 ** 12)), + "pendingCount": self.desired_count, + "runningCount": 0, + "status": "PRIMARY", + "taskDefinition": self.task_definition, + "updatedAt": datetime.now(pytz.utc), + } + ] + else: + self.deployments = [] self.load_balancers = load_balancers if load_balancers is not None else [] self.scheduling_strategy = ( scheduling_strategy if scheduling_strategy is not None else "REPLICA" @@ -278,6 +303,13 @@ class Service(BaseObject): response_object["serviceName"] = self.name response_object["serviceArn"] = self.arn response_object["schedulingStrategy"] = self.scheduling_strategy + if response_object["deploymentController"]["type"] == "ECS": + del response_object["deploymentController"] + del response_object["taskSets"] + else: + response_object["taskSets"] = [ + t.response_object for t in response_object["taskSets"] + ] for deployment in response_object["deployments"]: if isinstance(deployment["createdAt"], datetime): @@ -291,6 +323,15 @@ class Service(BaseObject): return response_object + @staticmethod + def cloudformation_name_type(): + return "ServiceName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-service.html + return "AWS::ECS::Service" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -304,14 +345,13 @@ class Service(BaseObject): task_definition = properties["TaskDefinition"].family else: task_definition = properties["TaskDefinition"] - service_name = "{0}Service{1}".format(cluster, int(random() * 10 ** 6)) desired_count = properties["DesiredCount"] # TODO: LoadBalancers # TODO: Role ecs_backend = ecs_backends[region_name] return ecs_backend.create_service( - cluster, service_name, task_definition, desired_count + cluster, resource_name, desired_count, task_definition_str=task_definition ) @classmethod @@ -335,11 +375,11 @@ class Service(BaseObject): # TODO: LoadBalancers # TODO: Role ecs_backend.delete_service(cluster_name, service_name) - new_service_name = "{0}Service{1}".format( - cluster_name, int(random() * 10 ** 6) - ) return ecs_backend.create_service( - cluster_name, new_service_name, task_definition, desired_count + cluster_name, + new_resource_name, + desired_count, + task_definition_str=task_definition, ) else: return ecs_backend.update_service( @@ -443,6 +483,7 @@ class ContainerInstance(BaseObject): if ec2_instance.platform == "windows" else "linux", # options are windows and linux, linux is default } + self.registered_at = datetime.now(pytz.utc) @property def response_object(self): @@ -451,6 +492,10 @@ class ContainerInstance(BaseObject): self._format_attribute(name, value) for name, value in response_object["attributes"].items() ] + if isinstance(response_object["registeredAt"], datetime): + response_object["registeredAt"] = unix_time( + response_object["registeredAt"].replace(tzinfo=None) + ) return response_object def _format_attribute(self, name, value): @@ -490,6 +535,73 @@ class ContainerInstanceFailure(BaseObject): return response_object +class TaskSet(BaseObject): + def __init__( + self, + service, + cluster, + task_definition, + region_name, + external_id=None, + network_configuration=None, + load_balancers=None, + service_registries=None, + launch_type=None, + capacity_provider_strategy=None, + platform_version=None, + scale=None, + client_token=None, + tags=None, + ): + self.service = service + self.cluster = 
cluster + self.status = "ACTIVE" + self.task_definition = task_definition or "" + self.region_name = region_name + self.external_id = external_id or "" + self.network_configuration = network_configuration or {} + self.load_balancers = load_balancers or [] + self.service_registries = service_registries or [] + self.launch_type = launch_type + self.capacity_provider_strategy = capacity_provider_strategy or [] + self.platform_version = platform_version or "" + self.scale = scale or {"value": 100.0, "unit": "PERCENT"} + self.client_token = client_token or "" + self.tags = tags or [] + self.stabilityStatus = "STEADY_STATE" + self.createdAt = datetime.now(pytz.utc) + self.updatedAt = datetime.now(pytz.utc) + self.stabilityStatusAt = datetime.now(pytz.utc) + self.id = "ecs-svc/{}".format(randint(0, 32 ** 12)) + self.service_arn = "" + self.cluster_arn = "" + + cluster_name = self.cluster.split("/")[-1] + service_name = self.service.split("/")[-1] + self.task_set_arn = "arn:aws:ecs:{0}:012345678910:task-set/{1}/{2}/{3}".format( + region_name, cluster_name, service_name, self.id + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + if isinstance(response_object["createdAt"], datetime): + response_object["createdAt"] = unix_time( + self.createdAt.replace(tzinfo=None) + ) + if isinstance(response_object["updatedAt"], datetime): + response_object["updatedAt"] = unix_time( + self.updatedAt.replace(tzinfo=None) + ) + if isinstance(response_object["stabilityStatusAt"], datetime): + response_object["stabilityStatusAt"] = unix_time( + self.stabilityStatusAt.replace(tzinfo=None) + ) + del response_object["service"] + del response_object["cluster"] + return response_object + + class EC2ContainerServiceBackend(BaseBackend): def __init__(self, region_name): super(EC2ContainerServiceBackend, self).__init__() @@ -498,6 +610,7 @@ class EC2ContainerServiceBackend(BaseBackend): self.tasks = {} self.services = {} self.container_instances = {} + self.task_sets = {} self.region_name = region_name def reset(self): @@ -558,7 +671,13 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) def register_task_definition( - self, family, container_definitions, volumes=None, network_mode=None, tags=None + self, + family, + container_definitions, + volumes=None, + network_mode=None, + tags=None, + placement_constraints=None, ): if family in self.task_definitions: last_id = self._get_last_task_definition_revision_id(family) @@ -574,6 +693,7 @@ class EC2ContainerServiceBackend(BaseBackend): volumes=volumes, network_mode=network_mode, tags=tags, + placement_constraints=placement_constraints, ) self.task_definitions[family][revision] = task_definition @@ -604,7 +724,10 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a task_definition".format(task_definition_name)) def run_task(self, cluster_str, task_definition_str, count, overrides, started_by): - cluster_name = cluster_str.split("/")[-1] + if cluster_str: + cluster_name = cluster_str.split("/")[-1] + else: + cluster_name = "default" if cluster_name in self.clusters: cluster = self.clusters[cluster_name] else: @@ -819,10 +942,30 @@ class EC2ContainerServiceBackend(BaseBackend): ) ) + if family: + task_definition_arns = self.list_task_definitions(family) + filtered_tasks = list( + filter( + lambda t: t.task_definition_arn in task_definition_arns, + filtered_tasks, + ) + ) + if started_by: filtered_tasks = list( filter(lambda t: started_by == t.started_by, 
filtered_tasks) ) + + if service_name: + # TODO: We can't filter on `service_name` until the backend actually + # launches tasks as part of the service creation process. + pass + + if desiredStatus: + filtered_tasks = list( + filter(lambda t: t.desired_status == desiredStatus, filtered_tasks) + ) + return [t.task_arn for t in filtered_tasks] def stop_task(self, cluster_str, task_str, reason): @@ -857,28 +1000,33 @@ class EC2ContainerServiceBackend(BaseBackend): self, cluster_str, service_name, - task_definition_str, desired_count, + task_definition_str=None, load_balancers=None, scheduling_strategy=None, tags=None, + deployment_controller=None, ): cluster_name = cluster_str.split("/")[-1] if cluster_name in self.clusters: cluster = self.clusters[cluster_name] else: raise Exception("{0} is not a cluster".format(cluster_name)) - task_definition = self.describe_task_definition(task_definition_str) + if task_definition_str is not None: + task_definition = self.describe_task_definition(task_definition_str) + else: + task_definition = None desired_count = desired_count if desired_count is not None else 0 service = Service( cluster, service_name, - task_definition, desired_count, + task_definition, load_balancers, scheduling_strategy, tags, + deployment_controller, ) cluster_service_pair = "{0}:{1}".format(cluster_name, service_name) self.services[cluster_service_pair] = service @@ -914,12 +1062,14 @@ class EC2ContainerServiceBackend(BaseBackend): or existing_service_obj.arn == requested_name_or_arn ): result.append(existing_service_obj) + return result def update_service( - self, cluster_str, service_name, task_definition_str, desired_count + self, cluster_str, service_str, task_definition_str, desired_count ): cluster_name = cluster_str.split("/")[-1] + service_name = service_str.split("/")[-1] cluster_service_pair = "{0}:{1}".format(cluster_name, service_name) if cluster_service_pair in self.services: if task_definition_str is not None: @@ -1087,9 +1237,7 @@ class EC2ContainerServiceBackend(BaseBackend): def put_attributes(self, cluster_name, attributes=None): if cluster_name is None or cluster_name not in self.clusters: - raise JsonRESTError( - "ClusterNotFoundException", "Cluster not found", status=400 - ) + raise ClusterNotFoundException if attributes is None: raise JsonRESTError( @@ -1178,9 +1326,7 @@ class EC2ContainerServiceBackend(BaseBackend): def delete_attributes(self, cluster_name, attributes=None): if cluster_name is None or cluster_name not in self.clusters: - raise JsonRESTError( - "ClusterNotFoundException", "Cluster not found", status=400 - ) + raise ClusterNotFoundException if attributes is None: raise JsonRESTError( @@ -1313,6 +1459,134 @@ class EC2ContainerServiceBackend(BaseBackend): raise ServiceNotFoundException(service_name=parsed_arn["id"]) raise NotImplementedError() + def create_task_set( + self, + service, + cluster, + task_definition, + external_id=None, + network_configuration=None, + load_balancers=None, + service_registries=None, + launch_type=None, + capacity_provider_strategy=None, + platform_version=None, + scale=None, + client_token=None, + tags=None, + ): + task_set = TaskSet( + service, + cluster, + task_definition, + self.region_name, + external_id=external_id, + network_configuration=network_configuration, + load_balancers=load_balancers, + service_registries=service_registries, + launch_type=launch_type, + capacity_provider_strategy=capacity_provider_strategy, + platform_version=platform_version, + scale=scale, + client_token=client_token, + tags=tags, + 
) + + cluster_name = cluster.split("/")[-1] + service_name = service.split("/")[-1] + + service_obj = self.services.get("{0}:{1}".format(cluster_name, service_name)) + if not service_obj: + raise ServiceNotFoundException(service_name=service_name) + + cluster_obj = self.clusters.get(cluster_name) + if not cluster_obj: + raise ClusterNotFoundException + + task_set.task_definition = self.describe_task_definition(task_definition).arn + task_set.service_arn = service_obj.arn + task_set.cluster_arn = cluster_obj.arn + + service_obj.task_sets.append(task_set) + # TODO: validate load balancers + + return task_set + + def describe_task_sets(self, cluster, service, task_sets=None, include=None): + task_sets = task_sets or [] + include = include or [] + + cluster_name = cluster.split("/")[-1] + service_name = service.split("/")[-1] + service_key = "{0}:{1}".format(cluster_name, service_name) + + service_obj = self.services.get(service_key) + if not service_obj: + raise ServiceNotFoundException(service_name=service_name) + + cluster_obj = self.clusters.get(cluster_name) + if not cluster_obj: + raise ClusterNotFoundException + + task_set_results = [] + if task_sets: + for task_set in service_obj.task_sets: + if task_set.task_set_arn in task_sets: + task_set_results.append(task_set) + else: + task_set_results = service_obj.task_sets + + return task_set_results + + def delete_task_set(self, cluster, service, task_set, force=False): + cluster_name = cluster.split("/")[-1] + service_name = service.split("/")[-1] + + service_key = "{0}:{1}".format(cluster_name, service_name) + task_set_element = None + for i, ts in enumerate(self.services[service_key].task_sets): + if task_set == ts.task_set_arn: + task_set_element = i + + if task_set_element is not None: + deleted_task_set = self.services[service_key].task_sets.pop( + task_set_element + ) + else: + raise TaskSetNotFoundException + + # TODO: add logic for `force` to raise an exception if `PRIMARY` task has not been scaled to 0. 
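# ---------------------------------------------------------------------------
# Illustrative usage sketch (reviewer note, not part of the diff): a minimal
# round trip through the task-set support added above, assuming moto's
# decorator API and boto3; every resource name below is made up.
import boto3
from moto import mock_ecs


@mock_ecs
def task_set_round_trip():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="test-cluster")
    client.register_task_definition(
        family="test-task-def",
        containerDefinitions=[{"name": "web", "image": "nginx", "memory": 128}],
    )
    # Task sets belong to services using the EXTERNAL deployment controller,
    # which is why create_service now accepts deploymentController.
    client.create_service(
        cluster="test-cluster",
        serviceName="test-service",
        desiredCount=1,
        deploymentController={"type": "EXTERNAL"},
    )
    task_set = client.create_task_set(
        cluster="test-cluster",
        service="test-service",
        taskDefinition="test-task-def",
    )["taskSet"]
    # With no explicit scale, the backend defaults to 100 PERCENT.
    described = client.describe_task_sets(
        cluster="test-cluster",
        service="test-service",
        taskSets=[task_set["taskSetArn"]],
    )["taskSets"]
    assert described[0]["scale"] == {"value": 100.0, "unit": "PERCENT"}
    client.update_service_primary_task_set(
        cluster="test-cluster",
        service="test-service",
        primaryTaskSet=task_set["taskSetArn"],
    )
    client.delete_task_set(
        cluster="test-cluster",
        service="test-service",
        taskSet=task_set["taskSetArn"],
    )
# ---------------------------------------------------------------------------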
+ + return deleted_task_set + + def update_task_set(self, cluster, service, task_set, scale): + cluster_name = cluster.split("/")[-1] + service_name = service.split("/")[-1] + task_set_obj = self.describe_task_sets( + cluster_name, service_name, task_sets=[task_set] + )[0] + task_set_obj.scale = scale + return task_set_obj + + def update_service_primary_task_set(self, cluster, service, primary_task_set): + """ Updates task sets to PRIMARY or ACTIVE for given cluster:service task sets """ + cluster_name = cluster.split("/")[-1] + service_name = service.split("/")[-1] + task_set_obj = self.describe_task_sets( + cluster_name, service_name, task_sets=[primary_task_set] + )[0] + + service_obj = self.describe_services(cluster, [service])[0] + service_obj.load_balancers = task_set_obj.load_balancers + service_obj.task_definition = task_set_obj.task_definition + + for task_set in service_obj.task_sets: + if task_set.task_set_arn == primary_task_set: + task_set.status = "PRIMARY" + else: + task_set.status = "ACTIVE" + return task_set_obj + ecs_backends = {} for region in Session().get_available_regions("ecs"): diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 49bf022b4..15d2f0c4b 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -63,12 +63,14 @@ class EC2ContainerServiceResponse(BaseResponse): volumes = self._get_param("volumes") tags = self._get_param("tags") network_mode = self._get_param("networkMode") + placement_constraints = self._get_param("placementConstraints") task_definition = self.ecs_backend.register_task_definition( family, container_definitions, volumes=volumes, network_mode=network_mode, tags=tags, + placement_constraints=placement_constraints, ) return json.dumps({"taskDefinition": task_definition.response_object}) @@ -85,7 +87,10 @@ class EC2ContainerServiceResponse(BaseResponse): def describe_task_definition(self): task_definition_str = self._get_param("taskDefinition") data = self.ecs_backend.describe_task_definition(task_definition_str) - return json.dumps({"taskDefinition": data.response_object, "failures": []}) + resp = {"taskDefinition": data.response_object, "failures": []} + if "TAGS" in self._get_param("include", []): + resp["tags"] = self.ecs_backend.list_tags_for_resource(data.arn) + return json.dumps(resp) def deregister_task_definition(self): task_definition_str = self._get_param("taskDefinition") @@ -160,14 +165,16 @@ class EC2ContainerServiceResponse(BaseResponse): load_balancers = self._get_param("loadBalancers") scheduling_strategy = self._get_param("schedulingStrategy") tags = self._get_param("tags") + deployment_controller = self._get_param("deploymentController") service = self.ecs_backend.create_service( cluster_str, service_name, - task_definition_str, desired_count, + task_definition_str, load_balancers, scheduling_strategy, tags, + deployment_controller, ) return json.dumps({"service": service.response_object}) @@ -187,12 +194,16 @@ class EC2ContainerServiceResponse(BaseResponse): cluster_str = self._get_param("cluster") service_names = self._get_param("services") services = self.ecs_backend.describe_services(cluster_str, service_names) - return json.dumps( - { - "services": [service.response_object for service in services], - "failures": [], - } - ) + resp = { + "services": [service.response_object for service in services], + "failures": [], + } + if "TAGS" in self._get_param("include", []): + for i, service in enumerate(services): + resp["services"][i]["tags"] = self.ecs_backend.list_tags_for_resource( + service.arn + ) + 
return json.dumps(resp) def update_service(self): cluster_str = self._get_param("cluster") @@ -345,3 +356,80 @@ class EC2ContainerServiceResponse(BaseResponse): tag_keys = self._get_param("tagKeys") results = self.ecs_backend.untag_resource(resource_arn, tag_keys) return json.dumps(results) + + def create_task_set(self): + service_str = self._get_param("service") + cluster_str = self._get_param("cluster") + task_definition = self._get_param("taskDefinition") + external_id = self._get_param("externalId") + network_configuration = self._get_param("networkConfiguration") + load_balancers = self._get_param("loadBalancers") + service_registries = self._get_param("serviceRegistries") + launch_type = self._get_param("launchType") + capacity_provider_strategy = self._get_param("capacityProviderStrategy") + platform_version = self._get_param("platformVersion") + scale = self._get_param("scale") + client_token = self._get_param("clientToken") + tags = self._get_param("tags") + task_set = self.ecs_backend.create_task_set( + service_str, + cluster_str, + task_definition, + external_id=external_id, + network_configuration=network_configuration, + load_balancers=load_balancers, + service_registries=service_registries, + launch_type=launch_type, + capacity_provider_strategy=capacity_provider_strategy, + platform_version=platform_version, + scale=scale, + client_token=client_token, + tags=tags, + ) + return json.dumps({"taskSet": task_set.response_object}) + + def describe_task_sets(self): + cluster_str = self._get_param("cluster") + service_str = self._get_param("service") + task_sets = self._get_param("taskSets") + include = self._get_param("include", []) + task_set_objs = self.ecs_backend.describe_task_sets( + cluster_str, service_str, task_sets, include + ) + + response_objs = [t.response_object for t in task_set_objs] + if "TAGS" not in include: + for ro in response_objs: + del ro["tags"] + return json.dumps({"taskSets": response_objs}) + + def delete_task_set(self): + cluster_str = self._get_param("cluster") + service_str = self._get_param("service") + task_set = self._get_param("taskSet") + force = self._get_param("force") + task_set = self.ecs_backend.delete_task_set( + cluster_str, service_str, task_set, force + ) + return json.dumps({"taskSet": task_set.response_object}) + + def update_task_set(self): + cluster_str = self._get_param("cluster") + service_str = self._get_param("service") + task_set = self._get_param("taskSet") + scale = self._get_param("scale") + + task_set = self.ecs_backend.update_task_set( + cluster_str, service_str, task_set, scale + ) + return json.dumps({"taskSet": task_set.response_object}) + + def update_service_primary_task_set(self): + cluster_str = self._get_param("cluster") + service_str = self._get_param("service") + primary_task_set = self._get_param("primaryTaskSet") + + task_set = self.ecs_backend.update_service_primary_task_set( + cluster_str, service_str, primary_task_set + ) + return json.dumps({"taskSet": task_set.response_object}) diff --git a/moto/elasticbeanstalk/__init__.py b/moto/elasticbeanstalk/__init__.py new file mode 100644 index 000000000..851fa445b --- /dev/null +++ b/moto/elasticbeanstalk/__init__.py @@ -0,0 +1,4 @@ +from .models import eb_backends +from moto.core.models import base_decorator + +mock_elasticbeanstalk = base_decorator(eb_backends) diff --git a/moto/elasticbeanstalk/exceptions.py b/moto/elasticbeanstalk/exceptions.py new file mode 100644 index 000000000..f1e27c564 --- /dev/null +++ b/moto/elasticbeanstalk/exceptions.py @@ -0,0 +1,15 
@@ +from moto.core.exceptions import RESTError + + +class InvalidParameterValueError(RESTError): + def __init__(self, message): + super(InvalidParameterValueError, self).__init__( + "InvalidParameterValue", message + ) + + +class ResourceNotFoundException(RESTError): + def __init__(self, message): + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", message + ) diff --git a/moto/elasticbeanstalk/models.py b/moto/elasticbeanstalk/models.py new file mode 100644 index 000000000..3767846c1 --- /dev/null +++ b/moto/elasticbeanstalk/models.py @@ -0,0 +1,152 @@ +import weakref + +from boto3 import Session + +from moto.core import BaseBackend, BaseModel +from .exceptions import InvalidParameterValueError, ResourceNotFoundException + + +class FakeEnvironment(BaseModel): + def __init__( + self, application, environment_name, solution_stack_name, tags, + ): + self.application = weakref.proxy( + application + ) # weakref to break circular dependencies + self.environment_name = environment_name + self.solution_stack_name = solution_stack_name + self.tags = tags + + @property + def application_name(self): + return self.application.application_name + + @property + def environment_arn(self): + return ( + "arn:aws:elasticbeanstalk:{region}:{account_id}:" + "environment/{application_name}/{environment_name}".format( + region=self.region, + account_id="123456789012", + application_name=self.application_name, + environment_name=self.environment_name, + ) + ) + + @property + def platform_arn(self): + return "TODO" # TODO + + @property + def region(self): + return self.application.region + + +class FakeApplication(BaseModel): + def __init__(self, backend, application_name): + self.backend = weakref.proxy(backend) # weakref to break cycles + self.application_name = application_name + self.environments = dict() + + def create_environment( + self, environment_name, solution_stack_name, tags, + ): + if environment_name in self.environments: + raise InvalidParameterValueError + + env = FakeEnvironment( + application=self, + environment_name=environment_name, + solution_stack_name=solution_stack_name, + tags=tags, + ) + self.environments[environment_name] = env + + return env + + @property + def region(self): + return self.backend.region + + +class EBBackend(BaseBackend): + def __init__(self, region): + self.region = region + self.applications = dict() + + def reset(self): + # preserve region + region = self.region + self._reset_model_refs() + self.__dict__ = {} + self.__init__(region) + + def create_application(self, application_name): + if application_name in self.applications: + raise InvalidParameterValueError( + "Application {} already exists.".format(application_name) + ) + new_app = FakeApplication(backend=self, application_name=application_name,) + self.applications[application_name] = new_app + return new_app + + def create_environment(self, app, environment_name, stack_name, tags): + return app.create_environment( + environment_name=environment_name, + solution_stack_name=stack_name, + tags=tags, + ) + + def describe_environments(self): + envs = [] + for app in self.applications.values(): + for env in app.environments.values(): + envs.append(env) + return envs + + def list_available_solution_stacks(self): + # Implemented in response.py + pass + + def update_tags_for_resource(self, resource_arn, tags_to_add, tags_to_remove): + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN 
'{}'.".format(resource_arn) + ) + + for key, value in tags_to_add.items(): + res.tags[key] = value + + for key in tags_to_remove: + del res.tags[key] + + def list_tags_for_resource(self, resource_arn): + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN '{}'.".format(resource_arn) + ) + return res.tags + + def _find_environment_by_arn(self, arn): + for app in self.applications.keys(): + for env in self.applications[app].environments.values(): + if env.environment_arn == arn: + return env + raise KeyError() + + +eb_backends = {} +for region in Session().get_available_regions("elasticbeanstalk"): + eb_backends[region] = EBBackend(region) +for region in Session().get_available_regions( + "elasticbeanstalk", partition_name="aws-us-gov" +): + eb_backends[region] = EBBackend(region) +for region in Session().get_available_regions( + "elasticbeanstalk", partition_name="aws-cn" +): + eb_backends[region] = EBBackend(region) diff --git a/moto/elasticbeanstalk/responses.py b/moto/elasticbeanstalk/responses.py new file mode 100644 index 000000000..387cbb3ea --- /dev/null +++ b/moto/elasticbeanstalk/responses.py @@ -0,0 +1,1386 @@ +from moto.core.responses import BaseResponse +from moto.core.utils import tags_from_query_string +from .models import eb_backends +from .exceptions import InvalidParameterValueError + + +class EBResponse(BaseResponse): + @property + def backend(self): + """ + :rtype: EBBackend + """ + return eb_backends[self.region] + + def create_application(self): + app = self.backend.create_application( + application_name=self._get_param("ApplicationName"), + ) + + template = self.response_template(EB_CREATE_APPLICATION) + return template.render(region_name=self.backend.region, application=app,) + + def describe_applications(self): + template = self.response_template(EB_DESCRIBE_APPLICATIONS) + return template.render(applications=self.backend.applications.values(),) + + def create_environment(self): + application_name = self._get_param("ApplicationName") + try: + app = self.backend.applications[application_name] + except KeyError: + raise InvalidParameterValueError( + "No Application named '{}' found.".format(application_name) + ) + + tags = tags_from_query_string(self.querystring, prefix="Tags.member") + env = self.backend.create_environment( + app, + environment_name=self._get_param("EnvironmentName"), + stack_name=self._get_param("SolutionStackName"), + tags=tags, + ) + + template = self.response_template(EB_CREATE_ENVIRONMENT) + return template.render(environment=env, region=self.backend.region,) + + def describe_environments(self): + envs = self.backend.describe_environments() + + template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) + return template.render(environments=envs,) + + def list_available_solution_stacks(self): + return EB_LIST_AVAILABLE_SOLUTION_STACKS + + def update_tags_for_resource(self): + resource_arn = self._get_param("ResourceArn") + tags_to_add = tags_from_query_string( + self.querystring, prefix="TagsToAdd.member" + ) + tags_to_remove = self._get_multi_param("TagsToRemove.member") + self.backend.update_tags_for_resource(resource_arn, tags_to_add, tags_to_remove) + + return EB_UPDATE_TAGS_FOR_RESOURCE + + def list_tags_for_resource(self): + resource_arn = self._get_param("ResourceArn") + tags = self.backend.list_tags_for_resource(resource_arn) + + template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) + return template.render(tags=tags, arn=resource_arn,) + + 
+EB_CREATE_APPLICATION = """ + + + + + 2019-09-03T13:08:29.049Z + + + + false + 180 + false + + + false + 200 + false + + + + arn:aws:elasticbeanstalk:{{ region_name }}:111122223333:application/{{ application_name }} + {{ application.application_name }} + 2019-09-03T13:08:29.049Z + + + + 1b6173c8-13aa-4b0a-99e9-eb36a1fb2778 + + +""" + + +EB_DESCRIBE_APPLICATIONS = """ + + + + {% for application in applications %} + + + 2019-09-03T13:08:29.049Z + + + + 180 + false + false + + + false + 200 + false + + + + arn:aws:elasticbeanstalk:{{ region_name }}:111122223333:application/{{ application.name }} + {{ application.application_name }} + 2019-09-03T13:08:29.049Z + + {% endfor %} + + + + 015a05eb-282e-4b76-bd18-663fdfaf42e4 + + +""" + + +EB_CREATE_ENVIRONMENT = """ + + + {{ environment.solution_stack_name }} + Grey + {{ environment.environment_arn }} + 2019-09-04T09:41:24.222Z + 2019-09-04T09:41:24.222Z + {{ environment_id }} + {{ environment.platform_arn }} + + WebServer + Standard + 1.0 + + {{ environment.environment_name }} + {{ environment.application_name }} + Launching + + + 18dc8158-f5d7-4d5a-82ef-07fcaadf81c6 + + +""" + + +EB_DESCRIBE_ENVIRONMENTS = """ + + + + {% for env in environments %} + + {{ env.solution_stack_name }} + Grey + {{ env.environment_arn }} + false + 2019-08-30T09:35:10.913Z + false + + 2019-08-22T07:02:47.332Z + {{ env.environment_id }} + 1 + {{ env.platform_arn }} + + WebServer + Standard + 1.0 + + No Data + {{ env.environment_name }} + + + + {{ env.application_name }} + Ready + + {% endfor %} + + + + dd56b215-01a0-40b2-bd1e-57589c39424f + + +""" + + +# Current list as of 2019-09-04 +EB_LIST_AVAILABLE_SOLUTION_STACKS = """ + + + + 64bit Amazon Linux 2018.03 v4.10.1 running Node.js + 64bit Amazon Linux 2018.03 v4.9.2 running Node.js + 64bit Amazon Linux 2018.03 v4.8.0 running Node.js + 64bit Amazon Linux 2018.03 v4.6.0 running Node.js + 64bit Amazon Linux 2018.03 v4.5.3 running Node.js + 64bit Amazon Linux 2018.03 v4.5.1 running Node.js + 64bit Amazon Linux 2018.03 v4.5.0 running Node.js + 64bit Amazon Linux 2017.09 v4.4.6 running Node.js + 64bit Amazon Linux 2017.09 v4.4.5 running Node.js + 64bit Amazon Linux 2017.09 v4.4.4 running Node.js + 64bit Amazon Linux 2017.09 v4.4.2 running Node.js + 64bit Amazon Linux 2017.09 v4.4.0 running Node.js + 64bit Amazon Linux 2017.03 v4.3.0 running Node.js + 64bit Amazon Linux 2017.03 v4.2.2 running Node.js + 64bit Amazon Linux 2017.03 v4.2.1 running Node.js + 64bit Amazon Linux 2017.03 v4.2.0 running Node.js + 64bit Amazon Linux 2017.03 v4.1.1 running Node.js + 64bit Amazon Linux 2017.03 v4.1.0 running Node.js + 64bit Amazon Linux 2016.09 v4.0.1 running Node.js + 64bit Amazon Linux 2016.09 v4.0.0 running Node.js + 64bit Amazon Linux 2016.09 v3.3.1 running Node.js + 64bit Amazon Linux 2016.09 v3.1.0 running Node.js + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.2 running PHP 
7.2 + 64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1 + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0 + 64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0 + 64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6 + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4 + 64bit Amazon Linux 2018.03 v2.9.1 running Python + 64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7 + 64bit Amazon Linux 2018.03 v2.7.5 running Python 3.6 + 64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6 + 64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6 + 64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6 + 64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6 + 64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4 + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone) + 64bit Amazon Linux 2018.03 
v2.10.1 running Ruby 2.2 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3 + 64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger Standalone) + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma) + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6 + 64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8 + 64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8 + 64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8 + 64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8 + 64bit Windows Server Core 2016 v2.2.1 running IIS 10.0 + 64bit Windows Server 2016 v2.2.1 running IIS 10.0 + 64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5 + 64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5 + 64bit Windows Server Core 2016 v1.2.0 running IIS 10.0 + 64bit Windows Server 2016 v1.2.0 running IIS 10.0 + 64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5 + 64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5 + 64bit Windows Server 2012 v1.2.0 running IIS 8 + 64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5 + 64bit Windows Server Core 2012 R2 running IIS 8.5 + 64bit Windows Server 2012 R2 running IIS 8.5 + 64bit Windows Server 2012 running IIS 8 + 64bit Windows Server 2008 R2 running IIS 7.5 + 64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce + 64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6 + 64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic) + 64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker) + 64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker) + 64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker) + 64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker) + 64bit Amazon Linux 2018.03 v2.9.1 running Java 8 + 64bit Amazon Linux 2018.03 v2.9.1 running Java 7 + 64bit Amazon Linux 2018.03 v2.8.0 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.6 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.5 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.4 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.2 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.1 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.8 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.5 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.4 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.3 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.0 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.4 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.3 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.2 running Java 8 + 64bit Amazon Linux 2016.09 v2.4.4 running Java 8 + 64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7 + 64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3 + 64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker) + + + + 64bit Amazon Linux 2018.03 v4.10.1 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.9.2 
running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.8.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.6.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.3 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.1 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.6 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.5 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.4 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.2 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.3.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.2 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.1 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.1.1 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.1.0 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v4.0.1 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v4.0.0 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v3.3.1 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v3.1.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.2 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6 + + zip + + + + 64bit Amazon 
Linux 2017.09 v2.6.3 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.5 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger 
Standalone) + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma) + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Windows Server Core 2016 v2.2.1 running IIS 10.0 + + zip + + + + 64bit Windows Server 2016 v2.2.1 running IIS 10.0 + + zip + + + + 64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5 + + zip + + + + 64bit Windows Server Core 2016 v1.2.0 running IIS 10.0 + + zip + + + + 64bit Windows Server 2016 v1.2.0 running IIS 10.0 + + zip + + + + 64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 v1.2.0 running IIS 8 + + zip + + + + 64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5 + + zip + + + + 64bit Windows Server Core 2012 R2 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 running IIS 8 + + zip + + + + 64bit Windows Server 2008 R2 running IIS 7.5 + + zip + + + + 64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce + + + + 64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6 + + + + 64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic) + + zip + json + + + + 64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker) + + zip + + + + 64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker) + + zip + + + + 64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker) + + zip + + + + 64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker) + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Java 7 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.6 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.5 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.2 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.8 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.5 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.4 
running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.3 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.2 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2016.09 v2.4.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7 + + zip + + + + 64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3 + + + + 64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker) + + zip + + + + + + bd6bd2b2-9983-4845-b53b-fe53e8a5e1e7 + + +""" + + +EB_UPDATE_TAGS_FOR_RESOURCE = """ + + + f355d788-e67e-440f-b915-99e35254ffee + + +""" + + +EB_LIST_TAGS_FOR_RESOURCE = """ + + + + {% for key, value in tags.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + {{ arn }} + + + 178e410f-3b57-456f-a64c-a3b6a16da9ab + + +""" diff --git a/moto/elasticbeanstalk/urls.py b/moto/elasticbeanstalk/urls.py new file mode 100644 index 000000000..2d57f7f9d --- /dev/null +++ b/moto/elasticbeanstalk/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +from .responses import EBResponse + +url_bases = [ + r"https?://elasticbeanstalk.(?P[a-zA-Z0-9\-_]+).amazonaws.com", +] + +url_paths = { + "{0}/$": EBResponse.dispatch, +} diff --git a/moto/elb/models.py b/moto/elb/models.py index f77811623..47cdfd507 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -1,16 +1,19 @@ from __future__ import unicode_literals import datetime -from boto.ec2.elb.attributes import ( + +import pytz + +from moto.packages.boto.ec2.elb.attributes import ( LbAttributes, ConnectionSettingAttribute, ConnectionDrainingAttribute, AccessLogAttribute, CrossZoneLoadBalancingAttribute, ) -from boto.ec2.elb.policies import Policies, OtherPolicy +from moto.packages.boto.ec2.elb.policies import Policies, OtherPolicy from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2.models import ec2_backends from .exceptions import ( BadHealthCheckDefinition, @@ -66,7 +69,7 @@ class FakeBackend(BaseModel): ) -class FakeLoadBalancer(BaseModel): +class FakeLoadBalancer(CloudFormationModel): def __init__( self, name, @@ -83,7 +86,7 @@ class FakeLoadBalancer(BaseModel): self.zones = zones self.listeners = [] self.backends = [] - self.created_time = datetime.datetime.now() + self.created_time = datetime.datetime.now(pytz.utc) self.scheme = scheme self.attributes = FakeLoadBalancer.get_default_attributes() self.policies = Policies() @@ -116,6 +119,15 @@ class FakeLoadBalancer(BaseModel): ) self.backends.append(backend) + @staticmethod + def cloudformation_name_type(): + return "LoadBalancerName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancing-loadbalancer.html + return "AWS::ElasticLoadBalancing::LoadBalancer" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/elb/responses.py b/moto/elb/responses.py index de21f23e7..7bf627b66 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals -from boto.ec2.elb.attributes import ( +from moto.packages.boto.ec2.elb.attributes import ( ConnectionSettingAttribute, ConnectionDrainingAttribute, AccessLogAttribute, CrossZoneLoadBalancingAttribute, ) -from boto.ec2.elb.policies import AppCookieStickinessPolicy, OtherPolicy +from moto.packages.boto.ec2.elb.policies import 
AppCookieStickinessPolicy, OtherPolicy from moto.core.responses import BaseResponse from .models import elb_backends @@ -442,7 +442,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ 32: raise InvalidTargetGroupNameError( - "Target group name '%s' cannot be longer than '32' characters" % name + "Target group name '{}' cannot be longer than '32' characters".format( + name + ) ) - if not re.match("^[a-zA-Z0-9\-]+$", name): + if not re.match(r"^[a-zA-Z0-9\-]+$", name): raise InvalidTargetGroupNameError( - "Target group name '%s' can only contain characters that are alphanumeric characters or hyphens(-)" - % name + "Target group name '{}' can only contain characters that are alphanumeric characters or hyphens(-)".format( + name + ) ) # undocumented validation - if not re.match("(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$", name): + if not re.match(r"(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$", name): raise InvalidTargetGroupNameError( "1 validation error detected: Value '%s' at 'targetGroup.targetGroupArn.targetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" % name diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 922de96d4..68fbc8816 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -158,7 +158,7 @@ class ELBV2Response(BaseResponse): condition = {} condition["field"] = _condition["field"] values = sorted( - [e for e in _condition.items() if e[0].startswith("values.member")], + [e for e in _condition.items() if "values.member" in e[0]], key=lambda x: x[0], ) condition["values"] = [e[1] for e in values] @@ -356,7 +356,7 @@ class ELBV2Response(BaseResponse): condition = {} condition["field"] = _condition["field"] values = sorted( - [e for e in _condition.items() if e[0].startswith("values.member")], + [e for e in _condition.items() if "values.member" in e[0]], key=lambda x: x[0], ) condition["values"] = [e[1] for e in values] diff --git a/moto/emr/exceptions.py b/moto/emr/exceptions.py index 1a3398d4f..bb9634652 100644 --- a/moto/emr/exceptions.py +++ b/moto/emr/exceptions.py @@ -1,7 +1,14 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +from moto.core.exceptions import RESTError, JsonRESTError class EmrError(RESTError): code = 400 + + +class InvalidRequestException(JsonRESTError): + def __init__(self, message, **kwargs): + super(InvalidRequestException, self).__init__( + "InvalidRequestException", message, **kwargs + ) diff --git a/moto/emr/models.py b/moto/emr/models.py index 713b15b9f..b37ebf034 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -6,8 +6,13 @@ import pytz from boto3 import Session from dateutil.parser import parse as dtparse from moto.core import BaseBackend, BaseModel -from moto.emr.exceptions import EmrError -from .utils import random_instance_group_id, random_cluster_id, random_step_id +from moto.emr.exceptions import EmrError, InvalidRequestException +from .utils import ( + random_instance_group_id, + random_cluster_id, + random_step_id, + CamelToUnderscoresWalker, +) class FakeApplication(BaseModel): @@ -28,6 +33,7 @@ class FakeBootstrapAction(BaseModel): class FakeInstanceGroup(BaseModel): def __init__( self, + cluster_id, instance_count, instance_role, instance_type, @@ -35,8 +41,11 @@ class FakeInstanceGroup(BaseModel): name=None, id=None, bid_price=None, + ebs_configuration=None, + auto_scaling_policy=None, ): self.id = id or random_instance_group_id() + self.cluster_id = cluster_id self.bid_price = bid_price self.market 
= market @@ -51,7 +60,8 @@ class FakeInstanceGroup(BaseModel): self.num_instances = instance_count self.role = instance_role self.type = instance_type - + self.ebs_configuration = ebs_configuration + self.auto_scaling_policy = auto_scaling_policy self.creation_datetime = datetime.now(pytz.utc) self.start_datetime = datetime.now(pytz.utc) self.ready_datetime = datetime.now(pytz.utc) @@ -61,6 +71,34 @@ class FakeInstanceGroup(BaseModel): def set_instance_count(self, instance_count): self.num_instances = instance_count + @property + def auto_scaling_policy(self): + return self._auto_scaling_policy + + @auto_scaling_policy.setter + def auto_scaling_policy(self, value): + if value is None: + self._auto_scaling_policy = value + return + self._auto_scaling_policy = CamelToUnderscoresWalker.parse(value) + self._auto_scaling_policy["status"] = {"state": "ATTACHED"} + # Transform common ${emr.clusterId} placeholder in any dimensions it occurs in. + if "rules" in self._auto_scaling_policy: + for rule in self._auto_scaling_policy["rules"]: + if ( + "trigger" in rule + and "cloud_watch_alarm_definition" in rule["trigger"] + and "dimensions" in rule["trigger"]["cloud_watch_alarm_definition"] + ): + for dimension in rule["trigger"]["cloud_watch_alarm_definition"][ + "dimensions" + ]: + if ( + "value" in dimension + and dimension["value"] == "${emr.clusterId}" + ): + dimension["value"] = self.cluster_id + class FakeStep(BaseModel): def __init__( @@ -86,6 +124,9 @@ class FakeStep(BaseModel): self.start_datetime = None self.state = state + def start(self): + self.start_datetime = datetime.now(pytz.utc) + class FakeCluster(BaseModel): def __init__( @@ -105,6 +146,9 @@ class FakeCluster(BaseModel): requested_ami_version=None, running_ami_version=None, custom_ami_id=None, + step_concurrency_level=1, + security_configuration=None, + kerberos_attributes=None, ): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self @@ -195,6 +239,7 @@ class FakeCluster(BaseModel): self.role = job_flow_role or "EMRJobflowDefault" self.service_role = service_role + self.step_concurrency_level = step_concurrency_level self.creation_datetime = datetime.now(pytz.utc) self.start_datetime = None @@ -204,6 +249,12 @@ class FakeCluster(BaseModel): self.start_cluster() self.run_bootstrap_actions() + if self.steps: + self.steps[0].start() + self.security_configuration = ( + security_configuration # ToDo: Raise if doesn't already exist. 
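# ---------------------------------------------------------------------------
# Illustrative usage sketch (reviewer note, not part of the diff): a minimal
# example of the auto-scaling-policy handling added above. Per the setter on
# FakeInstanceGroup, the ${emr.clusterId} placeholder in a CloudWatch alarm
# dimension is expected to come back resolved to the real cluster id. All
# names, instance types, and thresholds below are made up.
import boto3
from moto import mock_emr


@mock_emr
def attach_auto_scaling_policy():
    client = boto3.client("emr", region_name="us-east-1")
    cluster_id = client.run_job_flow(
        Name="test-cluster",
        Instances={
            "InstanceGroups": [
                {
                    "Name": "core",
                    "InstanceRole": "CORE",
                    "InstanceType": "m5.xlarge",
                    "InstanceCount": 2,
                }
            ]
        },
        JobFlowRole="EMR_EC2_DefaultRole",
        ServiceRole="EMR_DefaultRole",
    )["JobFlowId"]
    group_id = client.list_instance_groups(ClusterId=cluster_id)[
        "InstanceGroups"
    ][0]["Id"]
    policy = client.put_auto_scaling_policy(
        ClusterId=cluster_id,
        InstanceGroupId=group_id,
        AutoScalingPolicy={
            "Constraints": {"MinCapacity": 2, "MaxCapacity": 4},
            "Rules": [
                {
                    "Name": "scale-out",
                    "Action": {
                        "SimpleScalingPolicyConfiguration": {
                            "ScalingAdjustment": 1,
                            "CoolDown": 300,
                        }
                    },
                    "Trigger": {
                        "CloudWatchAlarmDefinition": {
                            "ComparisonOperator": "GREATER_THAN",
                            "MetricName": "YARNMemoryAvailablePercentage",
                            "Period": 300,
                            "Threshold": 10.0,
                            "Dimensions": [
                                {"Key": "JobFlowId", "Value": "${emr.clusterId}"}
                            ],
                        }
                    },
                }
            ],
        },
    )["AutoScalingPolicy"]
    # Expected per the setter above: the placeholder now carries the id.
    dimension = policy["Rules"][0]["Trigger"]["CloudWatchAlarmDefinition"][
        "Dimensions"
    ][0]
    assert dimension["Value"] == cluster_id
# ---------------------------------------------------------------------------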
+ ) + self.kerberos_attributes = kerberos_attributes @property def instance_groups(self): @@ -292,12 +343,20 @@ class FakeCluster(BaseModel): self.visible_to_all_users = visibility +class FakeSecurityConfiguration(BaseModel): + def __init__(self, name, security_configuration): + self.name = name + self.security_configuration = security_configuration + self.creation_date_time = datetime.now(pytz.utc) + + class ElasticMapReduceBackend(BaseBackend): def __init__(self, region_name): super(ElasticMapReduceBackend, self).__init__() self.region_name = region_name self.clusters = {} self.instance_groups = {} + self.security_configurations = {} def reset(self): region_name = self.region_name @@ -312,7 +371,7 @@ class ElasticMapReduceBackend(BaseBackend): cluster = self.clusters[cluster_id] result_groups = [] for instance_group in instance_groups: - group = FakeInstanceGroup(**instance_group) + group = FakeInstanceGroup(cluster_id=cluster_id, **instance_group) self.instance_groups[group.id] = group cluster.add_instance_group(group) result_groups.append(group) @@ -426,6 +485,11 @@ class ElasticMapReduceBackend(BaseBackend): ) return steps[start_idx : start_idx + max_items], marker + def modify_cluster(self, cluster_id, step_concurrency_level): + cluster = self.clusters[cluster_id] + cluster.step_concurrency_level = step_concurrency_level + return cluster + def modify_instance_groups(self, instance_groups): result_groups = [] for instance_group in instance_groups: @@ -458,6 +522,56 @@ class ElasticMapReduceBackend(BaseBackend): clusters.append(cluster) return clusters + def put_auto_scaling_policy(self, instance_group_id, auto_scaling_policy): + instance_groups = self.get_instance_groups( + instance_group_ids=[instance_group_id] + ) + if len(instance_groups) == 0: + return None + instance_group = instance_groups[0] + instance_group.auto_scaling_policy = auto_scaling_policy + return instance_group + + def remove_auto_scaling_policy(self, cluster_id, instance_group_id): + instance_groups = self.get_instance_groups( + instance_group_ids=[instance_group_id] + ) + if len(instance_groups) == 0: + return None + instance_group = instance_groups[0] + instance_group.auto_scaling_policy = None + + def create_security_configuration(self, name, security_configuration): + if name in self.security_configurations: + raise InvalidRequestException( + message="SecurityConfiguration with name '{}' already exists.".format( + name + ) + ) + security_configuration = FakeSecurityConfiguration( + name=name, security_configuration=security_configuration + ) + self.security_configurations[name] = security_configuration + return security_configuration + + def get_security_configuration(self, name): + if name not in self.security_configurations: + raise InvalidRequestException( + message="Security configuration with name '{}' does not exist.".format( + name + ) + ) + return self.security_configurations[name] + + def delete_security_configuration(self, name): + if name not in self.security_configurations: + raise InvalidRequestException( + message="Security configuration with name '{}' does not exist.".format( + name + ) + ) + del self.security_configurations[name] + emr_backends = {} for region in Session().get_available_regions("emr"): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 94847ec8b..a5d98ced4 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -10,9 +10,10 @@ from six.moves.urllib.parse import urlparse from moto.core.responses import AWSServiceSpec from moto.core.responses import 
BaseResponse from moto.core.responses import xml_to_json_response +from moto.core.utils import tags_from_query_string from .exceptions import EmrError from .models import emr_backends -from .utils import steps_from_query_string, tags_from_query_string +from .utils import steps_from_query_string, Unflattener, ReleaseLabel def generate_boto3_response(operation): @@ -73,6 +74,10 @@ class ElasticMapReduceResponse(BaseResponse): instance_groups = self._get_list_prefix("InstanceGroups.member") for item in instance_groups: item["instance_count"] = int(item["instance_count"]) + # Adding support for EbsConfiguration + self._parse_ebs_configuration(item) + # Adding support for auto_scaling_policy + Unflattener.unflatten_complex_params(item, "auto_scaling_policy") instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups) template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) @@ -89,7 +94,7 @@ class ElasticMapReduceResponse(BaseResponse): @generate_boto3_response("AddTags") def add_tags(self): cluster_id = self._get_param("ResourceId") - tags = tags_from_query_string(self.querystring) + tags = tags_from_query_string(self.querystring, prefix="Tags") self.backend.add_tags(cluster_id, tags) template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() @@ -97,11 +102,29 @@ class ElasticMapReduceResponse(BaseResponse): def cancel_steps(self): raise NotImplementedError + @generate_boto3_response("CreateSecurityConfiguration") def create_security_configuration(self): - raise NotImplementedError + name = self._get_param("Name") + security_configuration = self._get_param("SecurityConfiguration") + resp = self.backend.create_security_configuration( + name=name, security_configuration=security_configuration + ) + template = self.response_template(CREATE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render(name=name, creation_date_time=resp.creation_date_time) + @generate_boto3_response("DescribeSecurityConfiguration") + def describe_security_configuration(self): + name = self._get_param("Name") + security_configuration = self.backend.get_security_configuration(name=name) + template = self.response_template(DESCRIBE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render(security_configuration=security_configuration) + + @generate_boto3_response("DeleteSecurityConfiguration") def delete_security_configuration(self): - raise NotImplementedError + name = self._get_param("Name") + self.backend.delete_security_configuration(name=name) + template = self.response_template(DELETE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render() @generate_boto3_response("DescribeCluster") def describe_cluster(self): @@ -122,9 +145,6 @@ class ElasticMapReduceResponse(BaseResponse): template = self.response_template(DESCRIBE_JOB_FLOWS_TEMPLATE) return template.render(clusters=clusters) - def describe_security_configuration(self): - raise NotImplementedError - @generate_boto3_response("DescribeStep") def describe_step(self): cluster_id = self._get_param("ClusterId") @@ -180,6 +200,14 @@ class ElasticMapReduceResponse(BaseResponse): template = self.response_template(LIST_STEPS_TEMPLATE) return template.render(steps=steps, marker=marker) + @generate_boto3_response("ModifyCluster") + def modify_cluster(self): + cluster_id = self._get_param("ClusterId") + step_concurrency_level = self._get_param("StepConcurrencyLevel") + cluster = self.backend.modify_cluster(cluster_id, step_concurrency_level) + template =
self.response_template(MODIFY_CLUSTER_TEMPLATE) + return template.render(cluster=cluster) + @generate_boto3_response("ModifyInstanceGroups") def modify_instance_groups(self): instance_groups = self._get_list_prefix("InstanceGroups.member") @@ -295,7 +323,9 @@ class ElasticMapReduceResponse(BaseResponse): custom_ami_id = self._get_param("CustomAmiId") if custom_ami_id: kwargs["custom_ami_id"] = custom_ami_id - if release_label and release_label < "emr-5.7.0": + if release_label and ( + ReleaseLabel(release_label) < ReleaseLabel("emr-5.7.0") + ): message = "Custom AMI is not allowed" raise EmrError( error_type="ValidationException", @@ -310,6 +340,43 @@ class ElasticMapReduceResponse(BaseResponse): template="error_json", ) + step_concurrency_level = self._get_param("StepConcurrencyLevel") + if step_concurrency_level: + kwargs["step_concurrency_level"] = step_concurrency_level + + security_configuration = self._get_param("SecurityConfiguration") + if security_configuration: + kwargs["security_configuration"] = security_configuration + + kerberos_attributes = {} + kwargs["kerberos_attributes"] = kerberos_attributes + + realm = self._get_param("KerberosAttributes.Realm") + if realm: + kerberos_attributes["Realm"] = realm + + kdc_admin_password = self._get_param("KerberosAttributes.KdcAdminPassword") + if kdc_admin_password: + kerberos_attributes["KdcAdminPassword"] = kdc_admin_password + + cross_realm_principal_password = self._get_param( + "KerberosAttributes.CrossRealmTrustPrincipalPassword" + ) + if cross_realm_principal_password: + kerberos_attributes[ + "CrossRealmTrustPrincipalPassword" + ] = cross_realm_principal_password + + ad_domain_join_user = self._get_param("KerberosAttributes.ADDomainJoinUser") + if ad_domain_join_user: + kerberos_attributes["ADDomainJoinUser"] = ad_domain_join_user + + ad_domain_join_password = self._get_param( + "KerberosAttributes.ADDomainJoinPassword" + ) + if ad_domain_join_password: + kerberos_attributes["ADDomainJoinPassword"] = ad_domain_join_password + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix("Applications.member") @@ -324,6 +391,10 @@ class ElasticMapReduceResponse(BaseResponse): if instance_groups: for ig in instance_groups: ig["instance_count"] = int(ig["instance_count"]) + # Adding support for EbsConfiguration + self._parse_ebs_configuration(ig) + # Adding support for auto_scaling_policy + Unflattener.unflatten_complex_params(ig, "auto_scaling_policy") self.backend.add_instance_groups(cluster.id, instance_groups) tags = self._get_list_prefix("Tags.member") @@ -335,6 +406,85 @@ class ElasticMapReduceResponse(BaseResponse): template = self.response_template(RUN_JOB_FLOW_TEMPLATE) return template.render(cluster=cluster) + def _has_key_prefix(self, key_prefix, value): + for key in value: # iterating the dict yields its keys + if key.startswith(key_prefix): + return True + return False + + def _parse_ebs_configuration(self, instance_group): + key_ebs_config = "ebs_configuration" + ebs_configuration = dict() + # Filter only EBS config keys + for key in instance_group: + if key.startswith(key_ebs_config): + ebs_configuration[key] = instance_group[key] + + if len(ebs_configuration) > 0: + # Keys that should be extracted + ebs_optimized = "ebs_optimized" + ebs_block_device_configs = "ebs_block_device_configs" + volume_specification = "volume_specification" + size_in_gb = "size_in_gb" + volume_type = "volume_type" + iops = "iops" + volumes_per_instance = "volumes_per_instance" + + key_ebs_optimized =
"{0}._{1}".format(key_ebs_config, ebs_optimized) + # EbsOptimized config + if key_ebs_optimized in ebs_configuration: + instance_group.pop(key_ebs_optimized) + ebs_configuration[ebs_optimized] = ebs_configuration.pop( + key_ebs_optimized + ) + + # Ebs Blocks + ebs_blocks = [] + idx = 1 + keyfmt = "{0}._{1}.member.{{}}".format( + key_ebs_config, ebs_block_device_configs + ) + key = keyfmt.format(idx) + while self._has_key_prefix(key, ebs_configuration): + vlespc_keyfmt = "{0}._{1}._{{}}".format(key, volume_specification) + vol_size = vlespc_keyfmt.format(size_in_gb) + vol_iops = vlespc_keyfmt.format(iops) + vol_type = vlespc_keyfmt.format(volume_type) + + ebs_block = dict() + ebs_block[volume_specification] = dict() + if vol_size in ebs_configuration: + instance_group.pop(vol_size) + ebs_block[volume_specification][size_in_gb] = int( + ebs_configuration.pop(vol_size) + ) + if vol_iops in ebs_configuration: + instance_group.pop(vol_iops) + ebs_block[volume_specification][iops] = ebs_configuration.pop( + vol_iops + ) + if vol_type in ebs_configuration: + instance_group.pop(vol_type) + ebs_block[volume_specification][ + volume_type + ] = ebs_configuration.pop(vol_type) + + per_instance = "{0}._{1}".format(key, volumes_per_instance) + if per_instance in ebs_configuration: + instance_group.pop(per_instance) + ebs_block[volumes_per_instance] = int( + ebs_configuration.pop(per_instance) + ) + + if len(ebs_block) > 0: + ebs_blocks.append(ebs_block) + idx += 1 + key = keyfmt.format(idx) + + if len(ebs_blocks) > 0: + ebs_configuration[ebs_block_device_configs] = ebs_blocks + instance_group[key_ebs_config] = ebs_configuration + @generate_boto3_response("SetTerminationProtection") def set_termination_protection(self): termination_protection = self._get_param("TerminationProtected") @@ -358,6 +508,25 @@ class ElasticMapReduceResponse(BaseResponse): template = self.response_template(TERMINATE_JOB_FLOWS_TEMPLATE) return template.render() + @generate_boto3_response("PutAutoScalingPolicy") + def put_auto_scaling_policy(self): + cluster_id = self._get_param("ClusterId") + instance_group_id = self._get_param("InstanceGroupId") + auto_scaling_policy = self._get_param("AutoScalingPolicy") + instance_group = self.backend.put_auto_scaling_policy( + instance_group_id, auto_scaling_policy + ) + template = self.response_template(PUT_AUTO_SCALING_POLICY) + return template.render(cluster_id=cluster_id, instance_group=instance_group) + + @generate_boto3_response("RemoveAutoScalingPolicy") + def remove_auto_scaling_policy(self): + cluster_id = self._get_param("ClusterId") + instance_group_id = self._get_param("InstanceGroupId") + instance_group = self.backend.put_auto_scaling_policy(instance_group_id, None) + template = self.response_template(REMOVE_AUTO_SCALING_POLICY) + return template.render(cluster_id=cluster_id, instance_group=instance_group) + ADD_INSTANCE_GROUPS_TEMPLATE = """ @@ -441,6 +610,23 @@ DESCRIBE_CLUSTER_TEMPLATE = """ + + {{ cluster.step_concurrency_level }} + + + 0751c837-e78d-4aef-95c9-9c4d29a092ff + + +""" + MODIFY_INSTANCE_GROUPS_TEMPLATE = """ 2690d7eb-ed86-11dd-9877-6fad448a8419 @@ -890,3 +1205,147 @@ TERMINATE_JOB_FLOWS_TEMPLATE = """ + + {{cluster_id}} + {{instance_group.id}} + {% if instance_group.auto_scaling_policy is not none %} + + {% if instance_group.auto_scaling_policy.constraints is not none %} + + {% if instance_group.auto_scaling_policy.constraints.min_capacity is not none %} + {{instance_group.auto_scaling_policy.constraints.min_capacity}} + {% endif %} + {% if 
ADD_INSTANCE_GROUPS_TEMPLATE = """ @@ -441,6 +610,23 @@ DESCRIBE_CLUSTER_TEMPLATE = """ +MODIFY_CLUSTER_TEMPLATE = """<ModifyClusterResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <ModifyClusterResult> + <StepConcurrencyLevel>{{ cluster.step_concurrency_level }}</StepConcurrencyLevel> + </ModifyClusterResult> + <ResponseMetadata> + <RequestId>0751c837-e78d-4aef-95c9-9c4d29a092ff</RequestId> + </ResponseMetadata> +</ModifyClusterResponse>""" + MODIFY_INSTANCE_GROUPS_TEMPLATE = """ 2690d7eb-ed86-11dd-9877-6fad448a8419 @@ -890,3 +1205,147 @@ TERMINATE_JOB_FLOWS_TEMPLATE = """ +PUT_AUTO_SCALING_POLICY = """<PutAutoScalingPolicyResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <PutAutoScalingPolicyResult> + <ClusterId>{{cluster_id}}</ClusterId> + <InstanceGroupId>{{instance_group.id}}</InstanceGroupId> + {% if instance_group.auto_scaling_policy is not none %} + <AutoScalingPolicy> + {% if instance_group.auto_scaling_policy.constraints is not none %} + <Constraints> + {% if instance_group.auto_scaling_policy.constraints.min_capacity is not none %} + <MinCapacity>{{instance_group.auto_scaling_policy.constraints.min_capacity}}</MinCapacity> + {% endif %} + {% if instance_group.auto_scaling_policy.constraints.max_capacity is not none %} + <MaxCapacity>{{instance_group.auto_scaling_policy.constraints.max_capacity}}</MaxCapacity> + {% endif %} + </Constraints> + {% endif %} + {% if instance_group.auto_scaling_policy.rules is not none %} + <Rules> + {% for rule in instance_group.auto_scaling_policy.rules %} + <member> + {% if 'name' in rule %} + <Name>{{rule['name']}}</Name> + {% endif %} + {% if 'description' in rule %} + <Description>{{rule['description']}}</Description> + {% endif %} + {% if 'action' in rule %} + <Action> + {% if 'market' in rule['action'] %} + <Market>{{rule['action']['market']}}</Market> + {% endif %} + {% if 'simple_scaling_policy_configuration' in rule['action'] %} + <SimpleScalingPolicyConfiguration> + {% if 'adjustment_type' in rule['action']['simple_scaling_policy_configuration'] %} + <AdjustmentType>{{rule['action']['simple_scaling_policy_configuration']['adjustment_type']}}</AdjustmentType> + {% endif %} + {% if 'scaling_adjustment' in rule['action']['simple_scaling_policy_configuration'] %} + <ScalingAdjustment>{{rule['action']['simple_scaling_policy_configuration']['scaling_adjustment']}}</ScalingAdjustment> + {% endif %} + {% if 'cool_down' in rule['action']['simple_scaling_policy_configuration'] %} + <CoolDown>{{rule['action']['simple_scaling_policy_configuration']['cool_down']}}</CoolDown> + {% endif %} + </SimpleScalingPolicyConfiguration> + {% endif %} + </Action> + {% endif %} + {% if 'trigger' in rule %} + <Trigger> + {% if 'cloud_watch_alarm_definition' in rule['trigger'] %} + <CloudWatchAlarmDefinition> + {% if 'comparison_operator' in rule['trigger']['cloud_watch_alarm_definition'] %} + <ComparisonOperator>{{rule['trigger']['cloud_watch_alarm_definition']['comparison_operator']}}</ComparisonOperator> + {% endif %} + {% if 'evaluation_periods' in rule['trigger']['cloud_watch_alarm_definition'] %} + <EvaluationPeriods>{{rule['trigger']['cloud_watch_alarm_definition']['evaluation_periods']}}</EvaluationPeriods> + {% endif %} + {% if 'metric_name' in rule['trigger']['cloud_watch_alarm_definition'] %} + <MetricName>{{rule['trigger']['cloud_watch_alarm_definition']['metric_name']}}</MetricName> + {% endif %} + {% if 'namespace' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Namespace>{{rule['trigger']['cloud_watch_alarm_definition']['namespace']}}</Namespace> + {% endif %} + {% if 'period' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Period>{{rule['trigger']['cloud_watch_alarm_definition']['period']}}</Period> + {% endif %} + {% if 'statistic' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Statistic>{{rule['trigger']['cloud_watch_alarm_definition']['statistic']}}</Statistic> + {% endif %} + {% if 'threshold' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Threshold>{{rule['trigger']['cloud_watch_alarm_definition']['threshold']}}</Threshold> + {% endif %} + {% if 'unit' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Unit>{{rule['trigger']['cloud_watch_alarm_definition']['unit']}}</Unit> + {% endif %} + {% if 'dimensions' in rule['trigger']['cloud_watch_alarm_definition'] %} + <Dimensions> + {% for dimension in rule['trigger']['cloud_watch_alarm_definition']['dimensions'] %} + <member> + {% if 'key' in dimension %} + <Key>{{dimension['key']}}</Key> + {% endif %} + {% if 'value' in dimension %} + <Value>{{dimension['value']}}</Value> + {% endif %} + </member> + {% endfor %} + </Dimensions> + {% endif %} + </CloudWatchAlarmDefinition> + {% endif %} + </Trigger> + {% endif %} + </member> + {% endfor %} + </Rules> + {% endif %} + {% if instance_group.auto_scaling_policy.status is not none %} + <Status> + {% if 'state' in instance_group.auto_scaling_policy.status %} + <State>{{instance_group.auto_scaling_policy.status['state']}}</State> + {% endif %} + </Status> + {% endif %} + </AutoScalingPolicy> + {% endif %} + </PutAutoScalingPolicyResult> + <ResponseMetadata> + <RequestId>d47379d9-b505-49af-9335-a68950d82535</RequestId> + </ResponseMetadata> +</PutAutoScalingPolicyResponse>""" + +REMOVE_AUTO_SCALING_POLICY = """<RemoveAutoScalingPolicyResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <ResponseMetadata> + <RequestId>c04a1042-5340-4c0a-a7b5-7779725ce4f7</RequestId> + </ResponseMetadata> +</RemoveAutoScalingPolicyResponse>""" + +CREATE_SECURITY_CONFIGURATION_TEMPLATE = """<CreateSecurityConfigurationResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <CreateSecurityConfigurationResult> + <Name>{{name}}</Name> + <CreationDateTime>{{creation_date_time}}</CreationDateTime> + </CreateSecurityConfigurationResult> + <ResponseMetadata> + <RequestId>2690d7eb-ed86-11dd-9877-6fad448a8419</RequestId> + </ResponseMetadata> +</CreateSecurityConfigurationResponse>"""
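The security-configuration templates pair with the backend CRUD methods added earlier in the diff. A small, hedged end-to-end sketch (not part of the diff; the JSON body is an arbitrary example):

import json
import boto3
from moto import mock_emr

@mock_emr
def security_configuration_demo():
    client = boto3.client("emr", region_name="us-east-1")
    body = json.dumps(
        {"EncryptionConfiguration": {"EnableInTransitEncryption": False, "EnableAtRestEncryption": False}}
    )
    client.create_security_configuration(Name="my-config", SecurityConfiguration=body)
    described = client.describe_security_configuration(Name="my-config")
    print(described["Name"], described["SecurityConfiguration"])
    client.delete_security_configuration(Name="my-config")
    # Deleting (or describing) it again now raises a ClientError carrying the
    # backend's InvalidRequestException "does not exist" message.

security_configuration_demo()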
DESCRIBE_SECURITY_CONFIGURATION_TEMPLATE = """<DescribeSecurityConfigurationResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <DescribeSecurityConfigurationResult> + <Name>{{security_configuration['name']}}</Name> + <SecurityConfiguration>{{security_configuration['security_configuration']}}</SecurityConfiguration> + <CreationDateTime>{{security_configuration['creation_date_time']}}</CreationDateTime> + </DescribeSecurityConfigurationResult> + <ResponseMetadata> + <RequestId>2690d7eb-ed86-11dd-9877-6fad448a8419</RequestId> + </ResponseMetadata> +</DescribeSecurityConfigurationResponse>""" + +DELETE_SECURITY_CONFIGURATION_TEMPLATE = """<DeleteSecurityConfigurationResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> + <ResponseMetadata> + <RequestId>2690d7eb-ed86-11dd-9877-6fad448a8419</RequestId> + </ResponseMetadata> +</DeleteSecurityConfigurationResponse>""" diff --git a/moto/emr/utils.py b/moto/emr/utils.py index 0f75995b8..506201c1c 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -1,6 +1,8 @@ from __future__ import unicode_literals import random +import re import string +from moto.core.utils import camelcase_to_underscores import six @@ -22,22 +24,6 @@ def random_instance_group_id(size=13): return "i-{0}".format(random_id()) -def tags_from_query_string(querystring_dict): - prefix = "Tags" - suffix = "Key" - response_values = {} - for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." + suffix, "") - tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0] - tag_value_key = "Tags.{0}.Value".format(tag_index) - if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] - else: - response_values[tag_key] = None - return response_values - - def steps_from_query_string(querystring_dict): steps = [] for step in querystring_dict: @@ -53,3 +39,182 @@ def steps_from_query_string(querystring_dict): idx += 1 steps.append(step) return steps + + +class Unflattener: + @staticmethod + def unflatten_complex_params(input_dict, param_name): + """Function to unflatten (portions of) dicts with complex keys. The moto request parser flattens the incoming + request bodies, which is generally helpful, but for nested dicts/lists can result in a hard-to-manage + parameter explosion. This function allows one to selectively unflatten a set of dict keys, replacing them + with a deep dict/list structure named identically to the root component in the complex name. + + Complex keys are composed of multiple components + separated by periods. Components may be prefixed with _, which is stripped. List indexes are represented + with two components, 'member' and the index number.""" + items_to_process = {} + for k in input_dict.keys(): + if k.startswith(param_name): + items_to_process[k] = input_dict[k] + if len(items_to_process) == 0: + return + + for k in items_to_process.keys(): + del input_dict[k] + + for k in items_to_process.keys(): + Unflattener._set_deep(k, input_dict, items_to_process[k]) + + @staticmethod + def _set_deep(complex_key, container, value): + keys = complex_key.split(".") + keys.reverse() + + while len(keys) > 0: + if len(keys) == 1: + key = keys.pop().strip("_") + Unflattener._add_to_container(container, key, value) + else: + key = keys.pop().strip("_") + if keys[-1] == "member": + keys.pop() + if not Unflattener._key_in_container(container, key): + container = Unflattener._add_to_container(container, key, []) + else: + container = Unflattener._get_child(container, key) + else: + if not Unflattener._key_in_container(container, key): + container = Unflattener._add_to_container(container, key, {}) + else: + container = Unflattener._get_child(container, key) + + @staticmethod + def _add_to_container(container, key, value): + if type(container) is dict: + container[key] = value + elif type(container) is list: + i = int(key) + while len(container) < i: + container.append(None) + container[i - 1] = value + return value + + @staticmethod + def _get_child(container, key): + if type(container) is dict: + return container[key] + elif type(container) is list: + i = int(key) + return container[i - 1] + + @staticmethod + def _key_in_container(container, key): + if type(container) is dict: + return key in container + elif type(container) is list: + i = int(key) + return len(container) >= i + +
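A worked example (not part of the diff) of what unflatten_complex_params does to a flattened parameter dict, assuming the Unflattener class above:

params = {
    "auto_scaling_policy._constraints._min_capacity": "2",
    "auto_scaling_policy._constraints._max_capacity": "10",
    "auto_scaling_policy._rules.member.1._name": "scale-out",
    "instance_count": "3",  # left alone: does not share the prefix
}
Unflattener.unflatten_complex_params(params, "auto_scaling_policy")
# params was modified in place; values stay strings, only the shape changes:
# {
#     "instance_count": "3",
#     "auto_scaling_policy": {
#         "constraints": {"min_capacity": "2", "max_capacity": "10"},
#         "rules": [{"name": "scale-out"}],
#     },
# }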
+class CamelToUnderscoresWalker: + """A class to convert the keys in dict/list hierarchical data structures from CamelCase to snake_case (underscores)""" + + @staticmethod + def parse(x): + if isinstance(x, dict): + return CamelToUnderscoresWalker.parse_dict(x) + elif isinstance(x, list): + return CamelToUnderscoresWalker.parse_list(x) + else: + return CamelToUnderscoresWalker.parse_scalar(x) + + @staticmethod + def parse_dict(x): + temp = {} + for key in x.keys(): + temp[camelcase_to_underscores(key)] = CamelToUnderscoresWalker.parse(x[key]) + return temp + + @staticmethod + def parse_list(x): + temp = [] + for i in x: + temp.append(CamelToUnderscoresWalker.parse(i)) + return temp + + @staticmethod + def parse_scalar(x): + return x + + +class ReleaseLabel(object): + + version_re = re.compile(r"^emr-(\d+)\.(\d+)\.(\d+)$") + + def __init__(self, release_label): + major, minor, patch = self.parse(release_label) + + self.major = major + self.minor = minor + self.patch = patch + + @classmethod + def parse(cls, release_label): + if not release_label: + raise ValueError("Invalid empty ReleaseLabel: %r" % release_label) + + match = cls.version_re.match(release_label) + if not match: + raise ValueError("Invalid ReleaseLabel: %r" % release_label) + + major, minor, patch = match.groups() + + major = int(major) + minor = int(minor) + patch = int(patch) + + return major, minor, patch + + def __str__(self): + version = "emr-%d.%d.%d" % (self.major, self.minor, self.patch) + return version + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, str(self)) + + def __iter__(self): + return iter((self.major, self.minor, self.patch)) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + self.major == other.major + and self.minor ==
other.minor + and self.patch == other.patch + ) + + def __ne__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) != tuple(other) + + def __lt__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) < tuple(other) + + def __le__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) <= tuple(other) + + def __gt__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) > tuple(other) + + def __ge__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) >= tuple(other) diff --git a/moto/events/models.py b/moto/events/models.py index 548d41393..a3675d8e1 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -4,11 +4,13 @@ import json from boto3 import Session from moto.core.exceptions import JsonRESTError -from moto.core import BaseBackend, BaseModel -from moto.sts.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel +from moto.utilities.tagging_service import TaggingService + +from uuid import uuid4 -class Rule(BaseModel): +class Rule(CloudFormationModel): def _generate_arn(self, name): return "arn:aws:events:{region_name}:111111111111:rule/{name}".format( region_name=self.region_name, name=name @@ -23,13 +25,12 @@ class Rule(BaseModel): self.state = kwargs.get("State") or "ENABLED" self.description = kwargs.get("Description") self.role_arn = kwargs.get("RoleArn") + self.event_bus_name = kwargs.get("EventBusName", "default") self.targets = [] - def enable(self): - self.state = "ENABLED" - - def disable(self): - self.state = "DISABLED" + @property + def physical_resource_id(self): + return self.name # This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts # with Python 2.6, so tracking it with an array it is. @@ -39,6 +40,16 @@ class Rule(BaseModel): return i return None + def enable(self): + self.state = "ENABLED" + + def disable(self): + self.state = "DISABLED" + + def delete(self, region_name): + event_backend = events_backends[region_name] + event_backend.delete_rule(name=self.name) + def put_targets(self, targets): # Not testing for valid ARNs. 
for target in targets: @@ -54,8 +65,51 @@ class Rule(BaseModel): if index is not None: self.targets.pop(index) + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException -class EventBus(BaseModel): + if attribute_name == "Arn": + return self.arn + + raise UnformattedGetAttTemplateException() + + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html + return "AWS::Events::Rule" + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + event_backend = events_backends[region_name] + event_name = resource_name + return event_backend.put_rule(name=event_name, **properties) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + original_resource.delete(region_name) + return cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + event_backend = events_backends[region_name] + event_name = resource_name + event_backend.delete_rule(name=event_name) + + +class EventBus(CloudFormationModel): def __init__(self, region_name, name): self.region = region_name self.name = name @@ -90,6 +144,60 @@ class EventBus(BaseModel): return json.dumps(policy) + def delete(self, region_name): + event_backend = events_backends[region_name] + event_backend.delete_event_bus(name=self.name) + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.arn + elif attribute_name == "Name": + return self.name + elif attribute_name == "Policy": + return self.policy + + raise UnformattedGetAttTemplateException() + + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbus.html + return "AWS::Events::EventBus" + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + event_backend = events_backends[region_name] + event_name = resource_name + event_source_name = properties.get("EventSourceName") + return event_backend.create_event_bus( + name=event_name, event_source_name=event_source_name + ) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + original_resource.delete(region_name) + return cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + event_backend = events_backends[region_name] + event_bus_name = resource_name + event_backend.delete_event_bus(event_bus_name) + class EventsBackend(BaseBackend): ACCOUNT_ID = re.compile(r"^(\d{1,12}|\*)$") @@ -104,6 +212,7 @@ class EventsBackend(BaseBackend): self.region_name = region_name self.event_buses = {} self.event_sources = {} + self.tagger = TaggingService() self._add_default_event_bus() @@ -141,6 +250,9 @@ 
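Before the backend changes below, a hedged sketch (not part of the diff) of the rule tagging and put_events behavior this series of hunks wires up, via boto3 under mock_events; rule and event names are illustrative:

import boto3
from moto import mock_events

@mock_events
def events_demo():
    client = boto3.client("events", region_name="us-east-1")
    rule_arn = client.put_rule(Name="nightly", ScheduleExpression="rate(1 day)")["RuleArn"]
    client.tag_resource(ResourceARN=rule_arn, Tags=[{"Key": "team", "Value": "data"}])
    print(client.list_tags_for_resource(ResourceARN=rule_arn)["Tags"])
    # put_events now returns a generated EventId per entry instead of an empty body.
    resp = client.put_events(Entries=[{"Source": "my.app", "DetailType": "ping", "Detail": "{}"}])
    print(resp["FailedEntryCount"], resp["Entries"][0]["EventId"])

events_demo()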
class EventsBackend(BaseBackend): def delete_rule(self, name): self.rules_order.pop(self.rules_order.index(name)) + arn = self.rules.get(name).arn + if self.tagger.has_tags(arn): + self.tagger.delete_all_tags_for_resource(arn) return self.rules.pop(name) is not None def describe_rule(self, name): @@ -227,10 +339,10 @@ class EventsBackend(BaseBackend): return return_obj def put_rule(self, name, **kwargs): - rule = Rule(name, self.region_name, **kwargs) - self.rules[rule.name] = rule - self.rules_order.append(rule.name) - return rule.arn + new_rule = Rule(name, self.region_name, **kwargs) + self.rules[new_rule.name] = new_rule + self.rules_order.append(new_rule.name) + return new_rule def put_targets(self, name, targets): rule = self.rules.get(name) @@ -250,16 +362,19 @@ class EventsBackend(BaseBackend): raise JsonRESTError("ValidationError", "Can only submit 10 events at once") # We don't really need to store the events yet - return [] + return [{"EventId": str(uuid4())} for _ in events] def remove_targets(self, name, ids): rule = self.rules.get(name) if rule: rule.remove_targets(ids) - return True - - return False + return {"FailedEntries": [], "FailedEntryCount": 0} + else: + raise JsonRESTError( + "ResourceNotFoundException", + "An entity that you specified does not exist", + ) def test_event_pattern(self): raise NotImplementedError() @@ -278,12 +393,12 @@ class EventsBackend(BaseBackend): if principal is None or self.ACCOUNT_ID.match(principal) is None: raise JsonRESTError( - "InvalidParameterValue", "Principal must match ^(\d{1,12}|\*)$" + "InvalidParameterValue", r"Principal must match ^(\d{1,12}|\*)$" ) if statement_id is None or self.STATEMENT_ID.match(statement_id) is None: raise JsonRESTError( - "InvalidParameterValue", "StatementId must match ^[a-zA-Z0-9-_]{1,64}$" + "InvalidParameterValue", r"StatementId must match ^[a-zA-Z0-9-_]{1,64}$" ) event_bus._permissions[statement_id] = { @@ -321,7 +436,7 @@ class EventsBackend(BaseBackend): return event_bus - def create_event_bus(self, name, event_source_name): + def create_event_bus(self, name, event_source_name=None): if name in self.event_buses: raise JsonRESTError( "ResourceAlreadyExistsException", @@ -358,9 +473,34 @@ class EventsBackend(BaseBackend): raise JsonRESTError( "ValidationException", "Cannot delete event bus default." ) - self.event_buses.pop(name, None) + self.event_buses.pop(name, None) + def list_tags_for_resource(self, arn): + name = arn.split("/")[-1] + if name in self.rules: + return self.tagger.list_tags_for_resource(self.rules[name].arn) + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + + def tag_resource(self, arn, tags): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.tag_resource(self.rules[name].arn, tags) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + + def untag_resource(self, arn, tag_names): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.untag_resource_using_names(self.rules[name].arn, tag_names) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist."
+ ) + events_backends = {} for region in Session().get_available_regions("events"): diff --git a/moto/events/responses.py b/moto/events/responses.py index b415564f8..99577bacb 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -25,6 +25,7 @@ class EventsHandler(BaseResponse): "Description": rule.description, "ScheduleExpression": rule.schedule_exp, "RoleArn": rule.role_arn, + "EventBusName": rule.event_bus_name, } @property @@ -62,7 +63,9 @@ class EventsHandler(BaseResponse): rule = self.events_backend.describe_rule(name) if not rule: - return self.error("ResourceNotFoundException", "Rule test does not exist.") + return self.error( + "ResourceNotFoundException", "Rule " + name + " does not exist." + ) rule_dict = self._generate_rule_dict(rule) return json.dumps(rule_dict), self.response_headers @@ -148,14 +151,15 @@ class EventsHandler(BaseResponse): def put_events(self): events = self._get_param("Entries") - failed_entries = self.events_backend.put_events(events) + entries = self.events_backend.put_events(events) - if failed_entries: - return json.dumps( - {"FailedEntryCount": len(failed_entries), "Entries": failed_entries} - ) + failed_count = len([e for e in entries if "ErrorCode" in e]) + response = { + "FailedEntryCount": failed_count, + "Entries": entries, + } - return "", self.response_headers + return json.dumps(response) def put_rule(self): name = self._get_param("Name") @@ -164,6 +168,7 @@ class EventsHandler(BaseResponse): state = self._get_param("State") desc = self._get_param("Description") role_arn = self._get_param("RoleArn") + event_bus_name = self._get_param("EventBusName") if not name: return self.error("ValidationException", "Parameter Name is required.") @@ -180,25 +185,26 @@ class EventsHandler(BaseResponse): if sched_exp: if not ( - re.match("^cron\(.*\)", sched_exp) + re.match(r"^cron\(.*\)", sched_exp) or re.match( - "^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)", sched_exp + r"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)", sched_exp ) ): return self.error( "ValidationException", "Parameter ScheduleExpression is not valid." ) - rule_arn = self.events_backend.put_rule( + rule = self.events_backend.put_rule( name, ScheduleExpression=sched_exp, EventPattern=event_pattern, State=state, Description=desc, RoleArn=role_arn, + EventBusName=event_bus_name, ) - return json.dumps({"RuleArn": rule_arn}), self.response_headers + return json.dumps({"RuleArn": rule.arn}), self.response_headers def put_targets(self): rule_name = self._get_param("Rule") @@ -215,7 +221,10 @@ class EventsHandler(BaseResponse): "ResourceNotFoundException", "Rule " + rule_name + " does not exist." ) - return "", self.response_headers + return ( + json.dumps({"FailedEntryCount": 0, "FailedEntries": []}), + self.response_headers, + ) def remove_targets(self): rule_name = self._get_param("Rule") @@ -232,7 +241,10 @@ class EventsHandler(BaseResponse): "ResourceNotFoundException", "Rule " + rule_name + " does not exist." 
) - return "", self.response_headers + return ( + json.dumps({"FailedEntryCount": 0, "FailedEntries": []}), + self.response_headers, + ) def test_event_pattern(self): pass @@ -297,3 +309,26 @@ class EventsHandler(BaseResponse): self.events_backend.delete_event_bus(name) return "", self.response_headers + + def list_tags_for_resource(self): + arn = self._get_param("ResourceARN") + + result = self.events_backend.list_tags_for_resource(arn) + + return json.dumps(result), self.response_headers + + def tag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("Tags") + + result = self.events_backend.tag_resource(arn, tags) + + return json.dumps(result), self.response_headers + + def untag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("TagKeys") + + result = self.events_backend.untag_resource(arn, tags) + + return json.dumps(result), self.response_headers diff --git a/moto/forecast/__init__.py b/moto/forecast/__init__.py new file mode 100644 index 000000000..75b23b94a --- /dev/null +++ b/moto/forecast/__init__.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .models import forecast_backends +from ..core.models import base_decorator + +forecast_backend = forecast_backends["us-east-1"] +mock_forecast = base_decorator(forecast_backends) diff --git a/moto/forecast/exceptions.py b/moto/forecast/exceptions.py new file mode 100644 index 000000000..dbc6f6414 --- /dev/null +++ b/moto/forecast/exceptions.py @@ -0,0 +1,27 @@ +from __future__ import unicode_literals + +from moto.core.exceptions import AWSError + + +class InvalidInputException(AWSError): + TYPE = "InvalidInputException" + + +class ResourceAlreadyExistsException(AWSError): + TYPE = "ResourceAlreadyExistsException" + + +class ResourceNotFoundException(AWSError): + TYPE = "ResourceNotFoundException" + + +class ResourceInUseException(AWSError): + TYPE = "ResourceInUseException" + + +class LimitExceededException(AWSError): + TYPE = "LimitExceededException" + + +class ValidationException(AWSError): + TYPE = "ValidationException" diff --git a/moto/forecast/models.py b/moto/forecast/models.py new file mode 100644 index 000000000..c7b18618c --- /dev/null +++ b/moto/forecast/models.py @@ -0,0 +1,173 @@ +import re +from datetime import datetime + +from boto3 import Session +from future.utils import iteritems + +from moto.core import ACCOUNT_ID, BaseBackend +from moto.core.utils import iso_8601_datetime_without_milliseconds +from .exceptions import ( + InvalidInputException, + ResourceAlreadyExistsException, + ResourceNotFoundException, + ValidationException, +) + + +class DatasetGroup: + accepted_dataset_group_name_format = re.compile(r"^[a-zA-Z][a-zA-Z0-9_]*") + accepted_dataset_group_arn_format = re.compile(r"^[a-zA-Z0-9\-\_\.\/\:]+$") + accepted_dataset_types = [ + "INVENTORY_PLANNING", + "METRICS", + "RETAIL", + "EC2_CAPACITY", + "CUSTOM", + "WEB_TRAFFIC", + "WORK_FORCE", + ] + + def __init__( + self, region_name, dataset_arns, dataset_group_name, domain, tags=None + ): + self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.modified_date = self.creation_date + + self.arn = ( + "arn:aws:forecast:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":dataset-group/" + + dataset_group_name + ) + self.dataset_arns = dataset_arns if dataset_arns else [] + self.dataset_group_name = dataset_group_name + self.domain = domain + self.tags = tags + self._validate() + + def update(self, dataset_arns): + self.dataset_arns = dataset_arns + self.modified_date = iso_8601_datetime_without_milliseconds(datetime.now()) + + def _validate(self): + errors = [] + + errors.extend(self._validate_dataset_group_name()) + errors.extend(self._validate_dataset_group_name_len()) + errors.extend(self._validate_dataset_group_domain()) + + if errors: + err_count = len(errors) + message = str(err_count) + " validation error" + message += "s" if err_count > 1 else "" + message += " detected: " + message += "; ".join(errors) + raise ValidationException(message) + + def _validate_dataset_group_name(self): + errors = [] + if not re.match( + self.accepted_dataset_group_name_format, self.dataset_group_name + ): + errors.append( + "Value '" + + self.dataset_group_name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern " + + self.accepted_dataset_group_name_format.pattern + ) + return errors + + def _validate_dataset_group_name_len(self): + errors = [] + if len(self.dataset_group_name) >= 64: + errors.append( + "Value '" + + self.dataset_group_name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must have length less than or equal to 63" + ) + return errors + + def _validate_dataset_group_domain(self): + errors = [] + if self.domain not in self.accepted_dataset_types: + errors.append( + "Value '" + + self.domain + + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set " + + str(self.accepted_dataset_types) + ) + return errors + +
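With DatasetGroup's validation in place, a hedged sketch (not part of the diff) of how it surfaces through the boto3 client and the new mock_forecast decorator; names are illustrative:

import boto3
from botocore.exceptions import ClientError
from moto import mock_forecast

@mock_forecast
def forecast_demo():
    client = boto3.client("forecast", region_name="us-east-1")
    arn = client.create_dataset_group(DatasetGroupName="my_dsg", Domain="CUSTOM")["DatasetGroupArn"]
    print(client.describe_dataset_group(DatasetGroupArn=arn)["Status"])  # "ACTIVE"
    try:
        client.create_dataset_group(DatasetGroupName="bad_dsg", Domain="NOT_A_DOMAIN")
    except ClientError as err:
        # _validate() rejects domains outside accepted_dataset_types.
        print(err.response["Error"]["Code"])  # ValidationException

forecast_demo()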
+class ForecastBackend(BaseBackend): + def __init__(self, region_name): + super(ForecastBackend, self).__init__() + self.dataset_groups = {} + self.datasets = {} + self.region_name = region_name + + def create_dataset_group(self, dataset_group_name, domain, dataset_arns, tags): + dataset_group = DatasetGroup( + region_name=self.region_name, + dataset_group_name=dataset_group_name, + domain=domain, + dataset_arns=dataset_arns, + tags=tags, + ) + + if dataset_arns: + for dataset_arn in dataset_arns: + if dataset_arn not in self.datasets: + raise InvalidInputException( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + if self.dataset_groups.get(dataset_group.arn): + raise ResourceAlreadyExistsException( + "A dataset group already exists with the arn: " + dataset_group.arn + ) + + self.dataset_groups[dataset_group.arn] = dataset_group + return dataset_group + + def describe_dataset_group(self, dataset_group_arn): + try: + dataset_group = self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + return dataset_group + + def delete_dataset_group(self, dataset_group_arn): + try: + del self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + + def update_dataset_group(self, dataset_group_arn, dataset_arns): + try: + dsg = self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + + for dataset_arn in dataset_arns: + if dataset_arn not in dsg.dataset_arns: + raise InvalidInputException( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + dsg.update(dataset_arns) + + def list_dataset_groups(self): + return [v for (_, v) in iteritems(self.dataset_groups)] + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + +forecast_backends = {} +for region in Session().get_available_regions("forecast"): + forecast_backends[region] =
ForecastBackend(region) diff --git a/moto/forecast/responses.py b/moto/forecast/responses.py new file mode 100644 index 000000000..09d55b0d8 --- /dev/null +++ b/moto/forecast/responses.py @@ -0,0 +1,92 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id +from .exceptions import AWSError +from .models import forecast_backends + + +class ForecastResponse(BaseResponse): + @property + def forecast_backend(self): + return forecast_backends[self.region] + + @amzn_request_id + def create_dataset_group(self): + dataset_group_name = self._get_param("DatasetGroupName") + domain = self._get_param("Domain") + dataset_arns = self._get_param("DatasetArns") + tags = self._get_param("Tags") + + try: + dataset_group = self.forecast_backend.create_dataset_group( + dataset_group_name=dataset_group_name, + domain=domain, + dataset_arns=dataset_arns, + tags=tags, + ) + response = {"DatasetGroupArn": dataset_group.arn} + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + + try: + dataset_group = self.forecast_backend.describe_dataset_group( + dataset_group_arn=dataset_group_arn + ) + response = { + "CreationTime": dataset_group.creation_date, + "DatasetArns": dataset_group.dataset_arns, + "DatasetGroupArn": dataset_group.arn, + "DatasetGroupName": dataset_group.dataset_group_name, + "Domain": dataset_group.domain, + "LastModificationTime": dataset_group.modified_date, + "Status": "ACTIVE", + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def delete_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + try: + self.forecast_backend.delete_dataset_group(dataset_group_arn) + return 200, {}, None + except AWSError as err: + return err.response() + + @amzn_request_id + def update_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + dataset_arns = self._get_param("DatasetArns") + try: + self.forecast_backend.update_dataset_group(dataset_group_arn, dataset_arns) + return 200, {}, None + except AWSError as err: + return err.response() + + @amzn_request_id + def list_dataset_groups(self): + list_all = self.forecast_backend.list_dataset_groups() + list_all = sorted( + [ + { + "DatasetGroupArn": dsg.arn, + "DatasetGroupName": dsg.dataset_group_name, + "CreationTime": dsg.creation_date, + "LastModificationTime": dsg.creation_date, + } + for dsg in list_all + ], + key=lambda x: x["LastModificationTime"], + reverse=True, + ) + response = {"DatasetGroups": list_all} + return 200, {}, json.dumps(response) diff --git a/moto/forecast/urls.py b/moto/forecast/urls.py new file mode 100644 index 000000000..221659e6f --- /dev/null +++ b/moto/forecast/urls.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .responses import ForecastResponse + +url_bases = ["https?://forecast.(.+).amazonaws.com"] + +url_paths = {"{0}/$": ForecastResponse.dispatch} diff --git a/moto/glue/models.py b/moto/glue/models.py index 8f3396d9a..cf930cfb2 100644 --- a/moto/glue/models.py +++ b/moto/glue/models.py @@ -34,6 +34,9 @@ class GlueBackend(BaseBackend): except KeyError: raise DatabaseNotFoundException(database_name) + def get_databases(self): + return [self.databases[key] for key in self.databases] if self.databases else [] + def create_table(self, database_name, 
table_name, table_input): database = self.get_database(database_name) diff --git a/moto/glue/responses.py b/moto/glue/responses.py index bf7b5776b..66185e099 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -30,6 +30,12 @@ class GlueResponse(BaseResponse): database = self.glue_backend.get_database(database_name) return json.dumps({"Database": {"Name": database.name}}) + def get_databases(self): + database_list = self.glue_backend.get_databases() + return json.dumps( + {"DatabaseList": [{"Name": database.name} for database in database_list]} + ) + def create_table(self): database_name = self.parameters.get("DatabaseName") table_input = self.parameters.get("TableInput") diff --git a/moto/core/access_control.py b/moto/iam/access_control.py similarity index 99% rename from moto/core/access_control.py rename to moto/iam/access_control.py index 8ba0c3ba1..bcde25d9e 100644 --- a/moto/core/access_control.py +++ b/moto/iam/access_control.py @@ -25,8 +25,6 @@ from botocore.credentials import Credentials from six import string_types from moto.core import ACCOUNT_ID -from moto.iam.models import Policy -from moto.iam import iam_backend from moto.core.exceptions import ( SignatureDoesNotMatchError, AccessDeniedError, @@ -44,6 +42,7 @@ from moto.s3.exceptions import ( S3SignatureDoesNotMatchError, ) from moto.sts import sts_backend +from .models import iam_backend, Policy log = logging.getLogger(__name__) diff --git a/moto/iam/config.py b/moto/iam/config.py new file mode 100644 index 000000000..cf116f945 --- /dev/null +++ b/moto/iam/config.py @@ -0,0 +1,321 @@ +import json +import boto3 +from moto.core.exceptions import InvalidNextTokenException +from moto.core.models import ConfigQueryModel +from moto.iam import iam_backends + + +class RoleConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + aggregator=None, + ): + # IAM roles are "global" and aren't assigned into any availability zone + # The resource ID is an AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" + # The resource name is a user-assigned string like "MyDevelopmentAdminRole" + # Stored in moto backend with the AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" + + # Grab roles from backend; need the full values since names and IDs are different + role_list = list(self.backends["global"].roles.values()) + + if not role_list: + return [], None + + # Filter by resource name or ids + if resource_name or resource_ids: + filtered_roles = [] + # resource_name takes precedence over resource_ids + if resource_name: + for role in role_list: + if role.name == resource_name: + filtered_roles = [role] + break + # but if both are passed, it must be a subset + if filtered_roles and resource_ids: + if filtered_roles[0].id not in resource_ids: + return [], None + else: + for role in role_list: + if role.id in resource_ids: + filtered_roles.append(role) + + # Filtered roles are now the subject for the listing + role_list = filtered_roles + + if aggregator: + # IAM is a little special; Roles are created in us-east-1 (which AWS calls the "global" region) + # However, the resource will be returned in the aggregator (in duplicate) for each region in the aggregator + # Therefore, we'll need to find out the regions where the aggregators are running, and then duplicate the resource there + + # In practice, it looks like AWS will only duplicate these resources if you've "used" any roles in the region, but since + #
we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator + aggregated_regions = [] + aggregator_sources = aggregator.get( + "account_aggregation_sources" + ) or aggregator.get("organization_aggregation_source") + for source in aggregator_sources: + source_dict = source.__dict__ + if source_dict.get("all_aws_regions", False): + aggregated_regions = boto3.Session().get_available_regions("config") + break + for region in source_dict.get("aws_regions", []): + aggregated_regions.append(region) + + duplicate_role_list = [] + for region in list(set(aggregated_regions)): + for role in role_list: + duplicate_role_list.append( + { + "_id": "{}{}".format( + role.id, region + ), # this is only for sorting, isn't returned outside of this function + "type": "AWS::IAM::Role", + "id": role.id, + "name": role.name, + "region": region, + } + ) + + # Pagination logic, sort by role id + sorted_roles = sorted(duplicate_role_list, key=lambda role: role["_id"]) + else: + # Non-aggregated queries are in the else block, and we can treat these like a normal config resource + # Pagination logic, sort by role id + sorted_roles = sorted(role_list, key=lambda role: role.id) + + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + try: + # Find the index of the next token + start = next( + index + for (index, r) in enumerate(sorted_roles) + if next_token == (r["_id"] if aggregator else r.id) + ) + except StopIteration: + raise InvalidNextTokenException() + + # Get the list of items to collect: + role_list = sorted_roles[start : (start + limit)] + + if len(sorted_roles) > (start + limit): + record = sorted_roles[start + limit] + new_token = record["_id"] if aggregator else record.id + + return ( + [ + { + "type": "AWS::IAM::Role", + "id": role["id"] if aggregator else role.id, + "name": role["name"] if aggregator else role.name, + "region": role["region"] if aggregator else "global", + } + for role in role_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + + role = self.backends["global"].roles.get(resource_id, {}) + + if not role: + return + + if resource_name and role.name != resource_name: + return + + # Format the role to the AWS Config format: + config_data = role.to_config_dict() + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + # Supplementary config needs all values converted to JSON strings if they are not strings already: + for field, value in config_data["supplementaryConfiguration"].items(): + if not isinstance(value, str): + config_data["supplementaryConfiguration"][field] = json.dumps(value) + + return config_data + + +class PolicyConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + aggregator=None, + ): + # IAM policies are "global" and aren't assigned into any availability zone + # The resource ID is an AWS-assigned random string like "ANPA0BSVNSZK00SJSPVUJ" + # The resource name is a user-assigned string like "my-development-policy" + # Stored in moto backend with the arn like "arn:aws:iam::123456789012:policy/my-development-policy" + + policy_list = list(self.backends["global"].managed_policies.values()) + + # We don't want to include AWS Managed Policies.
This technically needs to + # respect the configuration recorder's 'includeGlobalResourceTypes' setting, + # but since that is enabled by default and moto's config doesn't yet support + # custom configuration recorders, we'll just behave as the default. + policy_list = list( + filter( + lambda policy: not policy.arn.startswith("arn:aws:iam::aws"), + policy_list, + ) + ) + + if not policy_list: + return [], None + + # Filter by resource name or ids + if resource_name or resource_ids: + filtered_policies = [] + # resource_name takes precedence over resource_ids + if resource_name: + for policy in policy_list: + if policy.name == resource_name: + filtered_policies = [policy] + break + # but if both are passed, it must be a subset + if filtered_policies and resource_ids: + if filtered_policies[0].id not in resource_ids: + return [], None + + else: + for policy in policy_list: + if policy.id in resource_ids: + filtered_policies.append(policy) + + # Filtered policies are now the subject for the listing + policy_list = filtered_policies + + if aggregator: + # IAM is a little special; Policies are created in us-east-1 (which AWS calls the "global" region) + # However, the resource will be returned in the aggregator (in duplicate) for each region in the aggregator + # Therefore, we'll need to find out the regions where the aggregators are running, and then duplicate the resource there + + # In practice, it looks like AWS will only duplicate these resources if you've "used" any policies in the region, but since + # we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator + aggregated_regions = [] + aggregator_sources = aggregator.get( + "account_aggregation_sources" + ) or aggregator.get("organization_aggregation_source") + for source in aggregator_sources: + source_dict = source.__dict__ + if source_dict.get("all_aws_regions", False): + aggregated_regions = boto3.Session().get_available_regions("config") + break + for region in source_dict.get("aws_regions", []): + aggregated_regions.append(region) + + duplicate_policy_list = [] + for region in list(set(aggregated_regions)): + for policy in policy_list: + duplicate_policy_list.append( + { + "_id": "{}{}".format( + policy.id, region + ), # this is only for sorting, isn't returned outside of this function + "type": "AWS::IAM::Policy", + "id": policy.id, + "name": policy.name, + "region": region, + } + ) + + # Pagination logic, sort by policy id + sorted_policies = sorted( + duplicate_policy_list, key=lambda policy: policy["_id"] + ) + + else: + # Non-aggregated queries are in the else block, and we can treat these like a normal config resource + # Pagination logic, sort by policy id + sorted_policies = sorted(policy_list, key=lambda policy: policy.id) + + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + try: + # Find the index of the next token + start = next( + index + for (index, p) in enumerate(sorted_policies) + if next_token == (p["_id"] if aggregator else p.id) + ) + except StopIteration: + raise InvalidNextTokenException() + + # Get the list of items to collect: + policy_list = sorted_policies[start : (start + limit)] + + if len(sorted_policies) > (start + limit): + record = sorted_policies[start + limit] + new_token = record["_id"] if aggregator else record.id + + return ( + [ + { + "type": "AWS::IAM::Policy", + "id": policy["id"] if aggregator else policy.id, + "name": policy["name"] if aggregator else policy.name, + "region": policy["region"] if aggregator else "global", + } + for policy in
policy_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + # Policies are listed in the backend by ARN, but we have to accept the PolicyId as the resource_id, + # so we make a fairly crude search for it + policy = None + for arn in self.backends["global"].managed_policies.keys(): + policy_candidate = self.backends["global"].managed_policies[arn] + if policy_candidate.id == resource_id: + policy = policy_candidate + break + + if not policy: + return + + if resource_name and policy.name != resource_name: + return + + # Format the policy to the AWS Config format: + config_data = policy.to_config_dict() + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + # Supplementary config needs all values converted to JSON strings if they are not strings already: + for field, value in config_data["supplementaryConfiguration"].items(): + if not isinstance(value, str): + config_data["supplementaryConfiguration"][field] = json.dumps(value) + + return config_data + + +role_config_query = RoleConfigQuery(iam_backends) +policy_config_query = PolicyConfigQuery(iam_backends) diff --git a/moto/iam/models.py b/moto/iam/models.py old mode 100644 new mode 100755 index 18b3a7a6f..ac8402e57 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -8,13 +8,14 @@ import sys from datetime import datetime import json import re +import time from cryptography import x509 from cryptography.hazmat.backends import default_backend -from six.moves.urllib.parse import urlparse +from six.moves.urllib import parse from moto.core.exceptions import RESTError -from moto.core import BaseBackend, BaseModel, ACCOUNT_ID +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID, CloudFormationModel from moto.core.utils import ( iso_8601_datetime_without_milliseconds, iso_8601_datetime_with_milliseconds, @@ -83,7 +84,11 @@ class VirtualMfaDevice(object): return iso_8601_datetime_without_milliseconds(self.enable_date) -class Policy(BaseModel): +class Policy(CloudFormationModel): + + # Note: This class does not implement the CloudFormation support for AWS::IAM::Policy, as that CF resource + # is for creating *inline* policies. That is done in class InlinePolicy.
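Stepping out of the diff for a moment: the RoleConfigQuery/PolicyConfigQuery instances registered just above are what back AWS Config's resource listing for IAM. A hedged sketch (not part of the diff; role name illustrative) of exercising them through boto3:

import boto3
from moto import mock_config, mock_iam

@mock_config
@mock_iam
def config_listing_demo():
    iam = boto3.client("iam", region_name="us-east-1")
    iam.create_role(RoleName="MyDevelopmentAdminRole", AssumeRolePolicyDocument="{}")
    config = boto3.client("config", region_name="us-east-1")
    resources = config.list_discovered_resources(resourceType="AWS::IAM::Role")
    # Non-aggregated listings report the pseudo-region "global" for IAM resources.
    print(resources["resourceIdentifiers"])

config_listing_demo()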
+ is_attachable = False def __init__( @@ -120,9 +125,10 @@ class Policy(BaseModel): def update_default_version(self, new_default_version_id): for version in self.versions: + if version.version_id == new_default_version_id: + version.is_default = True if version.version_id == self.default_version_id: version.is_default = False - break self.default_version_id = new_default_version_id @property @@ -149,7 +155,7 @@ class OpenIDConnectProvider(BaseModel): self._errors = [] self._validate(url, thumbprint_list, client_id_list) - parsed_url = urlparse(url) + parsed_url = parse.urlparse(url) self.url = parsed_url.netloc + parsed_url.path self.thumbprint_list = thumbprint_list self.client_id_list = client_id_list @@ -197,7 +203,7 @@ class OpenIDConnectProvider(BaseModel): self._raise_errors() - parsed_url = urlparse(url) + parsed_url = parse.urlparse(url) if not parsed_url.scheme or not parsed_url.netloc: raise ValidationError("Invalid Open ID Connect Provider URL") @@ -261,6 +267,48 @@ class ManagedPolicy(Policy): def arn(self): return "arn:aws:iam::{0}:policy{1}{2}".format(ACCOUNT_ID, self.path, self.name) + def to_config_dict(self): + return { + "version": "1.3", + "configurationItemCaptureTime": str(self.create_date), + "configurationItemStatus": "OK", + "configurationStateId": str( + int(time.mktime(self.create_date.timetuple())) + ), # PY2 and 3 compatible + "arn": "arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, self.name), + "resourceType": "AWS::IAM::Policy", + "resourceId": self.id, + "resourceName": self.name, + "awsRegion": "global", + "availabilityZone": "Not Applicable", + "resourceCreationTime": str(self.create_date), + "configuration": { + "policyName": self.name, + "policyId": self.id, + "arn": "arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, self.name), + "path": self.path, + "defaultVersionId": self.default_version_id, + "attachmentCount": self.attachment_count, + "permissionsBoundaryUsageCount": 0, + "isAttachable": ManagedPolicy.is_attachable, + "description": self.description, + "createDate": str(self.create_date.isoformat()), + "updateDate": str(self.create_date.isoformat()), + "policyVersionList": list( + map( + lambda version: { + "document": parse.quote(version.document), + "versionId": version.version_id, + "isDefaultVersion": version.is_default, + "createDate": str(version.create_date), + }, + self.versions, + ) + ), + }, + "supplementaryConfiguration": {}, + } + class AWSManagedPolicy(ManagedPolicy): """AWS-managed policy.""" @@ -287,17 +335,159 @@ class AWSManagedPolicy(ManagedPolicy): # AWS defines some of its own managed policies and we periodically # import them via `make aws_managed_policies` +# FIXME: Takes about 40ms at import time aws_managed_policies = [ AWSManagedPolicy.from_data(name, d) for name, d in json.loads(aws_managed_policies_data).items() ] -class InlinePolicy(Policy): - """TODO: is this needed?""" +class InlinePolicy(CloudFormationModel): + # Represents an Inline Policy created by CloudFormation + def __init__( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + self.name = resource_name + self.policy_name = None + self.policy_document = None + self.group_names = None + self.role_names = None + self.user_names = None + self.update(policy_name, policy_document, group_names, role_names, user_names) + + def update( + self, policy_name, policy_document, group_names, role_names, user_names, + ): + self.policy_name = policy_name + self.policy_document = ( + json.dumps(policy_document) + if 
isinstance(policy_document, dict) + else policy_document + ) + self.group_names = group_names + self.role_names = role_names + self.user_names = user_names + + @staticmethod + def cloudformation_name_type(): + return None # The resource is never named via the template - the PolicyName property names the policy itself, not the resource + + @staticmethod + def cloudformation_type(): + return "AWS::IAM::Policy" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + policy_document = properties.get("PolicyDocument") + policy_name = properties.get("PolicyName") + user_names = properties.get("Users") + role_names = properties.get("Roles") + group_names = properties.get("Groups") + + return iam_backend.create_inline_policy( + resource_physical_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + properties = cloudformation_json.get("Properties", {}) + policy_document = properties.get("PolicyDocument") + policy_name = properties.get("PolicyName", original_resource.name) + user_names = properties.get("Users") + role_names = properties.get("Roles") + group_names = properties.get("Groups") + + return iam_backend.update_inline_policy( + original_resource.name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_inline_policy(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = [] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.name + + def apply_policy(self, backend): + if self.user_names: + for user_name in self.user_names: + backend.put_user_policy( + user_name, self.policy_name, self.policy_document + ) + if self.role_names: + for role_name in self.role_names: + backend.put_role_policy( + role_name, self.policy_name, self.policy_document + ) + if self.group_names: + for group_name in self.group_names: + backend.put_group_policy( + group_name, self.policy_name, self.policy_document + ) + + def unapply_policy(self, backend): + if self.user_names: + for user_name in self.user_names: + backend.delete_user_policy(user_name, self.policy_name) + if self.role_names: + for role_name in self.role_names: + backend.delete_role_policy(role_name, self.policy_name) + if self.group_names: + for group_name in self.group_names: + backend.delete_group_policy(group_name, self.policy_name) -class Role(BaseModel): +class Role(CloudFormationModel): def __init__( self, role_id, @@ -325,14 +515,28 @@
class Role(BaseModel): def created_iso_8601(self): return iso_8601_datetime_with_milliseconds(self.create_date) + @staticmethod + def cloudformation_name_type(): + return "RoleName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html + return "AWS::IAM::Role" + @classmethod def create_from_cloudformation_json( - cls, resource_name, cloudformation_json, region_name + cls, resource_physical_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] + role_name = ( + properties["RoleName"] + if "RoleName" in properties + else resource_physical_name + ) role = iam_backend.create_role( - role_name=resource_name, + role_name=role_name, assume_role_policy_document=properties["AssumeRolePolicyDocument"], path=properties.get("Path", "/"), permissions_boundary=properties.get("PermissionsBoundary", ""), @@ -353,6 +557,69 @@ class Role(BaseModel): def arn(self): return "arn:aws:iam::{0}:role{1}{2}".format(ACCOUNT_ID, self.path, self.name) + def to_config_dict(self): + _managed_policies = [] + for key in self.managed_policies.keys(): + _managed_policies.append( + {"policyArn": key, "policyName": iam_backend.managed_policies[key].name} + ) + + _role_policy_list = [] + for key, value in self.policies.items(): + _role_policy_list.append( + {"policyName": key, "policyDocument": parse.quote(value)} + ) + + _instance_profiles = [] + # Only embed the instance profiles that actually contain this role: + for instance_profile in iam_backend.instance_profiles.values(): + for profile_role in instance_profile.roles: + if profile_role.id == self.id: + _instance_profiles.append( + instance_profile.to_embedded_config_dict() + ) + break + + config_dict = { + "version": "1.3", + "configurationItemCaptureTime": str(self.create_date), + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": str( + int(time.mktime(self.create_date.timetuple())) + ), # PY2 and 3 compatible + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, self.name), + "resourceType": "AWS::IAM::Role", + "resourceId": self.name, + "resourceName": self.name, + "awsRegion": "global", + "availabilityZone": "Not Applicable", + "resourceCreationTime": str(self.create_date), + "relatedEvents": [], + "relationships": [], + "tags": self.tags, + "configuration": { + "path": self.path, + "roleName": self.name, + "roleId": self.id, + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, self.name), + "assumeRolePolicyDocument": parse.quote( + self.assume_role_policy_document + ) + if self.assume_role_policy_document + else None, + "instanceProfileList": _instance_profiles, + "rolePolicyList": _role_policy_list, + "createDate": self.create_date.isoformat(), + "attachedManagedPolicies": _managed_policies, + "permissionsBoundary": self.permissions_boundary, + "tags": list( + map( + lambda key: {"key": key, "value": self.tags[key]["Value"]}, + self.tags, + ) + ), + "roleLastUsed": None, + }, + "supplementaryConfiguration": {}, + } + return config_dict + def put_policy(self, policy_name, policy_json): self.policies[policy_name] = policy_json @@ -379,7 +646,7 @@ class Role(BaseModel): return [self.tags[tag] for tag in self.tags] -class InstanceProfile(BaseModel): +class InstanceProfile(CloudFormationModel): def __init__(self, instance_profile_id, name, path, roles): self.id = instance_profile_id self.name = name @@ -391,15 +658,26 @@ class InstanceProfile(BaseModel): def created_iso_8601(self): return iso_8601_datetime_with_milliseconds(self.create_date) + @staticmethod + def cloudformation_name_type(): + return "InstanceProfileName" + +
@staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html + return "AWS::IAM::InstanceProfile" + @classmethod def create_from_cloudformation_json( - cls, resource_name, cloudformation_json, region_name + cls, resource_physical_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] role_ids = properties["Roles"] return iam_backend.create_instance_profile( - name=resource_name, path=properties.get("Path", "/"), role_ids=role_ids + name=resource_physical_name, + path=properties.get("Path", "/"), + role_ids=role_ids, ) @property @@ -419,6 +697,43 @@ class InstanceProfile(BaseModel): return self.arn raise UnformattedGetAttTemplateException() + def to_embedded_config_dict(self): + # Instance profiles are not config items themselves, but they are embedded in IAM role + # config items using a config-like JSON structure that differs from Role.to_config_dict() + roles = [] + for role in self.roles: + roles.append( + { + "path": role.path, + "roleName": role.name, + "roleId": role.id, + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, role.name), + "createDate": str(role.create_date), + "assumeRolePolicyDocument": parse.quote( + role.assume_role_policy_document + ), + "description": role.description, + "maxSessionDuration": None, + "permissionsBoundary": role.permissions_boundary, + "tags": list( + map( + lambda key: {"key": key, "value": role.tags[key]["Value"]}, + role.tags, + ) + ), + "roleLastUsed": None, + } + ) + + return { + "path": self.path, + "instanceProfileName": self.name, + "instanceProfileId": self.id, + "arn": "arn:aws:iam::{}:instance-profile/{}".format(ACCOUNT_ID, self.name), + "createDate": str(self.create_date), + "roles": roles, + } + class Certificate(BaseModel): def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None): @@ -452,14 +767,14 @@ class SigningCertificate(BaseModel): return iso_8601_datetime_without_milliseconds(self.upload_date) -class AccessKey(BaseModel): - def __init__(self, user_name): +class AccessKey(CloudFormationModel): + def __init__(self, user_name, status="Active"): self.user_name = user_name self.access_key_id = "AKIA" + random_access_key() self.secret_access_key = random_alphanumeric(40) - self.status = "Active" + self.status = status self.create_date = datetime.utcnow() - self.last_used = datetime.utcnow() + self.last_used = None @property def created_iso_8601(self): @@ -476,6 +791,66 @@ class AccessKey(BaseModel): return self.secret_access_key raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return None # The resource is never named via the template; an access key has no name property
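+ # A hedged sketch of the template shape handled by this class (the UserName,
+ # Status and Serial property names come from the handlers below; the resource
+ # and value names in the example are illustrative only):
+ #
+ #     "MyAccessKey": {
+ #         "Type": "AWS::IAM::AccessKey",
+ #         "Properties": {
+ #             "UserName": "my-user",
+ #             "Status": "Active",   # optional; defaults to "Active"
+ #             "Serial": 1           # listed in is_replacement_update, so changing it replaces the key
+ #         }
+ #     }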
+ + @staticmethod + def cloudformation_type(): + return "AWS::IAM::AccessKey" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + user_name = properties.get("UserName") + status = properties.get("Status", "Active") + + return iam_backend.create_access_key(user_name, status=status,) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + new_resource = cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + cls.delete_from_cloudformation_json( + original_resource.physical_resource_id, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + properties = cloudformation_json.get("Properties", {}) + status = properties.get("Status") + return iam_backend.update_access_key( + original_resource.user_name, original_resource.access_key_id, status + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_access_key_by_name(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["Serial", "UserName"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.access_key_id + class SshPublicKey(BaseModel): def __init__(self, user_name, ssh_public_key_body): @@ -541,8 +916,14 @@ class Group(BaseModel): def list_policies(self): return self.policies.keys() + def delete_policy(self, policy_name): + if policy_name not in self.policies: + raise IAMNotFoundException("Policy {0} not found".format(policy_name)) -class User(BaseModel): + del self.policies[policy_name] + + +class User(CloudFormationModel): def __init__(self, name, path=None, tags=None): self.name = name self.id = random_resource_id() @@ -591,8 +972,8 @@ class User(BaseModel): del self.policies[policy_name] - def create_access_key(self): - access_key = AccessKey(self.name) + def create_access_key(self, status="Active"): + access_key = AccessKey(self.name, status) self.access_keys.append(access_key) return access_key @@ -610,9 +991,11 @@ class User(BaseModel): key = self.get_access_key_by_id(access_key_id) self.access_keys.remove(key) - def update_access_key(self, access_key_id, status): + def update_access_key(self, access_key_id, status=None): key = self.get_access_key_by_id(access_key_id) - key.status = status + if status is not None: + key.status = status + return key def get_access_key_by_id(self, access_key_id): for key in self.access_keys: @@ -623,6 +1006,15 @@ class User(BaseModel): "The Access Key with id {0} cannot be found".format(access_key_id) ) + def has_access_key(self, access_key_id): + return any( + [ + access_key + for access_key in self.access_keys + if access_key.access_key_id == access_key_id + ] + ) + def upload_ssh_public_key(self, ssh_public_key_body): pubkey = SshPublicKey(self.name, ssh_public_key_body) self.ssh_public_keys.append(pubkey) @@ -654,7 +1046,7 @@ class User(BaseModel): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == "Arn": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" 
]"') + return self.arn raise UnformattedGetAttTemplateException() def to_csv(self): @@ -671,20 +1063,50 @@ class User(BaseModel): if len(self.access_keys) == 0: access_key_1_active = "false" access_key_1_last_rotated = "N/A" + access_key_1_last_used = "N/A" access_key_2_active = "false" access_key_2_last_rotated = "N/A" + access_key_2_last_used = "N/A" elif len(self.access_keys) == 1: - access_key_1_active = "true" - access_key_1_last_rotated = date_created.strftime(date_format) + access_key_1_active = ( + "true" if self.access_keys[0].status == "Active" else "false" + ) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime( + date_format + ) + access_key_1_last_used = ( + "N/A" + if self.access_keys[0].last_used is None + else self.access_keys[0].last_used.strftime(date_format) + ) access_key_2_active = "false" access_key_2_last_rotated = "N/A" + access_key_2_last_used = "N/A" else: - access_key_1_active = "true" - access_key_1_last_rotated = date_created.strftime(date_format) - access_key_2_active = "true" - access_key_2_last_rotated = date_created.strftime(date_format) + access_key_1_active = ( + "true" if self.access_keys[0].status == "Active" else "false" + ) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime( + date_format + ) + access_key_1_last_used = ( + "N/A" + if self.access_keys[0].last_used is None + else self.access_keys[0].last_used.strftime(date_format) + ) + access_key_2_active = ( + "true" if self.access_keys[1].status == "Active" else "false" + ) + access_key_2_last_rotated = self.access_keys[1].create_date.strftime( + date_format + ) + access_key_2_last_used = ( + "N/A" + if self.access_keys[1].last_used is None + else self.access_keys[1].last_used.strftime(date_format) + ) - return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A".format( + return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},not_supported,not_supported,false,N/A,false,N/A\n".format( self.name, self.arn, date_created.strftime(date_format), @@ -693,10 +1115,72 @@ class User(BaseModel): date_created.strftime(date_format), access_key_1_active, access_key_1_last_rotated, + access_key_1_last_used, access_key_2_active, access_key_2_last_rotated, + access_key_2_last_used, ) + @staticmethod + def cloudformation_name_type(): + return "UserName" + + @staticmethod + def cloudformation_type(): + return "AWS::IAM::User" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + path = properties.get("Path") + return iam_backend.create_user(resource_physical_name, path) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "Path" in properties: + original_resource.path = properties["Path"] + 
return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_user(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["UserName"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.name + class AccountPasswordPolicy(BaseModel): def __init__( @@ -909,6 +1393,10 @@ class AccountSummary(BaseModel): return len(self._iam_backend.users) +def filter_items_with_path_prefix(path_prefix, items): + return [item for item in items if item.path.startswith(path_prefix)] + + class IAMBackend(BaseBackend): def __init__(self): self.instance_profiles = {} @@ -925,6 +1413,8 @@ class IAMBackend(BaseBackend): self.virtual_mfa_devices = {} self.account_password_policy = None self.account_summary = AccountSummary(self) + self.inline_policies = {} + self.access_keys = {} super(IAMBackend, self).__init__() def _init_managed_policies(self): @@ -946,6 +1436,23 @@ class IAMBackend(BaseBackend): role.max_session_duration = max_session_duration return role + def put_role_permissions_boundary(self, role_name, permissions_boundary): + if permissions_boundary and not self.policy_arn_regex.match( + permissions_boundary + ): + raise RESTError( + "InvalidParameterValue", + "Value ({}) for parameter PermissionsBoundary is invalid.".format( + permissions_boundary + ), + ) + role = self.get_role(role_name) + role.permissions_boundary = permissions_boundary + + def delete_role_permissions_boundary(self, role_name): + role = self.get_role(role_name) + role.permissions_boundary = None + def detach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) try: @@ -1038,6 +1545,29 @@ return self._filter_attached_policies(policies, marker, max_items, path_prefix) + def set_default_policy_version(self, policy_arn, version_id): + # "re" is already imported at module level; the patterns are raw strings + # so the backslashes are not treated as escape sequences. + if re.match(r"v[1-9][0-9]*(\.[A-Za-z0-9-]*)?", version_id) is None: + raise ValidationError( + r"Value '{0}' at 'versionId' failed to satisfy constraint: Member must satisfy regular expression pattern: v[1-9][0-9]*(\.[A-Za-z0-9-]*)?".format( + version_id + ) + ) + + policy = self.get_policy(policy_arn) + + for version in policy.versions: + if version.version_id == version_id: + policy.update_default_version(version_id) + return True + + raise NoSuchEntity( + "Policy {0} version {1} does not exist or is not attachable.".format( + policy_arn, version_id + ) + ) + def _filter_attached_policies(self, policies, marker, max_items, path_prefix): if path_prefix: policies = [p for p in policies if p.path.startswith(path_prefix)] @@ -1111,8 +1641,8 @@ class IAMBackend(BaseBackend): def delete_role(self, role_name): role = self.get_role(role_name) for instance_profile in self.get_instance_profiles(): - for role in instance_profile.roles: - if role.name == role_name: + for profile_role in instance_profile.roles: + if profile_role.name == role_name: raise IAMConflictException( code="DeleteConflict", message="Cannot delete entity, must remove roles from instance profile first.", @@ -1304,6 +1834,15 @@ self.instance_profiles[name] = instance_profile return instance_profile + def delete_instance_profile(self, name): + instance_profile = self.get_instance_profile(name) + if
len(instance_profile.roles) > 0: + raise IAMConflictException( + code="DeleteConflict", + message="Cannot delete entity, must remove roles from instance profile first.", + ) + del self.instance_profiles[name] + def get_instance_profile(self, profile_name): for profile in self.get_instance_profiles(): if profile.name == profile_name: @@ -1313,6 +1852,13 @@ class IAMBackend(BaseBackend): "Instance profile {0} not found".format(profile_name) ) + def get_instance_profile_by_arn(self, profile_arn): + for profile in self.get_instance_profiles(): + if profile.arn == profile_arn: + return profile + + raise IAMNotFoundException("Instance profile {0} not found".format(profile_arn)) + def get_instance_profiles(self): return self.instance_profiles.values() @@ -1410,6 +1956,10 @@ class IAMBackend(BaseBackend): group = self.get_group(group_name) return group.list_policies() + def delete_group_policy(self, group_name, policy_name): + group = self.get_group(group_name) + group.delete_policy(policy_name) + def get_group_policy(self, group_name, policy_name): group = self.get_group(group_name) return group.get_policy(policy_name) @@ -1444,7 +1994,11 @@ class IAMBackend(BaseBackend): def list_users(self, path_prefix, marker, max_items): users = None try: + users = self.users.values() + if path_prefix: + users = filter_items_with_path_prefix(path_prefix, users) + except KeyError: raise IAMNotFoundException( "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items) @@ -1464,16 +2018,23 @@ class IAMBackend(BaseBackend): user.name = new_user_name self.users[new_user_name] = self.users.pop(user_name) - def list_roles(self, path_prefix, marker, max_items): - roles = None - try: - roles = self.roles.values() - except KeyError: - raise IAMNotFoundException( - "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items) - ) + def list_roles(self, path_prefix=None, marker=None, max_items=None): + path_prefix = path_prefix if path_prefix else "/" + max_items = int(max_items) if max_items else 100 + start_index = int(marker) if marker else 0 - return roles + roles = self.roles.values() + roles = filter_items_with_path_prefix(path_prefix, roles) + sorted_roles = sorted(roles, key=lambda role: role.id) + + roles_to_return = sorted_roles[start_index : start_index + max_items] + + if len(sorted_roles) <= (start_index + max_items): + marker = None + else: + marker = str(start_index + max_items) + + return roles_to_return, marker def upload_signing_certificate(self, user_name, body): user = self.get_user(user_name) @@ -1602,14 +2163,15 @@ class IAMBackend(BaseBackend): def delete_policy(self, policy_arn): del self.managed_policies[policy_arn] - def create_access_key(self, user_name=None): + def create_access_key(self, user_name=None, status="Active"): user = self.get_user(user_name) - key = user.create_access_key() + key = user.create_access_key(status) + self.access_keys[key.physical_resource_id] = key return key - def update_access_key(self, user_name, access_key_id, status): + def update_access_key(self, user_name, access_key_id, status=None): user = self.get_user(user_name) - user.update_access_key(access_key_id, status) + return user.update_access_key(access_key_id, status) def get_access_key_last_used(self, access_key_id): access_keys_list = self.get_all_access_keys_for_all_users() @@ -1634,7 +2196,17 @@ class IAMBackend(BaseBackend): def delete_access_key(self, access_key_id, user_name): user = self.get_user(user_name) - user.delete_access_key(access_key_id) + access_key = 
user.get_access_key_by_id(access_key_id) + self.delete_access_key_by_name(access_key.access_key_id) + + def delete_access_key_by_name(self, name): + key = self.access_keys[name] + try: # User may have been deleted before their access key... + user = self.get_user(key.user_name) + user.delete_access_key(key.access_key_id) + except IAMNotFoundException: + pass + del self.access_keys[name] def upload_ssh_public_key(self, user_name, ssh_public_key_body): user = self.get_user(user_name) @@ -1794,7 +2366,7 @@ class IAMBackend(BaseBackend): def get_credential_report(self): if not self.credential_report: raise IAMReportNotPresentException("Credential report not present") - report = "user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n" + report = "user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_1_last_used_date,access_key_1_last_used_region,access_key_1_last_used_service,access_key_2_active,access_key_2_last_rotated,access_key_2_last_used_date,access_key_2_last_used_region,access_key_2_last_used_service,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n" for user in self.users: report += self.users[user].to_csv() return base64.b64encode(report.encode("ascii")).decode("ascii") @@ -1945,5 +2517,62 @@ class IAMBackend(BaseBackend): def get_account_summary(self): return self.account_summary + def create_inline_policy( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + if resource_name in self.inline_policies: + raise IAMConflictException( + "EntityAlreadyExists", + "Inline Policy {0} already exists".format(resource_name), + ) + + inline_policy = InlinePolicy( + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + self.inline_policies[resource_name] = inline_policy + inline_policy.apply_policy(self) + return inline_policy + + def get_inline_policy(self, policy_id): + inline_policy = None + try: + inline_policy = self.inline_policies[policy_id] + except KeyError: + raise IAMNotFoundException("Inline policy {0} not found".format(policy_id)) + return inline_policy + + def update_inline_policy( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + inline_policy = self.get_inline_policy(resource_name) + inline_policy.unapply_policy(self) + inline_policy.update( + policy_name, policy_document, group_names, role_names, user_names, + ) + inline_policy.apply_policy(self) + return inline_policy + + def delete_inline_policy(self, policy_id): + inline_policy = self.get_inline_policy(policy_id) + inline_policy.unapply_policy(self) + del self.inline_policies[policy_id] + iam_backend = IAMBackend() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 12501769e..d6f8ae020 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -133,7 +133,7 @@ class IamResponse(BaseResponse): entity_users.append(user.name) elif entity == "Role": - roles = iam_backend.list_roles(path_prefix, marker, max_items) + roles, _ = iam_backend.list_roles(path_prefix, marker, max_items) if roles: for role in roles: for p in role.managed_policies: @@ -156,7 +156,7 @@ class IamResponse(BaseResponse): 
if p == policy_arn: entity_users.append(user.name) - roles = iam_backend.list_roles(path_prefix, marker, max_items) + roles, _ = iam_backend.list_roles(path_prefix, marker, max_items) if roles: for role in roles: for p in role.managed_policies: @@ -175,6 +175,13 @@ class IamResponse(BaseResponse): roles=entity_roles, users=entity_users, groups=entity_groups ) + def set_default_policy_version(self): + policy_arn = self._get_param("PolicyArn") + version_id = self._get_param("VersionId") + iam_backend.set_default_policy_version(policy_arn, version_id) + template = self.response_template(SET_DEFAULT_POLICY_VERSION_TEMPLATE) + return template.render() + def create_role(self): role_name = self._get_param("RoleName") path = self._get_param("Path") @@ -265,6 +272,19 @@ class IamResponse(BaseResponse): template = self.response_template(UPDATE_ROLE_TEMPLATE) return template.render(role=role) + def put_role_permissions_boundary(self): + permissions_boundary = self._get_param("PermissionsBoundary") + role_name = self._get_param("RoleName") + iam_backend.put_role_permissions_boundary(role_name, permissions_boundary) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="PutRolePermissionsBoundary") + + def delete_role_permissions_boundary(self): + role_name = self._get_param("RoleName") + iam_backend.delete_role_permissions_boundary(role_name) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="DeleteRolePermissionsBoundary") + def create_policy_version(self): policy_arn = self._get_param("PolicyArn") policy_document = self._get_param("PolicyDocument") @@ -305,6 +325,13 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE) return template.render(profile=profile) + def delete_instance_profile(self): + profile_name = self._get_param("InstanceProfileName") + + profile = iam_backend.delete_instance_profile(profile_name) + template = self.response_template(DELETE_INSTANCE_PROFILE_TEMPLATE) + return template.render(profile=profile) + def get_instance_profile(self): profile_name = self._get_param("InstanceProfileName") profile = iam_backend.get_instance_profile(profile_name) @@ -329,10 +356,13 @@ class IamResponse(BaseResponse): return template.render() def list_roles(self): - roles = iam_backend.get_roles() + path_prefix = self._get_param("PathPrefix", "/") + marker = self._get_param("Marker", "0") + max_items = self._get_param("MaxItems", 100) + roles, marker = iam_backend.list_roles(path_prefix, marker, max_items) template = self.response_template(LIST_ROLES_TEMPLATE) - return template.render(roles=roles) + return template.render(roles=roles, marker=marker) def list_instance_profiles(self): profiles = iam_backend.get_instance_profiles() @@ -991,6 +1021,13 @@ LIST_ENTITIES_FOR_POLICY_TEMPLATE = """ """ +SET_DEFAULT_POLICY_VERSION_TEMPLATE = """ + + 35f241af-3ebc-11e4-9d0d-6f969EXAMPLE + +""" + + ATTACH_ROLE_POLICY_TEMPLATE = """ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE @@ -1180,6 +1217,12 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """ + + 786dff92-6cfd-4fa4-b1eb-27EXAMPLE804 + +""" + GET_INSTANCE_PROFILE_TEMPLATE = """ @@ -1303,6 +1346,12 @@ GET_ROLE_TEMPLATE = """ - false + {{ 'true' if marker else 'false' }} + {% if marker %} + {{ marker }} + {% endif %} {% for role in roles %} @@ -1632,6 +1684,16 @@ USER_TEMPLATE = """<{{ action }}UserResponse> {{ user.id }} {{ user.created_iso_8601 }} {{ user.arn }} + {% if user.tags %} + + {% for tag in user.tags %} + + {{ tag['Key'] }} + {{ tag['Value'] 
}} + + {% endfor %} + + {% endif %} @@ -1779,7 +1841,11 @@ GET_ACCESS_KEY_LAST_USED_TEMPLATE = """ {{ user_name }} - {{ last_used }} + {% if last_used %} + {{ last_used }} + {% endif %} + N/A + N/A @@ -2066,6 +2132,16 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ user.name }} {{ user.arn }} {{ user.created_iso_8601 }} + {% if user.policies %} + + {% for policy in user.policies %} + + {{ policy }} + {{ user.policies[policy] }} + + {% endfor %} + + {% endif %} {% endfor %} @@ -2089,7 +2165,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """ {{ policy }} - {{ group.get_policy(policy) }} + {{ group.policies[policy] }} {% endfor %} @@ -2396,9 +2472,7 @@ GET_ACCOUNT_PASSWORD_POLICY_TEMPLATE = """ {{ password_policy.allow_users_to_change_password | lower }} {{ password_policy.expire_passwords | lower }} - {% if password_policy.hard_expiry %} {{ password_policy.hard_expiry | lower }} - {% endif %} {% if password_policy.max_password_age %} {{ password_policy.max_password_age }} {% endif %} diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 14d577389..e3acf9690 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -7,10 +7,10 @@ class IoTClientError(JsonRESTError): class ResourceNotFoundException(IoTClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 404 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified resource does not exist" ) @@ -22,6 +22,15 @@ class InvalidRequestException(IoTClientError): ) +class InvalidStateTransitionException(IoTClientError): + def __init__(self, msg=None): + self.code = 409 + super(InvalidStateTransitionException, self).__init__( + "InvalidStateTransitionException", + msg or "An attempt was made to change to an invalid state.", + ) + + class VersionConflictException(IoTClientError): def __init__(self, name): self.code = 409 @@ -43,3 +52,11 @@ class DeleteConflictException(IoTClientError): def __init__(self, msg): self.code = 409 super(DeleteConflictException, self).__init__("DeleteConflictException", msg) + + +class ResourceAlreadyExistsException(IoTClientError): + def __init__(self, msg): + self.code = 409 + super(ResourceAlreadyExistsException, self).__init__( + "ResourceAlreadyExistsException", msg or "The resource already exists." + ) diff --git a/moto/iot/models.py b/moto/iot/models.py index d59d7533c..4a7d43239 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -17,8 +17,11 @@ from .exceptions import ( DeleteConflictException, ResourceNotFoundException, InvalidRequestException, + InvalidStateTransitionException, VersionConflictException, + ResourceAlreadyExistsException, ) +from moto.utilities.utils import random_string class FakeThing(BaseModel): @@ -29,7 +32,7 @@ class FakeThing(BaseModel): self.attributes = attributes self.arn = "arn:aws:iot:%s:1:thing/%s" % (self.region_name, thing_name) self.version = 1 - # TODO: we need to handle 'version'? + # TODO: we need to handle "version"? 
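+ # (For context: in real AWS IoT a thing's version starts at 1 and is
+ # incremented on every update; callers can pass expectedVersion for
+ # optimistic locking and get a VersionConflictException on mismatch,
+ # which is what the TODO above would need to mimic.)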
# for iot-data self.thing_shadow = None @@ -128,7 +131,7 @@ class FakeThingGroup(BaseModel): class FakeCertificate(BaseModel): def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None): m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode("utf-8")) + m.update(certificate_pem.encode("utf-8")) self.certificate_id = m.hexdigest() self.arn = "arn:aws:iot:%s:1:cert/%s" % (region_name, self.certificate_id) self.certificate_pem = certificate_pem @@ -143,7 +146,7 @@ class FakeCertificate(BaseModel): self.ca_certificate_id = None self.ca_certificate_pem = ca_certificate_pem if ca_certificate_pem: - m.update(str(uuid.uuid4()).encode("utf-8")) + m.update(ca_certificate_pem.encode("utf-8")) self.ca_certificate_id = m.hexdigest() def to_dict(self): @@ -174,18 +177,19 @@ class FakeCertificate(BaseModel): class FakePolicy(BaseModel): - def __init__(self, name, document, region_name): + def __init__(self, name, document, region_name, default_version_id="1"): self.name = name self.document = document self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, name) - self.version = "1" # TODO: handle version + self.default_version_id = default_version_id + self.versions = [FakePolicyVersion(self.name, document, True, region_name)] def to_get_dict(self): return { "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "defaultVersionId": self.version, + "defaultVersionId": self.default_version_id, } def to_dict_at_creation(self): @@ -193,13 +197,52 @@ class FakePolicy(BaseModel): "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "policyVersionId": self.version, + "policyVersionId": self.default_version_id, } def to_dict(self): return {"policyName": self.name, "policyArn": self.arn} +class FakePolicyVersion(object): + def __init__(self, policy_name, document, is_default, region_name): + self.name = policy_name + self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name) + self.document = document or {} + self.is_default = is_default + self.version_id = "1" + + self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) + self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) + + def to_get_dict(self): + return { + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default, + "creationDate": self.create_datetime, + "lastModifiedDate": self.last_modified_datetime, + "generationId": self.version_id, + } + + def to_dict_at_creation(self): + return { + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default, + } + + def to_dict(self): + return { + "versionId": self.version_id, + "isDefaultVersion": self.is_default, + "createDate": self.create_datetime, + } + + class FakeJob(BaseModel): JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) @@ -226,12 +269,14 @@ class FakeJob(BaseModel): self.targets = targets self.document_source = document_source self.document = document + self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config - self.status = None # IN_PROGRESS | CANCELED | COMPLETED + self.status = "QUEUED" # IN_PROGRESS | CANCELED | COMPLETED self.comment = None + self.reason_code = None self.created_at = time.mktime(datetime(2015, 
1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -258,9 +303,11 @@ "jobExecutionsRolloutConfig": self.job_executions_rollout_config, "status": self.status, "comment": self.comment, + "forceCanceled": self.force, + "reasonCode": self.reason_code, "createdAt": self.created_at, "lastUpdatedAt": self.last_updated_at, - "completedAt": self.completedAt, + "completedAt": self.completed_at, "jobProcessDetails": self.job_process_details, "documentParameters": self.document_parameters, "document": self.document, @@ -275,18 +322,123 @@ return regex_match and length_match +class FakeJobExecution(BaseModel): + def __init__( + self, + job_id, + thing_arn, + status="QUEUED", + force_canceled=False, + status_details_map=None, + ): + self.job_id = job_id + self.status = status # QUEUED | IN_PROGRESS | CANCELED | COMPLETED + self.force_canceled = force_canceled + # Use a fresh dict per instance instead of a shared mutable default argument: + self.status_details_map = ( + status_details_map if status_details_map is not None else {} + ) + self.thing_arn = thing_arn + self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple()) + self.started_at = time.mktime(datetime(2015, 1, 1).timetuple()) + self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) + self.execution_number = 123 + self.version_number = 123 + self.approximate_seconds_before_time_out = 123 + + def to_get_dict(self): + obj = { + "jobId": self.job_id, + "status": self.status, + "forceCanceled": self.force_canceled, + "statusDetails": {"detailsMap": self.status_details_map}, + "thingArn": self.thing_arn, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, + "versionNumber": self.version_number, + "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out, + } + + return obj + + def to_dict(self): + obj = { + "jobId": self.job_id, + "thingArn": self.thing_arn, + "jobExecutionSummary": { + "status": self.status, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, + }, + } + + return obj + + +class FakeEndpoint(BaseModel): + def __init__(self, endpoint_type, region_name): + if endpoint_type not in [ + "iot:Data", + "iot:Data-ATS", + "iot:CredentialProvider", + "iot:Jobs", + ]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the DescribeEndpoint " + "operation: Endpoint type %s not recognized."
% endpoint_type ) + self.region_name = region_name + data_identifier = random_string(14) + if endpoint_type == "iot:Data": + self.endpoint = "{i}.iot.{r}.amazonaws.com".format( + i=data_identifier, r=self.region_name + ) + elif endpoint_type == "iot:Data-ATS": + self.endpoint = "{i}-ats.iot.{r}.amazonaws.com".format( + i=data_identifier, r=self.region_name + ) + elif endpoint_type == "iot:CredentialProvider": + identifier = random_string(14) + self.endpoint = "{i}.credentials.iot.{r}.amazonaws.com".format( + i=identifier, r=self.region_name + ) + elif endpoint_type == "iot:Jobs": + identifier = random_string(14) + self.endpoint = "{i}.jobs.iot.{r}.amazonaws.com".format( + i=identifier, r=self.region_name + ) + self.endpoint_type = endpoint_type + + def to_get_dict(self): + obj = { + "endpointAddress": self.endpoint, + } + + return obj + + def to_dict(self): + obj = { + "endpointAddress": self.endpoint, + } + + return obj + + class IoTBackend(BaseBackend): def __init__(self, region_name=None): super(IoTBackend, self).__init__() self.region_name = region_name self.things = OrderedDict() self.jobs = OrderedDict() + self.job_executions = OrderedDict() self.thing_types = OrderedDict() self.thing_groups = OrderedDict() self.certificates = OrderedDict() self.policies = OrderedDict() self.principal_policies = OrderedDict() self.principal_things = OrderedDict() + self.endpoint = None def reset(self): region_name = self.region_name @@ -395,6 +547,10 @@ class IoTBackend(BaseBackend): raise ResourceNotFoundException() return thing_types[0] + def describe_endpoint(self, endpoint_type): + self.endpoint = FakeEndpoint(endpoint_type, self.region_name) + return self.endpoint + def delete_thing(self, thing_name, expected_version): # TODO: handle expected_version @@ -513,6 +669,12 @@ class IoTBackend(BaseBackend): def list_certificates(self): return self.certificates.values() + def __raise_if_certificate_already_exists(self, certificate_id): + if certificate_id in self.certificates: + raise ResourceAlreadyExistsException( + "The certificate is already provisioned or registered" + ) + def register_certificate( self, certificate_pem, ca_certificate_pem, set_as_active, status ): @@ -522,6 +684,15 @@ self.region_name, ca_certificate_pem, ) + self.__raise_if_certificate_already_exists(certificate.certificate_id) + + self.certificates[certificate.certificate_id] = certificate + return certificate + + def register_certificate_without_ca(self, certificate_pem, status): + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.__raise_if_certificate_already_exists(certificate.certificate_id) + + self.certificates[certificate.certificate_id] = certificate + return certificate @@ -535,6 +706,28 @@ self.policies[policy.name] = policy return policy + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_policy(self, policy_name, target): + # this may raise ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_attached_policies(self, target): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] + return policies + + def
list_policies(self): policies = self.policies.values() return policies @@ -559,6 +752,60 @@ class IoTBackend(BaseBackend): policy = self.get_policy(policy_name) del self.policies[policy.name] + def create_policy_version(self, policy_name, policy_document, set_as_default): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + version = FakePolicyVersion( + policy_name, policy_document, set_as_default, self.region_name + ) + policy.versions.append(version) + version.version_id = "{0}".format(len(policy.versions)) + if set_as_default: + self.set_default_policy_version(policy_name, version.version_id) + return version + + def set_default_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + version.is_default = True + policy.default_version_id = version.version_id + policy.document = version.document + else: + version.is_default = False + + def get_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + return version + raise ResourceNotFoundException() + + def list_policy_versions(self, policy_name): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + return policy.versions + + def delete_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + if version_id == policy.default_version_id: + raise InvalidRequestException( + "Cannot delete the default version of a policy" + ) + for i, v in enumerate(policy.versions): + if v.version_id == version_id: + del policy.versions[i] + return + raise ResourceNotFoundException() + def _get_principal(self, principal_arn): """ raise ResourceNotFoundException @@ -574,14 +821,6 @@ class IoTBackend(BaseBackend): pass raise ResourceNotFoundException() - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - def attach_principal_policy(self, policy_name, principal_arn): principal = self._get_principal(principal_arn) policy = self.get_policy(policy_name) @@ -590,15 +829,6 @@ class IoTBackend(BaseBackend): return self.principal_policies[k] = (principal, policy) - def detach_policy(self, policy_name, target): - # this may raises ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - def detach_principal_policy(self, policy_name, principal_arn): # this may raises ResourceNotFoundException self._get_principal(principal_arn) @@ -646,6 +876,14 @@ class IoTBackend(BaseBackend): return thing_names def list_thing_principals(self, thing_name): + + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException( + "Failed to list principals for thing %s because the thing does not exist in your account" + % thing_name + ) + principals = [ k[0] for k, v in self.principal_things.items() if k[1] == thing_name ] @@ -675,12 +913,49 @@ class IoTBackend(BaseBackend): return 
thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id def delete_thing_group(self, thing_group_name, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] + child_groups = [ + thing_group + for _, thing_group in self.thing_groups.items() + if thing_group.parent_group_name == thing_group_name + ] + if len(child_groups) > 0: + raise InvalidRequestException( + "Cannot delete thing group: " + + thing_group_name + + " when there are still child groups attached to it" + ) + try: + thing_group = self.describe_thing_group(thing_group_name) + del self.thing_groups[thing_group.arn] + except ResourceNotFoundException: + # AWS returns success even if the thing group does not exist. + pass def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups + if recursive is None: + recursive = True + if name_prefix_filter is None: + name_prefix_filter = "" + if parent_group and parent_group not in [ + _.thing_group_name for _ in self.thing_groups.values() + ]: + raise ResourceNotFoundException() + thing_groups = [ + _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group + ] + if recursive: + # Extending the list while iterating it makes the loop also visit the + # newly appended child groups, which yields the full subtree. + for g in thing_groups: + thing_groups.extend( + self.list_thing_groups( + parent_group=g.thing_group_name, + name_prefix_filter=None, + recursive=False, + ) + ) + return [ + _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter) + ] def update_thing_group( self, thing_group_name, thing_group_properties, expected_version @@ -819,11 +1094,187 @@ class IoTBackend(BaseBackend): self.region_name, ) self.jobs[job_id] = job + + for thing_arn in targets: + thing_name = thing_arn.split(":")[-1].split("/")[-1] + job_execution = FakeJobExecution(job_id, thing_arn) + self.job_executions[(job_id, thing_name)] = job_execution return job.job_arn, job_id, description def describe_job(self, job_id): + jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] + if len(jobs) == 0: + raise ResourceNotFoundException() + return jobs[0] + + def delete_job(self, job_id, force): + job = self.jobs[job_id] + + if job.status == "IN_PROGRESS" and force: + del self.jobs[job_id] + elif job.status != "IN_PROGRESS": + del self.jobs[job_id] + else: + raise InvalidStateTransitionException() + + def cancel_job(self, job_id, reason_code, comment, force): + job = self.jobs[job_id] + + # Check the current state *before* mutating it: a job that is already + # IN_PROGRESS can only be canceled with force=True. + if job.status == "IN_PROGRESS" and not force: + raise InvalidStateTransitionException() + + job.reason_code = reason_code if reason_code is not None else job.reason_code + job.comment = comment if comment is not None else job.comment + job.force = force if force is not None and force != job.force else job.force + job.status = "CANCELED" + + return job + + def get_job_document(self, job_id): return self.jobs[job_id] + def list_jobs( + self, + status, + target_selection, + max_results, + token, + thing_group_name, + thing_group_id, + ): + # TODO: implement filters + all_jobs = [_.to_dict() for _ in self.jobs.values()] + filtered_jobs = all_jobs + + if token is None: + jobs = filtered_jobs[0:max_results] + next_token = str(max_results) if len(filtered_jobs) > max_results else None + else: + token = int(token) + jobs = filtered_jobs[token : token + max_results] + next_token = ( + str(token + max_results) + if len(filtered_jobs) > token +
max_results + else None + ) + + return jobs, next_token + + def describe_job_execution(self, job_id, thing_name, execution_number): + try: + job_execution = self.job_executions[(job_id, thing_name)] + except KeyError: + raise ResourceNotFoundException() + + if job_execution is None or ( + execution_number is not None + and job_execution.execution_number != execution_number + ): + raise ResourceNotFoundException() + + return job_execution + + def cancel_job_execution( + self, job_id, thing_name, force, expected_version, status_details + ): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution is None: + raise ResourceNotFoundException() + + job_execution.force_canceled = ( + force if force is not None else job_execution.force_canceled + ) + # TODO: implement expected_version and status_details (at most 10 can be specified) + + if job_execution.status == "IN_PROGRESS" and force: + job_execution.status = "CANCELED" + self.job_executions[(job_id, thing_name)] = job_execution + elif job_execution.status != "IN_PROGRESS": + job_execution.status = "CANCELED" + self.job_executions[(job_id, thing_name)] = job_execution + else: + raise InvalidStateTransitionException() + + def delete_job_execution(self, job_id, thing_name, execution_number, force): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution.execution_number != execution_number: + raise ResourceNotFoundException() + + if job_execution.status == "IN_PROGRESS" and force: + del self.job_executions[(job_id, thing_name)] + elif job_execution.status != "IN_PROGRESS": + del self.job_executions[(job_id, thing_name)] + else: + raise InvalidStateTransitionException() + + def list_job_executions_for_job(self, job_id, status, max_results, next_token): + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[0] == job_id + ] + + if status is not None: + # to_dict() nests the status under "jobExecutionSummary": + job_executions = list( + filter( + lambda elem: elem["jobExecutionSummary"]["status"] == status, + job_executions, + ) + ) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token : token + max_results] + next_token = ( + str(token + max_results) + if len(job_executions) > token + max_results + else None + ) + + return job_executions, next_token + + def list_job_executions_for_thing( + self, thing_name, status, max_results, next_token + ): + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[1] == thing_name + ] + + if status is not None: + # to_dict() nests the status under "jobExecutionSummary": + job_executions = list( + filter( + lambda elem: elem["jobExecutionSummary"]["status"] == status, + job_executions, + ) + ) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token : token + max_results] + next_token = ( + str(token + max_results) + if len(job_executions) > token + max_results + else None + ) + + return job_executions, next_token + iot_backends = {} for region in Session().get_available_regions("iot"): diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 5981eaa37..15c62d91e 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +from six.moves.urllib.parse
import unquote from moto.core.responses import BaseResponse from .models import iot_backends @@ -87,6 +88,11 @@ class IoTResponse(BaseResponse): ) return json.dumps(thing_type.to_dict()) + def describe_endpoint(self): + endpoint_type = self._get_param("endpointType") + endpoint = self.iot_backend.describe_endpoint(endpoint_type=endpoint_type) + return json.dumps(endpoint.to_dict()) + def delete_thing(self): thing_name = self._get_param("thingName") expected_version = self._get_param("expectedVersion") @@ -141,6 +147,8 @@ class IoTResponse(BaseResponse): createdAt=job.created_at, description=job.description, documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, jobArn=job.job_arn, jobExecutionsRolloutConfig=job.job_executions_rollout_config, jobId=job.job_id, @@ -154,6 +162,127 @@ ) ) + def delete_job(self): + job_id = self._get_param("jobId") + force = self._get_bool_param("force") + + self.iot_backend.delete_job(job_id=job_id, force=force) + + return json.dumps(dict()) + + def cancel_job(self): + job_id = self._get_param("jobId") + reason_code = self._get_param("reasonCode") + comment = self._get_param("comment") + force = self._get_bool_param("force") + + job = self.iot_backend.cancel_job( + job_id=job_id, reason_code=reason_code, comment=comment, force=force + ) + + return json.dumps(job.to_dict()) + + def get_job_document(self): + job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) + + if job.document is not None: + return json.dumps({"document": job.document}) + else: + # job.document_source is not None: + # TODO: needs to be implemented to get document_source's content from S3 + return json.dumps({"document": ""}) + + def list_jobs(self): + status = self._get_param("status") + target_selection = self._get_param("targetSelection") + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier + previous_next_token = self._get_param("nextToken") + thing_group_name = self._get_param("thingGroupName") + thing_group_id = self._get_param("thingGroupId") + jobs, next_token = self.iot_backend.list_jobs( + status=status, + target_selection=target_selection, + max_results=max_results, + token=previous_next_token, + thing_group_name=thing_group_name, + thing_group_id=thing_group_id, + ) + + return json.dumps(dict(jobs=jobs, nextToken=next_token)) + + def describe_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + job_execution = self.iot_backend.describe_job_execution( + job_id=job_id, thing_name=thing_name, execution_number=execution_number + ) + + return json.dumps(dict(execution=job_execution.to_get_dict())) + + def cancel_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + force = self._get_bool_param("force") + expected_version = self._get_int_param("expectedVersion") + status_details = self._get_param("statusDetails") + + self.iot_backend.cancel_job_execution( + job_id=job_id, + thing_name=thing_name, + force=force, + expected_version=expected_version, + status_details=status_details, + ) + + return json.dumps(dict()) + + def delete_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + force = self._get_bool_param("force") + + self.iot_backend.delete_job_execution(
job_id=job_id, + thing_name=thing_name, + execution_number=execution_number, + force=force, + ) + + return json.dumps(dict()) + + def list_job_executions_for_job(self): + job_id = self._get_param("jobId") + status = self._get_param("status") + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_job( + job_id=job_id, status=status, max_results=max_results, next_token=next_token + ) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + + def list_job_executions_for_thing(self): + thing_name = self._get_param("thingName") + status = self._get_param("status") + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_thing( + thing_name=thing_name, + status=status, + max_results=max_results, + next_token=next_token, + ) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + def create_keys_and_certificate(self): set_as_active = self._get_bool_param("setAsActive") cert, key_pair = self.iot_backend.create_keys_and_certificate( @@ -206,6 +335,17 @@ class IoTResponse(BaseResponse): dict(certificateId=cert.certificate_id, certificateArn=cert.arn) ) + def register_certificate_without_ca(self): + certificate_pem = self._get_param("certificatePem") + status = self._get_param("status") + + cert = self.iot_backend.register_certificate_without_ca( + certificate_pem=certificate_pem, status=status, + ) + return json.dumps( + dict(certificateId=cert.certificate_id, certificateArn=cert.arn) + ) + def update_certificate(self): certificate_id = self._get_param("certificateId") new_status = self._get_param("newStatus") @@ -241,12 +381,61 @@ class IoTResponse(BaseResponse): self.iot_backend.delete_policy(policy_name=policy_name) return json.dumps(dict()) + def create_policy_version(self): + policy_name = self._get_param("policyName") + policy_document = self._get_param("policyDocument") + set_as_default = self._get_bool_param("setAsDefault") + policy_version = self.iot_backend.create_policy_version( + policy_name, policy_document, set_as_default + ) + + return json.dumps(dict(policy_version.to_dict_at_creation())) + + def set_default_policy_version(self): + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") + self.iot_backend.set_default_policy_version(policy_name, version_id) + + return json.dumps(dict()) + + def get_policy_version(self): + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") + policy_version = self.iot_backend.get_policy_version(policy_name, version_id) + return json.dumps(dict(policy_version.to_get_dict())) + + def list_policy_versions(self): + policy_name = self._get_param("policyName") + policiy_versions = self.iot_backend.list_policy_versions( + policy_name=policy_name + ) + + return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) + + def delete_policy_version(self): + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") + self.iot_backend.delete_policy_version(policy_name, version_id) + + return json.dumps(dict()) + def attach_policy(self): policy_name = self._get_param("policyName") target = self._get_param("target") 
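The policy-version handlers above map one-to-one onto the boto3 iot client. A minimal lifecycle sketch, with placeholder policy documents:

import json

import boto3
from moto import mock_iot


@mock_iot
def policy_version_sketch():
    client = boto3.client("iot", region_name="ap-northeast-1")
    doc_v1 = {
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "iot:*", "Resource": "*"}],
    }
    client.create_policy(
        policyName="example-policy", policyDocument=json.dumps(doc_v1)
    )
    doc_v2 = {
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "iot:Connect", "Resource": "*"}],
    }
    client.create_policy_version(
        policyName="example-policy",
        policyDocument=json.dumps(doc_v2),
        setAsDefault=True,
    )
    versions = client.list_policy_versions(policyName="example-policy")["policyVersions"]
    # Exactly one version should be flagged as the default ("2" at this point).
    print([v["versionId"] for v in versions if v["isDefaultVersion"]])
    client.set_default_policy_version(
        policyName="example-policy", policyVersionId="1"
    )
    client.delete_policy_version(policyName="example-policy", policyVersionId="2")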
self.iot_backend.attach_policy(policy_name=policy_name, target=target) return json.dumps(dict()) + def list_attached_policies(self): + principal = unquote(self._get_param("target")) + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + policies = self.iot_backend.list_attached_policies(target=principal) + # TODO: implement pagination in the future + next_marker = None + return json.dumps( + dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker) + ) + def attach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get("x-amzn-iot-principal") @@ -362,7 +551,7 @@ class IoTResponse(BaseResponse): # max_results = self._get_int_param("maxResults") parent_group = self._get_param("parentGroup") name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") + recursive = self._get_bool_param("recursive") thing_groups = self.iot_backend.list_thing_groups( parent_group=parent_group, name_prefix_filter=name_prefix_filter, diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py index 41b69bc7f..f695fb3fc 100644 --- a/moto/iotdata/models.py +++ b/moto/iotdata/models.py @@ -114,8 +114,7 @@ class FakeShadow(BaseModel): } def to_dict(self, include_delta=True): - """returning nothing except for just top-level keys for now. - """ + """returning nothing except for just top-level keys for now.""" if self.deleted: return {"timestamp": self.timestamp, "version": self.version} delta = self.parse_payload(self.desired, self.reported) diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index ec9655bfa..280402d5f 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -12,7 +12,7 @@ from hashlib import md5 from boto3 import Session from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.core import ACCOUNT_ID from .exceptions import ( @@ -53,6 +53,7 @@ class Shard(BaseModel): self.starting_hash = starting_hash self.ending_hash = ending_hash self.records = OrderedDict() + self.is_open = True @property def shard_id(self): @@ -116,29 +117,42 @@ class Shard(BaseModel): return r.sequence_number def to_json(self): - return { + response = { "HashKeyRange": { "EndingHashKey": str(self.ending_hash), "StartingHashKey": str(self.starting_hash), }, "SequenceNumberRange": { - "EndingSequenceNumber": self.get_max_sequence_number(), "StartingSequenceNumber": self.get_min_sequence_number(), }, "ShardId": self.shard_id, } + if not self.is_open: + response["SequenceNumberRange"][ + "EndingSequenceNumber" + ] = self.get_max_sequence_number() + return response -class Stream(BaseModel): - def __init__(self, stream_name, shard_count, region): +class Stream(CloudFormationModel): + def __init__(self, stream_name, shard_count, retention_period_hours, region_name): self.stream_name = stream_name - self.shard_count = shard_count self.creation_datetime = datetime.datetime.now() - self.region = region + self.region = region_name self.account_number = ACCOUNT_ID self.shards = {} self.tags = {} self.status = "ACTIVE" + self.shard_count = None + self.update_shard_count(shard_count) + self.retention_period_hours = retention_period_hours + + def update_shard_count(self, shard_count): + # ToDo: This was extracted from init. It's only accurate for new streams. + # It doesn't (yet) try to accurately mimic the more complex re-sharding behavior. 
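+        # (The split below is even over the 2**128 hash-key space: with
+        # shard_count=3, step = 2**128 // 3 and the shards cover [0, step),
+        # [step, 2*step) and [2*step, 2**128), the last range absorbing the
+        # rounding remainder.)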
+        # It makes the stream as if it had been created with this number of shards.
+        # Logically consistent, but not what AWS does.
+        self.shard_count = shard_count
         step = 2 ** 128 // shard_count
         hash_ranges = itertools.chain(
@@ -146,7 +160,6 @@
             [(shard_count - 1, (shard_count - 1) * step, 2 ** 128)],
         )
         for index, start, end in hash_ranges:
-
             shard = Shard(index, start, end)
             self.shards[shard.shard_id] = shard
@@ -201,6 +214,7 @@
                 "StreamName": self.stream_name,
                 "StreamStatus": self.status,
                 "HasMoreShards": False,
+                "RetentionPeriodHours": self.retention_period_hours,
                 "Shards": [shard.to_json() for shard in self.shards.values()],
             }
         }
@@ -216,14 +230,95 @@
             }
         }
 
+    @staticmethod
+    def cloudformation_name_type():
+        return "Name"
+
+    @staticmethod
+    def cloudformation_type():
+        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
+        return "AWS::Kinesis::Stream"
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
     ):
-        properties = cloudformation_json["Properties"]
-        region = properties.get("Region", "us-east-1")
+        properties = cloudformation_json.get("Properties", {})
         shard_count = properties.get("ShardCount", 1)
-        return Stream(properties["Name"], shard_count, region)
+        retention_period_hours = properties.get("RetentionPeriodHours")
+        tags = {
+            tag_item["Key"]: tag_item["Value"]
+            for tag_item in properties.get("Tags", [])
+        }
+
+        backend = kinesis_backends[region_name]
+        stream = backend.create_stream(
+            resource_name, shard_count, retention_period_hours, region_name
+        )
+        if any(tags):
+            backend.add_tags_to_stream(stream.stream_name, tags)
+        return stream
+
+    @classmethod
+    def update_from_cloudformation_json(
+        cls, original_resource, new_resource_name, cloudformation_json, region_name,
+    ):
+        properties = cloudformation_json["Properties"]
+
+        if Stream.is_replacement_update(properties):
+            resource_name_property = cls.cloudformation_name_type()
+            if resource_name_property not in properties:
+                properties[resource_name_property] = new_resource_name
+            new_resource = cls.create_from_cloudformation_json(
+                properties[resource_name_property], cloudformation_json, region_name
+            )
+            properties[resource_name_property] = original_resource.stream_name
+            cls.delete_from_cloudformation_json(
+                original_resource.stream_name, cloudformation_json, region_name
+            )
+            return new_resource
+
+        else:  # No Interruption
+            if "ShardCount" in properties:
+                original_resource.update_shard_count(properties["ShardCount"])
+            if "RetentionPeriodHours" in properties:
+                original_resource.retention_period_hours = properties[
+                    "RetentionPeriodHours"
+                ]
+            if "Tags" in properties:
+                original_resource.tags = {
+                    tag_item["Key"]: tag_item["Value"]
+                    for tag_item in properties.get("Tags", [])
+                }
+            return original_resource
+
+    @classmethod
+    def delete_from_cloudformation_json(
+        cls, resource_name, cloudformation_json, region_name
+    ):
+        backend = kinesis_backends[region_name]
+        backend.delete_stream(resource_name)
+
+    @staticmethod
+    def is_replacement_update(properties):
+        # Changing the stream name forces a replacement; everything else this
+        # model supports can be updated in place.
+        properties_requiring_replacement_update = ["Name"]
+        return any(
+            [
+                property_requiring_replacement in properties
+                for property_requiring_replacement in properties_requiring_replacement_update
+            ]
+        )
+
+    def get_cfn_attribute(self, attribute_name):
+        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
+
+        if attribute_name == "Arn":
return self.arn + raise UnformattedGetAttTemplateException() + + @property + def physical_resource_id(self): + return self.stream_name class FirehoseRecord(BaseModel): @@ -322,10 +417,12 @@ class KinesisBackend(BaseBackend): self.streams = OrderedDict() self.delivery_streams = {} - def create_stream(self, stream_name, shard_count, region): + def create_stream( + self, stream_name, shard_count, retention_period_hours, region_name + ): if stream_name in self.streams: raise ResourceInUseError(stream_name) - stream = Stream(stream_name, shard_count, region) + stream = Stream(stream_name, shard_count, retention_period_hours, region_name) self.streams[stream_name] = stream return stream diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 500f7855d..8e7fc3941 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -25,7 +25,10 @@ class KinesisResponse(BaseResponse): def create_stream(self): stream_name = self.parameters.get("StreamName") shard_count = self.parameters.get("ShardCount") - self.kinesis_backend.create_stream(stream_name, shard_count, self.region) + retention_period_hours = self.parameters.get("RetentionPeriodHours") + self.kinesis_backend.create_stream( + stream_name, shard_count, retention_period_hours, self.region + ) return "" def describe_stream(self): diff --git a/moto/kinesis/urls.py b/moto/kinesis/urls.py index c95f03190..a33225d60 100644 --- a/moto/kinesis/urls.py +++ b/moto/kinesis/urls.py @@ -2,7 +2,8 @@ from __future__ import unicode_literals from .responses import KinesisResponse url_bases = [ - "https?://kinesis.(.+).amazonaws.com", + # Need to avoid conflicting with kinesisvideo + r"https?://kinesis\.(.+).amazonaws.com", "https?://firehose.(.+).amazonaws.com", ] diff --git a/moto/kinesisvideo/__init__.py b/moto/kinesisvideo/__init__.py new file mode 100644 index 000000000..ee79d957b --- /dev/null +++ b/moto/kinesisvideo/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import kinesisvideo_backends +from ..core.models import base_decorator + +kinesisvideo_backend = kinesisvideo_backends["us-east-1"] +mock_kinesisvideo = base_decorator(kinesisvideo_backends) diff --git a/moto/kinesisvideo/exceptions.py b/moto/kinesisvideo/exceptions.py new file mode 100644 index 000000000..e2e119b37 --- /dev/null +++ b/moto/kinesisvideo/exceptions.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals + +from moto.core.exceptions import RESTError + + +class KinesisvideoClientError(RESTError): + code = 400 + + +class ResourceNotFoundException(KinesisvideoClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The requested stream is not found or not active.", + ) + + +class ResourceInUseException(KinesisvideoClientError): + def __init__(self, message): + self.code = 400 + super(ResourceInUseException, self).__init__( + "ResourceInUseException", message, + ) diff --git a/moto/kinesisvideo/models.py b/moto/kinesisvideo/models.py new file mode 100644 index 000000000..90d84ac02 --- /dev/null +++ b/moto/kinesisvideo/models.py @@ -0,0 +1,147 @@ +from __future__ import unicode_literals +from boto3 import Session +from moto.core import BaseBackend, BaseModel +from datetime import datetime +from .exceptions import ( + ResourceNotFoundException, + ResourceInUseException, +) +import random +import string +from moto.core.utils import get_random_hex +from moto.core import ACCOUNT_ID + + +class Stream(BaseModel): + def __init__( + self, + 
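With Stream registered as a CloudFormationModel, a stack template can create and update streams end to end. A minimal sketch (the stack, stream, and property values are arbitrary):

import json

import boto3
from moto import mock_cloudformation, mock_kinesis


@mock_cloudformation
@mock_kinesis
def cfn_stream_sketch():
    template = {
        "Resources": {
            "MyStream": {
                "Type": "AWS::Kinesis::Stream",
                "Properties": {
                    "Name": "example-stream",
                    "ShardCount": 2,
                    "RetentionPeriodHours": 48,
                },
            }
        }
    }
    cfn = boto3.client("cloudformation", region_name="us-east-1")
    cfn.create_stack(StackName="example-stack", TemplateBody=json.dumps(template))

    kinesis = boto3.client("kinesis", region_name="us-east-1")
    description = kinesis.describe_stream(StreamName="example-stream")[
        "StreamDescription"
    ]
    print(description["RetentionPeriodHours"], len(description["Shards"]))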
region_name, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ): + self.region_name = region_name + self.stream_name = stream_name + self.device_name = device_name + self.media_type = media_type + self.kms_key_id = kms_key_id + self.data_retention_in_hours = data_retention_in_hours + self.tags = tags + self.status = "ACTIVE" + self.version = self._get_random_string() + self.creation_time = datetime.utcnow() + stream_arn = "arn:aws:kinesisvideo:{}:{}:stream/{}/1598784211076".format( + self.region_name, ACCOUNT_ID, self.stream_name + ) + self.data_endpoint_number = get_random_hex() + self.arn = stream_arn + + def _get_random_string(self, length=20): + letters = string.ascii_lowercase + result_str = "".join([random.choice(letters) for _ in range(length)]) + return result_str + + def get_data_endpoint(self, api_name): + data_endpoint_prefix = "s-" if api_name in ("PUT_MEDIA", "GET_MEDIA") else "b-" + return "https://{}{}.kinesisvideo.{}.amazonaws.com".format( + data_endpoint_prefix, self.data_endpoint_number, self.region_name + ) + + def to_dict(self): + return { + "DeviceName": self.device_name, + "StreamName": self.stream_name, + "StreamARN": self.arn, + "MediaType": self.media_type, + "KmsKeyId": self.kms_key_id, + "Version": self.version, + "Status": self.status, + "CreationTime": self.creation_time.isoformat(), + "DataRetentionInHours": self.data_retention_in_hours, + } + + +class KinesisVideoBackend(BaseBackend): + def __init__(self, region_name=None): + super(KinesisVideoBackend, self).__init__() + self.region_name = region_name + self.streams = {} + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_stream( + self, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ): + streams = [_ for _ in self.streams.values() if _.stream_name == stream_name] + if len(streams) > 0: + raise ResourceInUseException( + "The stream {} already exists.".format(stream_name) + ) + stream = Stream( + self.region_name, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ) + self.streams[stream.arn] = stream + return stream.arn + + def _get_stream(self, stream_name, stream_arn): + if stream_name: + streams = [_ for _ in self.streams.values() if _.stream_name == stream_name] + if len(streams) == 0: + raise ResourceNotFoundException() + stream = streams[0] + elif stream_arn: + stream = self.streams.get(stream_arn) + if stream is None: + raise ResourceNotFoundException() + return stream + + def describe_stream(self, stream_name, stream_arn): + stream = self._get_stream(stream_name, stream_arn) + stream_info = stream.to_dict() + return stream_info + + def list_streams(self, max_results, next_token, stream_name_condition): + stream_info_list = [_.to_dict() for _ in self.streams.values()] + next_token = None + return stream_info_list, next_token + + def delete_stream(self, stream_arn, current_version): + stream = self.streams.get(stream_arn) + if stream is None: + raise ResourceNotFoundException() + del self.streams[stream_arn] + + def get_data_endpoint(self, stream_name, stream_arn, api_name): + stream = self._get_stream(stream_name, stream_arn) + return stream.get_data_endpoint(api_name) + + # add methods from here + + +kinesisvideo_backends = {} +for region in Session().get_available_regions("kinesisvideo"): + kinesisvideo_backends[region] = KinesisVideoBackend(region) +for region in Session().get_available_regions( + 
"kinesisvideo", partition_name="aws-us-gov" +): + kinesisvideo_backends[region] = KinesisVideoBackend(region) +for region in Session().get_available_regions("kinesisvideo", partition_name="aws-cn"): + kinesisvideo_backends[region] = KinesisVideoBackend(region) diff --git a/moto/kinesisvideo/responses.py b/moto/kinesisvideo/responses.py new file mode 100644 index 000000000..d1e386f2e --- /dev/null +++ b/moto/kinesisvideo/responses.py @@ -0,0 +1,65 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import kinesisvideo_backends +import json + + +class KinesisVideoResponse(BaseResponse): + SERVICE_NAME = "kinesisvideo" + + @property + def kinesisvideo_backend(self): + return kinesisvideo_backends[self.region] + + def create_stream(self): + device_name = self._get_param("DeviceName") + stream_name = self._get_param("StreamName") + media_type = self._get_param("MediaType") + kms_key_id = self._get_param("KmsKeyId") + data_retention_in_hours = self._get_int_param("DataRetentionInHours") + tags = self._get_param("Tags") + stream_arn = self.kinesisvideo_backend.create_stream( + device_name=device_name, + stream_name=stream_name, + media_type=media_type, + kms_key_id=kms_key_id, + data_retention_in_hours=data_retention_in_hours, + tags=tags, + ) + return json.dumps(dict(StreamARN=stream_arn)) + + def describe_stream(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + stream_info = self.kinesisvideo_backend.describe_stream( + stream_name=stream_name, stream_arn=stream_arn, + ) + return json.dumps(dict(StreamInfo=stream_info)) + + def list_streams(self): + max_results = self._get_int_param("MaxResults") + next_token = self._get_param("NextToken") + stream_name_condition = self._get_param("StreamNameCondition") + stream_info_list, next_token = self.kinesisvideo_backend.list_streams( + max_results=max_results, + next_token=next_token, + stream_name_condition=stream_name_condition, + ) + return json.dumps(dict(StreamInfoList=stream_info_list, NextToken=next_token)) + + def delete_stream(self): + stream_arn = self._get_param("StreamARN") + current_version = self._get_param("CurrentVersion") + self.kinesisvideo_backend.delete_stream( + stream_arn=stream_arn, current_version=current_version, + ) + return json.dumps(dict()) + + def get_data_endpoint(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + api_name = self._get_param("APIName") + data_endpoint = self.kinesisvideo_backend.get_data_endpoint( + stream_name=stream_name, stream_arn=stream_arn, api_name=api_name, + ) + return json.dumps(dict(DataEndpoint=data_endpoint)) diff --git a/moto/kinesisvideo/urls.py b/moto/kinesisvideo/urls.py new file mode 100644 index 000000000..9aab7f8e2 --- /dev/null +++ b/moto/kinesisvideo/urls.py @@ -0,0 +1,18 @@ +from __future__ import unicode_literals +from .responses import KinesisVideoResponse + +url_bases = [ + "https?://kinesisvideo.(.+).amazonaws.com", +] + + +response = KinesisVideoResponse() + + +url_paths = { + "{0}/createStream$": response.dispatch, + "{0}/describeStream$": response.dispatch, + "{0}/deleteStream$": response.dispatch, + "{0}/listStreams$": response.dispatch, + "{0}/getDataEndpoint$": response.dispatch, +} diff --git a/moto/kinesisvideoarchivedmedia/__init__.py b/moto/kinesisvideoarchivedmedia/__init__.py new file mode 100644 index 000000000..c1676c871 --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/__init__.py @@ -0,0 +1,6 @@ +from __future__ import 
unicode_literals
+from .models import kinesisvideoarchivedmedia_backends
+from ..core.models import base_decorator
+
+kinesisvideoarchivedmedia_backend = kinesisvideoarchivedmedia_backends["us-east-1"]
+mock_kinesisvideoarchivedmedia = base_decorator(kinesisvideoarchivedmedia_backends)
diff --git a/moto/kinesisvideoarchivedmedia/exceptions.py b/moto/kinesisvideoarchivedmedia/exceptions.py
new file mode 100644
index 000000000..38c60cea2
--- /dev/null
+++ b/moto/kinesisvideoarchivedmedia/exceptions.py
@@ -0,0 +1,3 @@
+from __future__ import unicode_literals
+
+# Not implemented exceptions for now
diff --git a/moto/kinesisvideoarchivedmedia/models.py b/moto/kinesisvideoarchivedmedia/models.py
new file mode 100644
index 000000000..46fddf567
--- /dev/null
+++ b/moto/kinesisvideoarchivedmedia/models.py
@@ -0,0 +1,88 @@
+from __future__ import unicode_literals
+from boto3 import Session
+from moto.core import BaseBackend
+from moto.kinesisvideo import kinesisvideo_backends
+from moto.sts.utils import random_session_token
+
+
+class KinesisVideoArchivedMediaBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(KinesisVideoArchivedMediaBackend, self).__init__()
+        self.region_name = region_name
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def _get_streaming_url(self, stream_name, stream_arn, api_name):
+        stream = kinesisvideo_backends[self.region_name]._get_stream(
+            stream_name, stream_arn
+        )
+        data_endpoint = stream.get_data_endpoint(api_name)
+        session_token = random_session_token()
+        api_to_relative_path = {
+            "GET_HLS_STREAMING_SESSION_URL": "/hls/v1/getHLSMasterPlaylist.m3u8",
+            "GET_DASH_STREAMING_SESSION_URL": "/dash/v1/getDASHManifest.mpd",
+        }
+        relative_path = api_to_relative_path[api_name]
+        url = "{}{}?SessionToken={}".format(data_endpoint, relative_path, session_token)
+        return url
+
+    def get_hls_streaming_session_url(
+        self,
+        stream_name,
+        stream_arn,
+        playback_mode,
+        hls_fragment_selector,
+        container_format,
+        discontinuity_mode,
+        display_fragment_timestamp,
+        expires,
+        max_media_playlist_fragment_results,
+    ):
+        # Ignore the optional parameters, as the format of the HLS URL does not depend on them
+        api_name = "GET_HLS_STREAMING_SESSION_URL"
+        url = self._get_streaming_url(stream_name, stream_arn, api_name)
+        return url
+
+    def get_dash_streaming_session_url(
+        self,
+        stream_name,
+        stream_arn,
+        playback_mode,
+        display_fragment_timestamp,
+        display_fragment_number,
+        dash_fragment_selector,
+        expires,
+        max_manifest_fragment_results,
+    ):
+        # Ignore the optional parameters, as the format of the DASH URL does not depend on them
+        api_name = "GET_DASH_STREAMING_SESSION_URL"
+        url = self._get_streaming_url(stream_name, stream_arn, api_name)
+        return url
+
+    def get_clip(self, stream_name, stream_arn, clip_fragment_selector):
+        kinesisvideo_backends[self.region_name]._get_stream(stream_name, stream_arn)
+        content_type = "video/mp4"  # Fixed content type; the real value depends on the input stream
+        payload = b"sample-mp4-video"
+        return content_type, payload
+
+
+kinesisvideoarchivedmedia_backends = {}
+for region in Session().get_available_regions("kinesis-video-archived-media"):
+    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
+        region
+    )
+for region in Session().get_available_regions(
+    "kinesis-video-archived-media", partition_name="aws-us-gov"
+):
+    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
+        region
+    )
+for region in Session().get_available_regions(
"kinesis-video-archived-media", partition_name="aws-cn" +): + kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend( + region + ) diff --git a/moto/kinesisvideoarchivedmedia/responses.py b/moto/kinesisvideoarchivedmedia/responses.py new file mode 100644 index 000000000..d021ced0e --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/responses.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import kinesisvideoarchivedmedia_backends +import json + + +class KinesisVideoArchivedMediaResponse(BaseResponse): + SERVICE_NAME = "kinesis-video-archived-media" + + @property + def kinesisvideoarchivedmedia_backend(self): + return kinesisvideoarchivedmedia_backends[self.region] + + def get_hls_streaming_session_url(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + playback_mode = self._get_param("PlaybackMode") + hls_fragment_selector = self._get_param("HLSFragmentSelector") + container_format = self._get_param("ContainerFormat") + discontinuity_mode = self._get_param("DiscontinuityMode") + display_fragment_timestamp = self._get_param("DisplayFragmentTimestamp") + expires = self._get_int_param("Expires") + max_media_playlist_fragment_results = self._get_param( + "MaxMediaPlaylistFragmentResults" + ) + hls_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + hls_fragment_selector=hls_fragment_selector, + container_format=container_format, + discontinuity_mode=discontinuity_mode, + display_fragment_timestamp=display_fragment_timestamp, + expires=expires, + max_media_playlist_fragment_results=max_media_playlist_fragment_results, + ) + return json.dumps(dict(HLSStreamingSessionURL=hls_streaming_session_url)) + + def get_dash_streaming_session_url(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + playback_mode = self._get_param("PlaybackMode") + display_fragment_timestamp = self._get_param("DisplayFragmentTimestamp") + display_fragment_number = self._get_param("DisplayFragmentNumber") + dash_fragment_selector = self._get_param("DASHFragmentSelector") + expires = self._get_int_param("Expires") + max_manifest_fragment_results = self._get_param("MaxManifestFragmentResults") + dash_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + display_fragment_timestamp=display_fragment_timestamp, + display_fragment_number=display_fragment_number, + dash_fragment_selector=dash_fragment_selector, + expires=expires, + max_manifest_fragment_results=max_manifest_fragment_results, + ) + return json.dumps(dict(DASHStreamingSessionURL=dash_streaming_session_url)) + + def get_clip(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + clip_fragment_selector = self._get_param("ClipFragmentSelector") + content_type, payload = self.kinesisvideoarchivedmedia_backend.get_clip( + stream_name=stream_name, + stream_arn=stream_arn, + clip_fragment_selector=clip_fragment_selector, + ) + new_headers = {"Content-Type": content_type} + return payload, new_headers diff --git a/moto/kinesisvideoarchivedmedia/urls.py b/moto/kinesisvideoarchivedmedia/urls.py new file mode 100644 index 000000000..88c2d59f0 --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/urls.py @@ 
-0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import KinesisVideoArchivedMediaResponse + +url_bases = [ + r"https?://.*\.kinesisvideo.(.+).amazonaws.com", +] + + +response = KinesisVideoArchivedMediaResponse() + + +url_paths = { + "{0}/.*$": response.dispatch, +} diff --git a/moto/kms/models.py b/moto/kms/models.py index ff5d0a356..7a9918f2b 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -6,17 +6,17 @@ from datetime import datetime, timedelta from boto3 import Session -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel from moto.core.utils import unix_time - -from moto.iam.models import ACCOUNT_ID +from moto.utilities.tagging_service import TaggingService +from moto.core.exceptions import JsonRESTError from .utils import decrypt, encrypt, generate_key_id, generate_master_key -class Key(BaseModel): +class Key(CloudFormationModel): def __init__( - self, policy, key_usage, customer_master_key_spec, description, tags, region + self, policy, key_usage, customer_master_key_spec, description, region ): self.id = generate_key_id() self.creation_date = unix_time() @@ -29,7 +29,6 @@ class Key(BaseModel): self.account_id = ACCOUNT_ID self.key_rotation_status = False self.deletion_date = None - self.tags = tags or {} self.key_material = generate_master_key() self.origin = "AWS_KMS" self.key_manager = "CUSTOMER" @@ -99,6 +98,15 @@ class Key(BaseModel): def delete(self, region_name): kms_backends[region_name].delete_key(self.id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html + return "AWS::KMS::Key" + @classmethod def create_from_cloudformation_json( self, resource_name, cloudformation_json, region_name @@ -111,11 +119,12 @@ class Key(BaseModel): key_usage="ENCRYPT_DECRYPT", customer_master_key_spec="SYMMETRIC_DEFAULT", description=properties["Description"], - tags=properties.get("Tags"), + tags=properties.get("Tags", []), region=region_name, ) key.key_rotation_status = properties["EnableKeyRotation"] key.enabled = properties["Enabled"] + return key def get_cfn_attribute(self, attribute_name): @@ -130,32 +139,26 @@ class KmsBackend(BaseBackend): def __init__(self): self.keys = {} self.key_to_aliases = defaultdict(set) + self.tagger = TaggingService(keyName="TagKey", valueName="TagValue") def create_key( self, policy, key_usage, customer_master_key_spec, description, tags, region ): - key = Key( - policy, key_usage, customer_master_key_spec, description, tags, region - ) + key = Key(policy, key_usage, customer_master_key_spec, description, region) self.keys[key.id] = key + if tags is not None and len(tags) > 0: + self.tag_resource(key.id, tags) return key def update_key_description(self, key_id, description): key = self.keys[self.get_key_id(key_id)] key.description = description - def tag_resource(self, key_id, tags): - key = self.keys[self.get_key_id(key_id)] - key.tags = tags - - def list_resource_tags(self, key_id): - key = self.keys[self.get_key_id(key_id)] - return key.tags - def delete_key(self, key_id): if key_id in self.keys: if key_id in self.key_to_aliases: self.key_to_aliases.pop(key_id) + self.tagger.delete_all_tags_for_resource(key_id) return self.keys.pop(key_id) @@ -325,6 +328,32 @@ class KmsBackend(BaseBackend): return plaintext, ciphertext_blob, arn + def list_resource_tags(self, key_id): + if key_id in self.keys: + return 
self.tagger.list_tags_for_resource(key_id) + raise JsonRESTError( + "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", + ) + + def tag_resource(self, key_id, tags): + if key_id in self.keys: + self.tagger.tag_resource(key_id, tags) + return {} + raise JsonRESTError( + "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", + ) + + def untag_resource(self, key_id, tag_names): + if key_id in self.keys: + self.tagger.untag_resource_using_names(key_id, tag_names) + return {} + raise JsonRESTError( + "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", + ) + kms_backends = {} for region in Session().get_available_regions("kms"): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 15b990bbb..995c097e0 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -144,17 +144,27 @@ class KmsResponse(BaseResponse): self._validate_cmk_id(key_id) - self.kms_backend.tag_resource(key_id, tags) - return json.dumps({}) + result = self.kms_backend.tag_resource(key_id, tags) + return json.dumps(result) + + def untag_resource(self): + """https://docs.aws.amazon.com/kms/latest/APIReference/API_UntagResource.html""" + key_id = self.parameters.get("KeyId") + tag_names = self.parameters.get("TagKeys") + + self._validate_cmk_id(key_id) + + result = self.kms_backend.untag_resource(key_id, tag_names) + return json.dumps(result) def list_resource_tags(self): """https://docs.aws.amazon.com/kms/latest/APIReference/API_ListResourceTags.html""" key_id = self.parameters.get("KeyId") - self._validate_cmk_id(key_id) tags = self.kms_backend.list_resource_tags(key_id) - return json.dumps({"Tags": tags, "NextMarker": None, "Truncated": False}) + tags.update({"NextMarker": None, "Truncated": False}) + return json.dumps(tags) def describe_key(self): """https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html""" diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index 9f6628b0f..022b3a411 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError): class ResourceNotFoundException(LogsClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified log group does not exist" ) @@ -28,3 +28,11 @@ class ResourceAlreadyExistsException(LogsClientError): super(ResourceAlreadyExistsException, self).__init__( "ResourceAlreadyExistsException", "The specified log group already exists" ) + + +class LimitExceededException(LogsClientError): + def __init__(self): + self.code = 400 + super(LimitExceededException, self).__init__( + "LimitExceededException", "Resource limit exceeded." 
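Key tags now round-trip through the shared TaggingService in the TagKey/TagValue shape the API documents. A minimal sketch:

import boto3
from moto import mock_kms


@mock_kms
def kms_tagging_sketch():
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="example key")["KeyMetadata"]["KeyId"]

    client.tag_resource(
        KeyId=key_id,
        Tags=[{"TagKey": "project", "TagValue": "demo"}],
    )
    # [{'TagKey': 'project', 'TagValue': 'demo'}]
    print(client.list_resource_tags(KeyId=key_id)["Tags"])

    client.untag_resource(KeyId=key_id, TagKeys=["project"])
    print(client.list_resource_tags(KeyId=key_id)["Tags"])  # []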
+ ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 7448319db..8425f87f2 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -6,6 +6,7 @@ from .exceptions import ( ResourceNotFoundException, ResourceAlreadyExistsException, InvalidParameterException, + LimitExceededException, ) @@ -57,6 +58,8 @@ class LogStream: 0 # I'm guessing this is token needed for sequenceToken by put_events ) self.events = [] + self.destination_arn = None + self.filter_name = None self.__class__._log_ids += 1 @@ -97,11 +100,32 @@ class LogStream: self.lastIngestionTime = int(unix_time_millis()) # TODO: make this match AWS if possible self.storedBytes += sum([len(log_event["message"]) for log_event in log_events]) - self.events += [ + events = [ LogEvent(self.lastIngestionTime, log_event) for log_event in log_events ] + self.events += events self.uploadSequenceToken += 1 + if self.destination_arn and self.destination_arn.split(":")[2] == "lambda": + from moto.awslambda import lambda_backends # due to circular dependency + + lambda_log_events = [ + { + "id": event.eventId, + "timestamp": event.timestamp, + "message": event.message, + } + for event in events + ] + + lambda_backends[self.region].send_log_event( + self.destination_arn, + self.filter_name, + log_group_name, + log_stream_name, + lambda_log_events, + ) + return "{:056d}".format(self.uploadSequenceToken) def get_log_events( @@ -134,7 +158,7 @@ class LogStream: return None, 0 events = sorted( - filter(filter_func, self.events), key=lambda event: event.timestamp, + filter(filter_func, self.events), key=lambda event: event.timestamp ) direction, index = get_index_and_direction_from_token(next_token) @@ -169,11 +193,7 @@ class LogStream: if end_index > final_index: end_index = final_index elif end_index < 0: - return ( - [], - "b/{:056d}".format(0), - "f/{:056d}".format(0), - ) + return ([], "b/{:056d}".format(0), "f/{:056d}".format(0)) events_page = [ event.to_response_dict() for event in events[start_index : end_index + 1] @@ -219,7 +239,7 @@ class LogStream: class LogGroup: - def __init__(self, region, name, tags): + def __init__(self, region, name, tags, **kwargs): self.name = name self.region = region self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format( @@ -228,9 +248,10 @@ class LogGroup: self.creationTime = int(unix_time_millis()) self.tags = tags self.streams = dict() # {name: LogStream} - self.retentionInDays = ( - None # AWS defaults to Never Expire for log group retention - ) + self.retention_in_days = kwargs.get( + "RetentionInDays" + ) # AWS defaults to Never Expire for log group retention + self.subscription_filters = [] def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -368,12 +389,12 @@ class LogGroup: "storedBytes": sum(s.storedBytes for s in self.streams.values()), } # AWS only returns retentionInDays if a value is set for the log group (ie. 
not Never Expire) - if self.retentionInDays: - log_group["retentionInDays"] = self.retentionInDays + if self.retention_in_days: + log_group["retentionInDays"] = self.retention_in_days return log_group def set_retention_policy(self, retention_in_days): - self.retentionInDays = retention_in_days + self.retention_in_days = retention_in_days def list_tags(self): return self.tags if self.tags else {} @@ -390,6 +411,48 @@ class LogGroup: k: v for (k, v) in self.tags.items() if k not in tags_to_remove } + def describe_subscription_filters(self): + return self.subscription_filters + + def put_subscription_filter( + self, filter_name, filter_pattern, destination_arn, role_arn + ): + creation_time = int(unix_time_millis()) + + # only one subscription filter can be associated with a log group + if self.subscription_filters: + if self.subscription_filters[0]["filterName"] == filter_name: + creation_time = self.subscription_filters[0]["creationTime"] + else: + raise LimitExceededException + + for stream in self.streams.values(): + stream.destination_arn = destination_arn + stream.filter_name = filter_name + + self.subscription_filters = [ + { + "filterName": filter_name, + "logGroupName": self.name, + "filterPattern": filter_pattern, + "destinationArn": destination_arn, + "roleArn": role_arn, + "distribution": "ByLogStream", + "creationTime": creation_time, + } + ] + + def delete_subscription_filter(self, filter_name): + if ( + not self.subscription_filters + or self.subscription_filters[0]["filterName"] != filter_name + ): + raise ResourceNotFoundException( + "The specified subscription filter does not exist." + ) + + self.subscription_filters = [] + class LogsBackend(BaseBackend): def __init__(self, region_name): @@ -401,10 +464,13 @@ class LogsBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_log_group(self, log_group_name, tags): + def create_log_group(self, log_group_name, tags, **kwargs): if log_group_name in self.groups: raise ResourceAlreadyExistsException() - self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) + self.groups[log_group_name] = LogGroup( + self.region_name, log_group_name, tags, **kwargs + ) + return self.groups[log_group_name] def ensure_log_group(self, log_group_name, tags): if log_group_name in self.groups: @@ -419,20 +485,39 @@ class LogsBackend(BaseBackend): def describe_log_groups(self, limit, log_group_name_prefix, next_token): if log_group_name_prefix is None: log_group_name_prefix = "" - if next_token is None: - next_token = 0 groups = [ group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix) ] - groups = sorted(groups, key=lambda x: x["creationTime"], reverse=True) - groups_page = groups[next_token : next_token + limit] + groups = sorted(groups, key=lambda x: x["logGroupName"]) - next_token += limit - if next_token >= len(groups): - next_token = None + index_start = 0 + if next_token: + try: + index_start = ( + next( + index + for (index, d) in enumerate(groups) + if d["logGroupName"] == next_token + ) + + 1 + ) + except StopIteration: + index_start = 0 + # AWS returns an empty list if it receives an invalid token. 
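+                # (Tokens are logGroupName values: for groups ["g1", "g2",
+                # "g3"] and limit=2, the first page is ["g1", "g2"] with
+                # nextToken "g2", and the follow-up call resumes after "g2".)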
+ groups = [] + + index_end = index_start + limit + if index_end > len(groups): + index_end = len(groups) + + groups_page = groups[index_start:index_end] + + next_token = None + if groups_page and index_end < len(groups): + next_token = groups_page[-1]["logGroupName"] return groups_page, next_token @@ -558,6 +643,46 @@ class LogsBackend(BaseBackend): log_group = self.groups[log_group_name] log_group.untag(tags) + def describe_subscription_filters(self, log_group_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + return log_group.describe_subscription_filters() + + def put_subscription_filter( + self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ): + # TODO: support other destinations like Kinesis stream + from moto.awslambda import lambda_backends # due to circular dependency + + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + lambda_func = lambda_backends[self.region_name].get_function(destination_arn) + + # no specific permission check implemented + if not lambda_func: + raise InvalidParameterException( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." + ) + + log_group.put_subscription_filter( + filter_name, filter_pattern, destination_arn, role_arn + ) + + def delete_subscription_filter(self, log_group_name, filter_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + log_group.delete_subscription_filter(filter_name) + logs_backends = {} for region in Session().get_available_regions("logs"): diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4631da2f9..715c4b5c1 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -42,7 +42,10 @@ class LogsResponse(BaseResponse): groups, next_token = self.logs_backend.describe_log_groups( limit, log_group_name_prefix, next_token ) - return json.dumps({"logGroups": groups, "nextToken": next_token}) + result = {"logGroups": groups} + if next_token: + result["nextToken"] = next_token + return json.dumps(result) def create_log_stream(self): log_group_name = self._get_param("logGroupName") @@ -178,3 +181,33 @@ class LogsResponse(BaseResponse): tags = self._get_param("tags") self.logs_backend.untag_log_group(log_group_name, tags) return "" + + def describe_subscription_filters(self): + log_group_name = self._get_param("logGroupName") + + subscription_filters = self.logs_backend.describe_subscription_filters( + log_group_name + ) + + return json.dumps({"subscriptionFilters": subscription_filters}) + + def put_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + filter_pattern = self._get_param("filterPattern") + destination_arn = self._get_param("destinationArn") + role_arn = self._get_param("roleArn") + + self.logs_backend.put_subscription_filter( + log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ) + + return "" + + def delete_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + + self.logs_backend.delete_subscription_filter(log_group_name, filter_name) + + return "" diff --git a/moto/managedblockchain/__init__.py b/moto/managedblockchain/__init__.py new file mode 100644 index 000000000..a95fa7351 --- /dev/null +++ b/moto/managedblockchain/__init__.py @@ -0,0 +1,9 @@ +from 
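A subscription filter can only be registered against a function the Lambda mock knows about, so a sketch has to create one first. The role ARN below is a dummy value and the zip contents a placeholder handler; actually forwarding log events into the function additionally needs the Docker-backed Lambda executor, so this sketch only registers, lists, and deletes the filter:

import io
import zipfile

import boto3
from moto import mock_lambda, mock_logs


def _placeholder_zip():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr(
            "lambda_function.py",
            "def handler(event, context):\n    return event\n",
        )
    return buf.getvalue()


@mock_lambda
@mock_logs
def subscription_filter_sketch():
    func_arn = boto3.client("lambda", region_name="us-east-1").create_function(
        FunctionName="example-func",
        Runtime="python3.8",
        Role="arn:aws:iam::123456789012:role/example-role",  # dummy role ARN
        Handler="lambda_function.handler",
        Code={"ZipFile": _placeholder_zip()},
    )["FunctionArn"]

    logs = boto3.client("logs", region_name="us-east-1")
    logs.create_log_group(logGroupName="example-group")
    logs.put_subscription_filter(
        logGroupName="example-group",
        filterName="example-filter",
        filterPattern="",
        destinationArn=func_arn,
    )
    print(
        logs.describe_subscription_filters(logGroupName="example-group")[
            "subscriptionFilters"
        ]
    )
    logs.delete_subscription_filter(
        logGroupName="example-group", filterName="example-filter"
    )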
__future__ import unicode_literals +from .models import managedblockchain_backends +from ..core.models import base_decorator, deprecated_base_decorator + +managedblockchain_backend = managedblockchain_backends["us-east-1"] +mock_managedblockchain = base_decorator(managedblockchain_backends) +mock_managedblockchain_deprecated = deprecated_base_decorator( + managedblockchain_backends +) diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py new file mode 100644 index 000000000..4735389ae --- /dev/null +++ b/moto/managedblockchain/exceptions.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class ManagedBlockchainClientError(RESTError): + code = 400 + + +class BadRequestException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + super(BadRequestException, self).__init__( + "BadRequestException", + "An error occurred (BadRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + +class InvalidRequestException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + super(InvalidRequestException, self).__init__( + "InvalidRequestException", + "An error occurred (InvalidRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + +class ResourceNotFoundException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + +class ResourceAlreadyExistsException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 409 + super(ResourceAlreadyExistsException, self).__init__( + "ResourceAlreadyExistsException", + "An error occurred (ResourceAlreadyExistsException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + +class ResourceLimitExceededException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 429 + super(ResourceLimitExceededException, self).__init__( + "ResourceLimitExceededException", + "An error occurred (ResourceLimitExceededException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py new file mode 100644 index 000000000..233e875c3 --- /dev/null +++ b/moto/managedblockchain/models.py @@ -0,0 +1,1100 @@ +from __future__ import unicode_literals, division + +import datetime +import re + +from boto3 import Session + +from moto.core import BaseBackend, BaseModel + +from .exceptions import ( + BadRequestException, + ResourceNotFoundException, + InvalidRequestException, + ResourceLimitExceededException, + ResourceAlreadyExistsException, +) + +from .utils import ( + get_network_id, + get_member_id, + get_proposal_id, + get_invitation_id, + member_name_exist_in_network, + number_of_members_in_network, + admin_password_ok, + get_node_id, + number_of_nodes_in_member, + nodes_in_member, +) + +FRAMEWORKS = [ + "HYPERLEDGER_FABRIC", +] + +FRAMEWORKVERSIONS = [ + "1.2", +] + +EDITIONS = { + "STARTER": { + "MaxMembers": 5, + "MaxNodesPerMember": 2, + 
"AllowedNodeInstanceTypes": ["bc.t3.small", "bc.t3.medium"], + }, + "STANDARD": { + "MaxMembers": 14, + "MaxNodesPerMember": 3, + "AllowedNodeInstanceTypes": ["bc.t3", "bc.m5", "bc.c5"], + }, +} + +VOTEVALUES = ["YES", "NO"] + + +class ManagedBlockchainNetwork(BaseModel): + def __init__( + self, + id, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + region, + description=None, + ): + self.creationdate = datetime.datetime.utcnow() + self.id = id + self.name = name + self.description = description + self.framework = framework + self.frameworkversion = frameworkversion + self.frameworkconfiguration = frameworkconfiguration + self.voting_policy = voting_policy + self.member_configuration = member_configuration + self.region = region + + @property + def network_name(self): + return self.name + + @property + def network_framework(self): + return self.framework + + @property + def network_framework_version(self): + return self.frameworkversion + + @property + def network_creationdate(self): + return self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z") + + @property + def network_description(self): + return self.description + + @property + def network_edition(self): + return self.frameworkconfiguration["Fabric"]["Edition"] + + @property + def vote_pol_proposal_duration(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ProposalDurationInHours"] + + @property + def vote_pol_threshold_percentage(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdPercentage"] + + @property + def vote_pol_threshold_comparator(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdComparator"] + + def to_dict(self): + # Format for list_networks + d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "Status": "AVAILABLE", + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + def get_format(self): + # Format for get_network + frameworkattributes = { + "Fabric": { + "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( + self.id.lower(), self.region + ), + "Edition": self.frameworkconfiguration["Fabric"]["Edition"], + } + } + + vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format( + self.region, self.id.lower() + ) + + d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "FrameworkAttributes": frameworkattributes, + "VpcEndpointServiceName": vpcendpointname, + "VotingPolicy": self.voting_policy, + "Status": "AVAILABLE", + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + +class ManagedBlockchainProposal(BaseModel): + def __init__( + self, + id, + networkid, + memberid, + membername, + numofmembers, + actions, + network_expirtation, + network_threshold, + network_threshold_comp, + description=None, + ): + # In general, passing all values instead of creating + # an apparatus to look them up + self.id = id + self.networkid = networkid + self.memberid = memberid + self.membername = membername + self.numofmembers = numofmembers + self.actions = actions + self.network_expirtation = network_expirtation + self.network_threshold = network_threshold + self.network_threshold_comp = network_threshold_comp + 
self.description = description + + self.creationdate = datetime.datetime.utcnow() + self.expirtationdate = self.creationdate + datetime.timedelta( + hours=network_expirtation + ) + self.yes_vote_count = 0 + self.no_vote_count = 0 + self.outstanding_vote_count = self.numofmembers + self.status = "IN_PROGRESS" + self.votes = {} + + @property + def network_id(self): + return self.networkid + + @property + def proposal_status(self): + return self.status + + @property + def proposal_votes(self): + return self.votes + + def proposal_actions(self, action_type): + default_return = [] + if action_type.lower() == "invitations": + if "Invitations" in self.actions: + return self.actions["Invitations"] + elif action_type.lower() == "removals": + if "Removals" in self.actions: + return self.actions["Removals"] + return default_return + + def check_to_expire_proposal(self): + if datetime.datetime.utcnow() > self.expirtationdate: + self.status = "EXPIRED" + + def to_dict(self): + # Format for list_proposals + d = { + "ProposalId": self.id, + "ProposedByMemberId": self.memberid, + "ProposedByMemberName": self.membername, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + return d + + def get_format(self): + # Format for get_proposal + d = { + "ProposalId": self.id, + "NetworkId": self.networkid, + "Actions": self.actions, + "ProposedByMemberId": self.memberid, + "ProposedByMemberName": self.membername, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "YesVoteCount": self.yes_vote_count, + "NoVoteCount": self.no_vote_count, + "OutstandingVoteCount": self.outstanding_vote_count, + } + if self.description is not None: + d["Description"] = self.description + return d + + def set_vote(self, votermemberid, votermembername, vote): + if vote.upper() == "YES": + self.yes_vote_count += 1 + else: + self.no_vote_count += 1 + self.outstanding_vote_count -= 1 + + perct_yes = (self.yes_vote_count / self.numofmembers) * 100 + perct_no = (self.no_vote_count / self.numofmembers) * 100 + self.votes[votermemberid] = { + "MemberId": votermemberid, + "MemberName": votermembername, + "Vote": vote.upper(), + } + + if self.network_threshold_comp == "GREATER_THAN_OR_EQUAL_TO": + if perct_yes >= self.network_threshold: + self.status = "APPROVED" + elif perct_no >= self.network_threshold: + self.status = "REJECTED" + else: + if perct_yes > self.network_threshold: + self.status = "APPROVED" + elif perct_no > self.network_threshold: + self.status = "REJECTED" + + # It is a tie - reject + if ( + self.status == "IN_PROGRESS" + and self.network_threshold_comp == "GREATER_THAN" + and self.outstanding_vote_count == 0 + and perct_yes == perct_no + ): + self.status = "REJECTED" + + +class ManagedBlockchainInvitation(BaseModel): + def __init__( + self, + id, + networkid, + networkname, + networkframework, + networkframeworkversion, + networkcreationdate, + region, + networkdescription=None, + ): + self.id = id + self.networkid = networkid + self.networkname = networkname + self.networkdescription = networkdescription + self.networkframework = networkframework + self.networkframeworkversion = networkframeworkversion + self.networkstatus = "AVAILABLE" + self.networkcreationdate = networkcreationdate + self.status = "PENDING" + self.region = region + + self.creationdate = 
datetime.datetime.utcnow() + self.expirtationdate = self.creationdate + datetime.timedelta(days=7) + + @property + def invitation_status(self): + return self.status + + @property + def invitation_networkid(self): + return self.networkid + + def to_dict(self): + d = { + "InvitationId": self.id, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "Status": self.status, + "NetworkSummary": { + "Id": self.networkid, + "Name": self.networkname, + "Framework": self.networkframework, + "FrameworkVersion": self.networkframeworkversion, + "Status": self.networkstatus, + "CreationDate": self.networkcreationdate, + }, + } + if self.networkdescription is not None: + d["NetworkSummary"]["Description"] = self.networkdescription + return d + + def accept_invitation(self): + self.status = "ACCEPTED" + + def reject_invitation(self): + self.status = "REJECTED" + + def set_network_status(self, network_status): + self.networkstatus = network_status + + +class ManagedBlockchainMember(BaseModel): + def __init__( + self, id, networkid, member_configuration, region, + ): + self.creationdate = datetime.datetime.utcnow() + self.id = id + self.networkid = networkid + self.member_configuration = member_configuration + self.status = "AVAILABLE" + self.region = region + self.description = None + + @property + def network_id(self): + return self.networkid + + @property + def name(self): + return self.member_configuration["Name"] + + @property + def member_status(self): + return self.status + + def to_dict(self): + # Format for list_members + d = { + "Id": self.id, + "Name": self.member_configuration["Name"], + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "IsOwned": True, + } + if "Description" in self.member_configuration: + self.description = self.member_configuration["Description"] + return d + + def get_format(self): + # Format for get_member + frameworkattributes = { + "Fabric": { + "AdminUsername": self.member_configuration["FrameworkConfiguration"][ + "Fabric" + ]["AdminUsername"], + "CaEndpoint": "ca.{0}.{1}.managedblockchain.{2}.amazonaws.com:30002".format( + self.id.lower(), self.networkid.lower(), self.region + ), + } + } + + d = { + "NetworkId": self.networkid, + "Id": self.id, + "Name": self.name, + "FrameworkAttributes": frameworkattributes, + "LogPublishingConfiguration": self.member_configuration[ + "LogPublishingConfiguration" + ], + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if "Description" in self.member_configuration: + d["Description"] = self.description + return d + + def delete(self): + self.status = "DELETED" + + def update(self, logpublishingconfiguration): + self.member_configuration[ + "LogPublishingConfiguration" + ] = logpublishingconfiguration + + +class ManagedBlockchainNode(BaseModel): + def __init__( + self, + id, + networkid, + memberid, + availabilityzone, + instancetype, + logpublishingconfiguration, + region, + ): + self.creationdate = datetime.datetime.utcnow() + self.id = id + self.instancetype = instancetype + self.networkid = networkid + self.memberid = memberid + self.logpublishingconfiguration = logpublishingconfiguration + self.region = region + self.status = "AVAILABLE" + self.availabilityzone = availabilityzone + + @property + def member_id(self): + return self.memberid + + @property + def node_status(self): + return self.status + + def to_dict(self): + # Format for 
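End to end, the network/proposal/vote flow can be driven through the boto3 client. A minimal sketch (the admin password just has to satisfy the mocked complexity check, and the invited principal is an arbitrary twelve-digit account ID):

import boto3
from moto import mock_managedblockchain


@mock_managedblockchain
def blockchain_sketch():
    client = boto3.client("managedblockchain", region_name="us-east-1")
    created = client.create_network(
        Name="example-network",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},
        VotingPolicy={
            "ApprovalThresholdPolicy": {
                "ThresholdPercentage": 50,
                "ProposalDurationInHours": 24,
                "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
            }
        },
        MemberConfiguration={
            "Name": "example-member",
            "FrameworkConfiguration": {
                "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
            },
        },
    )
    proposal_id = client.create_proposal(
        NetworkId=created["NetworkId"],
        MemberId=created["MemberId"],
        Actions={"Invitations": [{"Principal": "123456789012"}]},
    )["ProposalId"]
    # A single YES from the only member meets the >= 50% threshold.
    client.vote_on_proposal(
        NetworkId=created["NetworkId"],
        ProposalId=proposal_id,
        VoterMemberId=created["MemberId"],
        Vote="YES",
    )
    proposal = client.get_proposal(
        NetworkId=created["NetworkId"], ProposalId=proposal_id
    )["Proposal"]
    print(proposal["Status"])  # APPROVED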
list_nodes + d = { + "Id": self.id, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "AvailabilityZone": self.availabilityzone, + "InstanceType": self.instancetype, + } + return d + + def get_format(self): + # Format for get_node + frameworkattributes = { + "Fabric": { + "PeerEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30003".format( + self.id.lower(), + self.networkid.lower(), + self.memberid.lower(), + self.region, + ), + "PeerEventEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30004".format( + self.id.lower(), + self.networkid.lower(), + self.memberid.lower(), + self.region, + ), + } + } + + d = { + "NetworkId": self.networkid, + "MemberId": self.memberid, + "Id": self.id, + "InstanceType": self.instancetype, + "AvailabilityZone": self.availabilityzone, + "FrameworkAttributes": frameworkattributes, + "LogPublishingConfiguration": self.logpublishingconfiguration, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + return d + + def delete(self): + self.status = "DELETED" + + def update(self, logpublishingconfiguration): + self.logpublishingconfiguration = logpublishingconfiguration + + +class ManagedBlockchainBackend(BaseBackend): + def __init__(self, region_name): + self.networks = {} + self.members = {} + self.proposals = {} + self.invitations = {} + self.nodes = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_network( + self, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description=None, + ): + # Check framework + if framework not in FRAMEWORKS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + # Check framework version + if frameworkversion not in FRAMEWORKVERSIONS: + raise BadRequestException( + "CreateNetwork", + "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format( + frameworkversion + ), + ) + + # Check edition + if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + # Generate network ID + network_id = get_network_id() + + # Generate member ID and create the initial member + member_id = get_member_id() + self.members[member_id] = ManagedBlockchainMember( + id=member_id, + networkid=network_id, + member_configuration=member_configuration, + region=self.region_name, + ) + + self.networks[network_id] = ManagedBlockchainNetwork( + id=network_id, + name=name, + framework=framework, + frameworkversion=frameworkversion, + frameworkconfiguration=frameworkconfiguration, + voting_policy=voting_policy, + member_configuration=member_configuration, + region=self.region_name, + description=description, + ) + + # Return the network and member ID + d = {"NetworkId": network_id, "MemberId": member_id} + return d
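+ + # Illustrative call shape only - the field names come from the checks above + # and the models, and the values are hypothetical: + # create_network( + # name="testnetwork", + # framework="HYPERLEDGER_FABRIC", + # frameworkversion="1.2", + # frameworkconfiguration={"Fabric": {"Edition": "STARTER"}}, + # voting_policy={"ApprovalThresholdPolicy": {"ThresholdPercentage": 50, + # "ProposalDurationInHours": 24, "ThresholdComparator": "GREATER_THAN"}}, + # member_configuration={"Name": "testmember1", "FrameworkConfiguration": + # {"Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}}}, + # )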
+ + def list_networks(self): + return self.networks.values() + + def get_network(self, network_id): + if network_id not in self.networks: + raise ResourceNotFoundException( + "GetNetwork", "Network {0} not found.".format(network_id) + ) + return self.networks.get(network_id) + + def create_proposal( + self, networkid, memberid, actions, description=None, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateProposal", "Network {0} not found.".format(networkid) + ) + + # Check if member exists + if memberid not in self.members: + raise ResourceNotFoundException( + "CreateProposal", "Member {0} not found.".format(memberid) + ) + + # The CLI docs say that Invitations and Removals cannot both be passed - but + # the API does not throw an error, so both are accepted here + if "Invitations" in actions: + for propinvitation in actions["Invitations"]: + if re.match("[0-9]{12}", propinvitation["Principal"]) is None: + raise InvalidRequestException( + "CreateProposal", + "Account ID format specified in proposal is not valid.", + ) + + if "Removals" in actions: + for propmember in actions["Removals"]: + if propmember["MemberId"] not in self.members: + raise InvalidRequestException( + "CreateProposal", + "Member ID format specified in proposal is not valid.", + ) + + # Generate proposal ID + proposal_id = get_proposal_id() + + self.proposals[proposal_id] = ManagedBlockchainProposal( + id=proposal_id, + networkid=networkid, + memberid=memberid, + membername=self.members.get(memberid).name, + numofmembers=number_of_members_in_network(self.members, networkid), + actions=actions, + network_expirtation=self.networks.get(networkid).vote_pol_proposal_duration, + network_threshold=self.networks.get( + networkid + ).vote_pol_threshold_percentage, + network_threshold_comp=self.networks.get( + networkid + ).vote_pol_threshold_comparator, + description=description, + ) + + # Return the proposal ID + d = {"ProposalId": proposal_id} + return d + + def list_proposals(self, networkid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListProposals", "Network {0} not found.".format(networkid) + ) + + proposalsfornetwork = [] + for proposal_id in self.proposals: + if self.proposals.get(proposal_id).network_id == networkid: + # See if any are expired + self.proposals.get(proposal_id).check_to_expire_proposal() + proposalsfornetwork.append(self.proposals[proposal_id]) + return proposalsfornetwork + + def get_proposal(self, networkid, proposalid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetProposal", "Network {0} not found.".format(networkid) + ) + + if proposalid not in self.proposals: + raise ResourceNotFoundException( + "GetProposal", "Proposal {0} not found.".format(proposalid) + ) + + # See if it needs to be set to expired + self.proposals.get(proposalid).check_to_expire_proposal() + return self.proposals.get(proposalid)
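+ + # Status transitions, as implemented here and in set_vote: a proposal starts + # IN_PROGRESS, becomes EXPIRED once the network's proposal duration has passed, + # and becomes APPROVED or REJECTED when the YES or NO percentage crosses the + # network threshold. Votes are only accepted while a proposal is IN_PROGRESS.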
"Proposal {0} has status {1} and you cannot vote on it.".format( + proposalid, self.proposals.get(proposalid).proposal_status + ), + ) + + # Check to see if this member already voted + if votermemberid in self.proposals.get(proposalid).proposal_votes: + raise ResourceAlreadyExistsException( + "VoteOnProposal", + "Member {0} has already voted on proposal {1}.".format( + votermemberid, proposalid + ), + ) + + # Cast vote + self.proposals.get(proposalid).set_vote( + votermemberid, self.members.get(votermemberid).name, vote.upper() + ) + + if self.proposals.get(proposalid).proposal_status == "APPROVED": + # Generate invitations + for propinvitation in self.proposals.get(proposalid).proposal_actions( + "Invitations" + ): + invitation_id = get_invitation_id() + self.invitations[invitation_id] = ManagedBlockchainInvitation( + id=invitation_id, + networkid=networkid, + networkname=self.networks.get(networkid).network_name, + networkframework=self.networks.get(networkid).network_framework, + networkframeworkversion=self.networks.get( + networkid + ).network_framework_version, + networkcreationdate=self.networks.get( + networkid + ).network_creationdate, + region=self.region_name, + networkdescription=self.networks.get(networkid).network_description, + ) + + # Delete members + for propmember in self.proposals.get(proposalid).proposal_actions( + "Removals" + ): + self.delete_member(networkid, propmember["MemberId"]) + + def list_proposal_votes(self, networkid, proposalid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListProposalVotes", "Network {0} not found.".format(networkid) + ) + + if proposalid not in self.proposals: + raise ResourceNotFoundException( + "ListProposalVotes", "Proposal {0} not found.".format(proposalid) + ) + + # Output the vote summaries + proposalvotesfornetwork = [] + for proposal_id in self.proposals: + if self.proposals.get(proposal_id).network_id == networkid: + for pvmemberid in self.proposals.get(proposal_id).proposal_votes: + proposalvotesfornetwork.append( + self.proposals.get(proposal_id).proposal_votes[pvmemberid] + ) + return proposalvotesfornetwork + + def list_invitations(self): + return self.invitations.values() + + def reject_invitation(self, invitationid): + if invitationid not in self.invitations: + raise ResourceNotFoundException( + "RejectInvitation", "InvitationId {0} not found.".format(invitationid) + ) + self.invitations.get(invitationid).reject_invitation() + + def create_member( + self, invitationid, networkid, member_configuration, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateMember", "Network {0} not found.".format(networkid) + ) + + if invitationid not in self.invitations: + raise InvalidRequestException( + "CreateMember", "Invitation {0} not valid".format(invitationid) + ) + + if self.invitations.get(invitationid).invitation_status != "PENDING": + raise InvalidRequestException( + "CreateMember", "Invitation {0} not valid".format(invitationid) + ) + + if ( + member_name_exist_in_network( + self.members, networkid, member_configuration["Name"] + ) + is True + ): + raise InvalidRequestException( + "CreateMember", + "Member name {0} already exists in network {1}.".format( + member_configuration["Name"], networkid + ), + ) + + networkedition = self.networks.get(networkid).network_edition + if ( + number_of_members_in_network(self.members, networkid) + >= EDITIONS[networkedition]["MaxMembers"] + ): + raise 
+ + memberadminpassword = member_configuration["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] + if admin_password_ok(memberadminpassword) is False: + raise BadRequestException("CreateMember", "Invalid request body") + + member_id = get_member_id() + self.members[member_id] = ManagedBlockchainMember( + id=member_id, + networkid=networkid, + member_configuration=member_configuration, + region=self.region_name, + ) + + # Accept the invitation + self.invitations.get(invitationid).accept_invitation() + + # Return the member ID + d = {"MemberId": member_id} + return d + + def list_members(self, networkid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListMembers", "Network {0} not found.".format(networkid) + ) + + membersfornetwork = [] + for member_id in self.members: + if self.members.get(member_id).network_id == networkid: + membersfornetwork.append(self.members[member_id]) + return membersfornetwork + + def get_member(self, networkid, memberid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "GetMember", "Member {0} not found.".format(memberid) + ) + + # Cannot get a member that has been deleted (it does still show up in the list) + if self.members.get(memberid).member_status == "DELETED": + raise ResourceNotFoundException( + "GetMember", "Member {0} not found.".format(memberid) + ) + + return self.members.get(memberid) + + def delete_member(self, networkid, memberid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "DeleteMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "DeleteMember", "Member {0} not found.".format(memberid) + ) + + self.members.get(memberid).delete() + + # Is this the last member in the network? 
(all set to DELETED) + if number_of_members_in_network( + self.members, networkid, member_status="DELETED" + ) == len(self.members): + # Set network status to DELETED for all invitations + for invitation_id in self.invitations: + if ( + self.invitations.get(invitation_id).invitation_networkid + == networkid + ): + self.invitations.get(invitation_id).set_network_status("DELETED") + + # Remove network + del self.networks[networkid] + + # Remove any nodes associated + for nodeid in nodes_in_member(self.nodes, memberid): + del self.nodes[nodeid] + + def update_member(self, networkid, memberid, logpublishingconfiguration): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "UpdateMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "UpdateMember", "Member {0} not found.".format(memberid) + ) + + self.members.get(memberid).update(logpublishingconfiguration) + + def create_node( + self, + networkid, + memberid, + availabilityzone, + instancetype, + logpublishingconfiguration, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "CreateNode", "Member {0} not found.".format(memberid) + ) + + networkedition = self.networks.get(networkid).network_edition + if ( + number_of_nodes_in_member(self.nodes, memberid) + >= EDITIONS[networkedition]["MaxNodesPerMember"] + ): + raise ResourceLimitExceededException( + "CreateNode", + "Maximum number of nodes exceeded in member {0}. The maximum number of nodes you can have in a member in a {1} Edition network is {2}".format( + memberid, + networkedition, + EDITIONS[networkedition]["MaxNodesPerMember"], + ), + ) + + # See if the instance family is correct + correctinstancefamily = False + for chkinsttypepre in EDITIONS["STANDARD"]["AllowedNodeInstanceTypes"]: + chkinsttypepreregex = chkinsttypepre + ".*" + if re.match(chkinsttypepreregex, instancetype, re.IGNORECASE): + correctinstancefamily = True + break + + if correctinstancefamily is False: + raise InvalidRequestException( + "CreateNode", + "Requested instance {0} isn't supported.".format(instancetype), + ) + + # Check for specific types for starter + if networkedition == "STARTER": + if instancetype not in EDITIONS["STARTER"]["AllowedNodeInstanceTypes"]: + raise InvalidRequestException( + "CreateNode", + "Instance type {0} is not supported with STARTER Edition networks.".format( + instancetype + ), + ) + + # Simple availability zone check + chkregionpreregex = self.region_name + "[a-z]" + if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None: + raise InvalidRequestException( + "CreateNode", "Availability Zone is not valid", + ) + + node_id = get_node_id() + self.nodes[node_id] = ManagedBlockchainNode( + id=node_id, + networkid=networkid, + memberid=memberid, + availabilityzone=availabilityzone, + instancetype=instancetype, + logpublishingconfiguration=logpublishingconfiguration, + region=self.region_name, + ) + + # Return the node ID + d = {"NodeId": node_id} + return d + + def list_nodes(self, networkid, memberid, status=None): + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListNodes", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "ListNodes", "Member {0} not 
found.".format(memberid) + ) + + # If member is deleted, cannot list nodes + if self.members.get(memberid).member_status == "DELETED": + raise ResourceNotFoundException( + "ListNodes", "Member {0} not found.".format(memberid) + ) + + nodesformember = [] + for node_id in self.nodes: + if self.nodes.get(node_id).member_id == memberid and ( + status is None or self.nodes.get(node_id).node_status == status + ): + nodesformember.append(self.nodes[node_id]) + return nodesformember + + def get_node(self, networkid, memberid, nodeid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "GetNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "GetNode", "Node {0} not found.".format(nodeid) + ) + + # Cannot get a node than has been deleted (it does show up in the list) + if self.nodes.get(nodeid).node_status == "DELETED": + raise ResourceNotFoundException( + "GetNode", "Node {0} not found.".format(nodeid) + ) + + return self.nodes.get(nodeid) + + def delete_node(self, networkid, memberid, nodeid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "DeleteNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "DeleteNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "DeleteNode", "Node {0} not found.".format(nodeid) + ) + + self.nodes.get(nodeid).delete() + + def update_node(self, networkid, memberid, nodeid, logpublishingconfiguration): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "UpdateNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "UpdateNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "UpdateNode", "Node {0} not found.".format(nodeid) + ) + + self.nodes.get(nodeid).update(logpublishingconfiguration) + + +managedblockchain_backends = {} +for region in Session().get_available_regions("managedblockchain"): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py new file mode 100644 index 000000000..7dd628eba --- /dev/null +++ b/moto/managedblockchain/responses.py @@ -0,0 +1,427 @@ +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import urlparse, parse_qs + +from moto.core.responses import BaseResponse +from .models import managedblockchain_backends +from .utils import ( + region_from_managedblckchain_url, + networkid_from_managedblockchain_url, + proposalid_from_managedblockchain_url, + invitationid_from_managedblockchain_url, + memberid_from_managedblockchain_url, + nodeid_from_managedblockchain_url, +) + + +class ManagedBlockchainResponse(BaseResponse): + def __init__(self, backend): + super(ManagedBlockchainResponse, self).__init__() + self.backend = backend + + @classmethod + def network_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return 
response_instance._network_response(request, full_url, headers) + + def _network_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + if method == "GET": + return self._all_networks_response(request, full_url, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._network_response_post(json_body, querystring, headers) + + def _all_networks_response(self, request, full_url, headers): + mbcnetworks = self.backend.list_networks() + response = json.dumps( + {"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + def _network_response_post(self, json_body, querystring, headers): + name = json_body["Name"] + framework = json_body["Framework"] + frameworkversion = json_body["FrameworkVersion"] + frameworkconfiguration = json_body["FrameworkConfiguration"] + voting_policy = json_body["VotingPolicy"] + member_configuration = json_body["MemberConfiguration"] + + # Optional + description = json_body.get("Description", None) + + response = self.backend.create_network( + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description, + ) + return 200, headers, json.dumps(response) + + @classmethod + def networkid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._networkid_response(request, full_url, headers) + + def _networkid_response(self, request, full_url, headers): + method = request.method + + if method == "GET": + network_id = networkid_from_managedblockchain_url(full_url) + return self._networkid_response_get(network_id, headers) + + def _networkid_response_get(self, network_id, headers): + mbcnetwork = self.backend.get_network(network_id) + response = json.dumps({"Network": mbcnetwork.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + @classmethod + def proposal_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposal_response(request, full_url, headers) + + def _proposal_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_proposals_response(network_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._proposal_response_post( + network_id, json_body, querystring, headers + ) + + def _all_proposals_response(self, network_id, headers): + proposals = self.backend.list_proposals(network_id) + response = json.dumps( + {"Proposals": [proposal.to_dict() for proposal in proposals]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + def _proposal_response_post(self, network_id, json_body, querystring, headers): + memberid = 
json_body["MemberId"] + actions = json_body["Actions"] + + # Optional + description = json_body.get("Description", None) + + response = self.backend.create_proposal( + network_id, memberid, actions, description, + ) + return 200, headers, json.dumps(response) + + @classmethod + def proposalid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposalid_response(request, full_url, headers) + + def _proposalid_response(self, request, full_url, headers): + method = request.method + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + proposal_id = proposalid_from_managedblockchain_url(full_url) + return self._proposalid_response_get(network_id, proposal_id, headers) + + def _proposalid_response_get(self, network_id, proposal_id, headers): + proposal = self.backend.get_proposal(network_id, proposal_id) + response = json.dumps({"Proposal": proposal.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + @classmethod + def proposal_votes_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposal_votes_response(request, full_url, headers) + + def _proposal_votes_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + proposal_id = proposalid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_proposal_votes_response(network_id, proposal_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._proposal_votes_response_post( + network_id, proposal_id, json_body, querystring, headers + ) + + def _all_proposal_votes_response(self, network_id, proposal_id, headers): + proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id) + response = json.dumps({"ProposalVotes": proposalvotes}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _proposal_votes_response_post( + self, network_id, proposal_id, json_body, querystring, headers + ): + votermemberid = json_body["VoterMemberId"] + vote = json_body["Vote"] + + self.backend.vote_on_proposal( + network_id, proposal_id, votermemberid, vote, + ) + return 200, headers, "" + + @classmethod + def invitation_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._invitation_response(request, full_url, headers) + + def _invitation_response(self, request, full_url, headers): + method = request.method + if method == "GET": + return self._all_invitation_response(request, full_url, headers) + + def _all_invitation_response(self, request, full_url, headers): + invitations = self.backend.list_invitations() + response = json.dumps( + {"Invitations": [invitation.to_dict() for invitation in invitations]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + @classmethod + def 
invitationid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._invitationid_response(request, full_url, headers) + + def _invitationid_response(self, request, full_url, headers): + method = request.method + if method == "DELETE": + invitation_id = invitationid_from_managedblockchain_url(full_url) + return self._invitationid_response_delete(invitation_id, headers) + + def _invitationid_response_delete(self, invitation_id, headers): + self.backend.reject_invitation(invitation_id) + headers["content-type"] = "application/json" + return 200, headers, "" + + @classmethod + def member_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._member_response(request, full_url, headers) + + def _member_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_members_response(network_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._member_response_post( + network_id, json_body, querystring, headers + ) + + def _all_members_response(self, network_id, headers): + members = self.backend.list_members(network_id) + response = json.dumps({"Members": [member.to_dict() for member in members]}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _member_response_post(self, network_id, json_body, querystring, headers): + invitationid = json_body["InvitationId"] + member_configuration = json_body["MemberConfiguration"] + + response = self.backend.create_member( + invitationid, network_id, member_configuration, + ) + return 200, headers, json.dumps(response) + + @classmethod + def memberid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._memberid_response(request, full_url, headers) + + def _memberid_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + if method == "GET": + return self._memberid_response_get(network_id, member_id, headers) + elif method == "PATCH": + json_body = json.loads(body.decode("utf-8")) + return self._memberid_response_patch( + network_id, member_id, json_body, headers + ) + elif method == "DELETE": + return self._memberid_response_delete(network_id, member_id, headers) + + def _memberid_response_get(self, network_id, member_id, headers): + member = self.backend.get_member(network_id, member_id) + response = json.dumps({"Member": member.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _memberid_response_patch(self, network_id, member_id, json_body, headers): + logpublishingconfiguration = 
json_body["LogPublishingConfiguration"] + self.backend.update_member( + network_id, member_id, logpublishingconfiguration, + ) + return 200, headers, "" + + def _memberid_response_delete(self, network_id, member_id, headers): + self.backend.delete_member(network_id, member_id) + headers["content-type"] = "application/json" + return 200, headers, "" + + @classmethod + def node_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._node_response(request, full_url, headers) + + def _node_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + if method == "GET": + status = None + if "status" in querystring: + status = querystring["status"][0] + return self._all_nodes_response(network_id, member_id, status, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._node_response_post( + network_id, member_id, json_body, querystring, headers + ) + + def _all_nodes_response(self, network_id, member_id, status, headers): + nodes = self.backend.list_nodes(network_id, member_id, status) + response = json.dumps({"Nodes": [node.to_dict() for node in nodes]}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _node_response_post( + self, network_id, member_id, json_body, querystring, headers + ): + instancetype = json_body["NodeConfiguration"]["InstanceType"] + availabilityzone = json_body["NodeConfiguration"]["AvailabilityZone"] + logpublishingconfiguration = json_body["NodeConfiguration"][ + "LogPublishingConfiguration" + ] + + response = self.backend.create_node( + network_id, + member_id, + availabilityzone, + instancetype, + logpublishingconfiguration, + ) + return 200, headers, json.dumps(response) + + @classmethod + def nodeid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._nodeid_response(request, full_url, headers) + + def _nodeid_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + node_id = nodeid_from_managedblockchain_url(full_url) + if method == "GET": + return self._nodeid_response_get(network_id, member_id, node_id, headers) + elif method == "PATCH": + json_body = json.loads(body.decode("utf-8")) + return self._nodeid_response_patch( + network_id, member_id, node_id, json_body, headers + ) + elif method == "DELETE": + return self._nodeid_response_delete(network_id, member_id, node_id, headers) + + def _nodeid_response_get(self, network_id, member_id, node_id, headers): + node = self.backend.get_node(network_id, member_id, node_id) + response = json.dumps({"Node": node.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _nodeid_response_patch( + self, network_id, member_id, node_id, 
json_body, headers + ): + logpublishingconfiguration = json_body + self.backend.update_node( + network_id, member_id, node_id, logpublishingconfiguration, + ) + return 200, headers, "" + + def _nodeid_response_delete(self, network_id, member_id, node_id, headers): + self.backend.delete_node(network_id, member_id, node_id) + headers["content-type"] = "application/json" + return 200, headers, "" diff --git a/moto/managedblockchain/urls.py b/moto/managedblockchain/urls.py new file mode 100644 index 000000000..442a73233 --- /dev/null +++ b/moto/managedblockchain/urls.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals +from .responses import ManagedBlockchainResponse + +url_bases = ["https?://managedblockchain.(.+).amazonaws.com"] + +url_paths = { + "{0}/networks$": ManagedBlockchainResponse.network_response, + "{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals$": ManagedBlockchainResponse.proposal_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)$": ManagedBlockchainResponse.proposalid_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)/votes$": ManagedBlockchainResponse.proposal_votes_response, + "{0}/invitations$": ManagedBlockchainResponse.invitation_response, + "{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response, + "{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes$": ManagedBlockchainResponse.node_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes?(?P<querys>[^/.]+)$": ManagedBlockchainResponse.node_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes/(?P<nodeid>[^/.]+)$": ManagedBlockchainResponse.nodeid_response, +}
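+ + # Illustrative routing (hypothetical IDs): a GET to + # https://managedblockchain.us-east-1.amazonaws.com/networks/n-<26 chars>/proposals/p-<26 chars> + # matches the proposalid_response entry above; the handlers then re-extract + # the IDs from the URL with the helpers in utils.py.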
diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py new file mode 100644 index 000000000..d0485829b --- /dev/null +++ b/moto/managedblockchain/utils.py @@ -0,0 +1,135 @@ +import random +import re +import string + +from six.moves.urllib.parse import urlparse + + +def region_from_managedblckchain_url(url): + domain = urlparse(url).netloc + region = "us-east-1" + if "." in domain: + region = domain.split(".")[1] + return region + + +def networkid_from_managedblockchain_url(full_url): + id_search = re.search(r"\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_network_id(): + return "n-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def memberid_from_managedblockchain_url(full_url): + id_search = re.search(r"\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_member_id(): + return "m-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def proposalid_from_managedblockchain_url(full_url): + id_search = re.search(r"\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_proposal_id(): + return "p-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def invitationid_from_managedblockchain_url(full_url): + id_search = re.search(r"\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_invitation_id(): + return "in-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def member_name_exist_in_network(members, networkid, membername): + membernameexists = False + for member_id in members: + if members.get(member_id).network_id == networkid: + if members.get(member_id).name == membername: + membernameexists = True + break + return membernameexists + + +def number_of_members_in_network(members, networkid, member_status=None): + return len( + [ + membid + for membid in members + if members.get(membid).network_id == networkid + and ( + member_status is None + or members.get(membid).member_status == member_status + ) + ] + ) + + +def admin_password_ok(password): + if not re.search("[a-z]", password): + return False + elif not re.search("[A-Z]", password): + return False + elif not re.search("[0-9]", password): + return False + elif re.search("['\"@\\/]", password): + return False + else: + return True + + +def nodeid_from_managedblockchain_url(full_url): + id_search = re.search(r"\/nd-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_node_id(): + return "nd-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def number_of_nodes_in_member(nodes, memberid, node_status=None): + return len( + [ + nodid + for nodid in nodes + if nodes.get(nodid).member_id == memberid + and (node_status is None or nodes.get(nodid).node_status == node_status) + ] + ) + + +def nodes_in_member(nodes, memberid): + return [nodid for nodid in nodes if nodes.get(nodid).member_id == memberid] diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 96d918cc9..84bd3b103 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -125,6 +125,9 @@ class OpsworkInstance(BaseModel): def status(self): if self.instance is None: return "stopped" + # OpsWorks reports the "running" state as "online" + elif self.instance._state.name == "running": + return "online" return self.instance._state.name def to_dict(self): diff --git 
a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index b40908862..ca64b9931 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -2,14 +2,59 @@ from __future__ import unicode_literals from moto.core.exceptions import JsonRESTError +class AccountAlreadyRegisteredException(JsonRESTError): + code = 400 + + def __init__(self): + super(AccountAlreadyRegisteredException, self).__init__( + "AccountAlreadyRegisteredException", + "The provided account is already a delegated administrator for your organization.", + ) + + +class AccountNotRegisteredException(JsonRESTError): + code = 400 + + def __init__(self): + super(AccountNotRegisteredException, self).__init__( + "AccountNotRegisteredException", + "The provided account is not a registered delegated administrator for your organization.", + ) + + +class AccountNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(AccountNotFoundException, self).__init__( + "AccountNotFoundException", "You specified an account that doesn't exist." + ) + + +class AWSOrganizationsNotInUseException(JsonRESTError): + code = 400 + + def __init__(self): + super(AWSOrganizationsNotInUseException, self).__init__( + "AWSOrganizationsNotInUseException", + "Your account is not a member of an organization.", + ) + + +class ConstraintViolationException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ConstraintViolationException, self).__init__( + "ConstraintViolationException", message + ) + + class InvalidInputException(JsonRESTError): code = 400 - def __init__(self): - super(InvalidInputException, self).__init__( - "InvalidInputException", - "You provided a value that does not match the required pattern.", - ) + def __init__(self, message): + super(InvalidInputException, self).__init__("InvalidInputException", message) class DuplicateOrganizationalUnitException(JsonRESTError): @@ -20,3 +65,50 @@ class DuplicateOrganizationalUnitException(JsonRESTError): "DuplicateOrganizationalUnitException", "An OU with the same name already exists.", ) + + +class DuplicatePolicyException(JsonRESTError): + code = 400 + + def __init__(self): + super(DuplicatePolicyException, self).__init__( + "DuplicatePolicyException", "A policy with the same name already exists." + ) + + +class PolicyTypeAlreadyEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeAlreadyEnabledException, self).__init__( + "PolicyTypeAlreadyEnabledException", + "The specified policy type is already enabled.", + ) + + +class PolicyTypeNotEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeNotEnabledException, self).__init__( + "PolicyTypeNotEnabledException", + "This operation can be performed only for enabled policy types.", + ) + + +class RootNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(RootNotFoundException, self).__init__( + "RootNotFoundException", "You specified a root that doesn't exist." + ) + + +class TargetNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(TargetNotFoundException, self).__init__( + "TargetNotFoundException", "You specified a target that doesn't exist." 
+ ) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 0db069f9a..5655326c0 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -4,13 +4,23 @@ import datetime import re import json -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID from moto.core.exceptions import RESTError from moto.core.utils import unix_time from moto.organizations import utils from moto.organizations.exceptions import ( InvalidInputException, DuplicateOrganizationalUnitException, + DuplicatePolicyException, + AccountNotFoundException, + ConstraintViolationException, + AccountAlreadyRegisteredException, + AWSOrganizationsNotInUseException, + AccountNotRegisteredException, + RootNotFoundException, + PolicyTypeAlreadyEnabledException, + PolicyTypeNotEnabledException, + TargetNotFoundException, ) @@ -84,15 +94,13 @@ class FakeAccount(BaseModel): def describe(self): return { - "Account": { - "Id": self.id, - "Arn": self.arn, - "Email": self.email, - "Name": self.name, - "Status": self.status, - "JoinedMethod": self.joined_method, - "JoinedTimestamp": unix_time(self.create_time), - } + "Id": self.id, + "Arn": self.arn, + "Email": self.email, + "Name": self.name, + "Status": self.status, + "JoinedMethod": self.joined_method, + "JoinedTimestamp": unix_time(self.create_time), } @@ -120,6 +128,13 @@ class FakeOrganizationalUnit(BaseModel): class FakeRoot(FakeOrganizationalUnit): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] + def __init__(self, organization, **kwargs): super(FakeRoot, self).__init__(organization, **kwargs) self.type = "ROOT" @@ -137,20 +152,55 @@ class FakeRoot(FakeOrganizationalUnit): "PolicyTypes": self.policy_types, } + def add_policy_type(self, policy_type): + if policy_type not in self.SUPPORTED_POLICY_TYPES: + raise InvalidInputException("You specified an invalid value.") + + if any(type["Type"] == policy_type for type in self.policy_types): + raise PolicyTypeAlreadyEnabledException + + self.policy_types.append({"Type": policy_type, "Status": "ENABLED"}) + + def remove_policy_type(self, policy_type): + if not FakePolicy.supported_policy_type(policy_type): + raise InvalidInputException("You specified an invalid value.") + + if all(type["Type"] != policy_type for type in self.policy_types): + raise PolicyTypeNotEnabledException + + self.policy_types.remove({"Type": policy_type, "Status": "ENABLED"}) + + +class FakePolicy(BaseModel): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] -class FakeServiceControlPolicy(BaseModel): def __init__(self, organization, **kwargs): self.content = kwargs.get("Content") self.description = kwargs.get("Description") self.name = kwargs.get("Name") self.type = kwargs.get("Type") - self.id = utils.make_random_service_control_policy_id() + self.id = utils.make_random_policy_id() self.aws_managed = False self.organization_id = organization.id self.master_account_id = organization.master_account_id - self._arn_format = utils.SCP_ARN_FORMAT self.attachments = [] + if not FakePolicy.supported_policy_type(self.type): + raise InvalidInputException("You specified an invalid value.") + elif self.type == "AISERVICES_OPT_OUT_POLICY": + self._arn_format = utils.AI_POLICY_ARN_FORMAT + elif self.type == "SERVICE_CONTROL_POLICY": + self._arn_format = utils.SCP_ARN_FORMAT + else: + raise NotImplementedError( + "The {0} policy type 
has not been implemented".format(self.type) + ) + @property def arn(self): return self._arn_format.format( @@ -172,6 +222,107 @@ class FakeServiceControlPolicy(BaseModel): } } + @staticmethod + def supported_policy_type(policy_type): + return policy_type in FakePolicy.SUPPORTED_POLICY_TYPES + + +class FakeServiceAccess(BaseModel): + # List of trusted services, which support trusted access with Organizations + # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html + TRUSTED_SERVICES = [ + "aws-artifact-account-sync.amazonaws.com", + "backup.amazonaws.com", + "member.org.stacksets.cloudformation.amazonaws.com", + "cloudtrail.amazonaws.com", + "compute-optimizer.amazonaws.com", + "config.amazonaws.com", + "config-multiaccountsetup.amazonaws.com", + "controltower.amazonaws.com", + "ds.amazonaws.com", + "fms.amazonaws.com", + "guardduty.amazonaws.com", + "access-analyzer.amazonaws.com", + "license-manager.amazonaws.com", + "license-manager.member-account.amazonaws.com.", + "macie.amazonaws.com", + "ram.amazonaws.com", + "servicecatalog.amazonaws.com", + "servicequotas.amazonaws.com", + "sso.amazonaws.com", + "ssm.amazonaws.com", + "tagpolicies.tag.amazonaws.com", + ] + + def __init__(self, **kwargs): + if not self.trusted_service(kwargs["ServicePrincipal"]): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + self.service_principal = kwargs["ServicePrincipal"] + self.date_enabled = datetime.datetime.utcnow() + + def describe(self): + return { + "ServicePrincipal": self.service_principal, + "DateEnabled": unix_time(self.date_enabled), + } + + @staticmethod + def trusted_service(service_principal): + return service_principal in FakeServiceAccess.TRUSTED_SERVICES + + +class FakeDelegatedAdministrator(BaseModel): + # List of services, which support a different Account to be a delegated administrator + # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html + SUPPORTED_SERVICES = [ + "config-multiaccountsetup.amazonaws.com", + "guardduty.amazonaws.com", + "access-analyzer.amazonaws.com", + "macie.amazonaws.com", + "servicecatalog.amazonaws.com", + "ssm.amazonaws.com", + ] + + def __init__(self, account): + self.account = account + self.enabled_date = datetime.datetime.utcnow() + self.services = {} + + def add_service_principal(self, service_principal): + if service_principal in self.services: + raise AccountAlreadyRegisteredException + + if not self.supported_service(service_principal): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + self.services[service_principal] = { + "ServicePrincipal": service_principal, + "DelegationEnabledDate": unix_time(datetime.datetime.utcnow()), + } + + def remove_service_principal(self, service_principal): + if service_principal not in self.services: + raise InvalidInputException( + "You specified an unrecognized service principal." 
+ ) + + self.services.pop(service_principal) + + def describe(self): + admin = self.account.describe() + admin["DelegationEnabledDate"] = unix_time(self.enabled_date) + + return admin + + @staticmethod + def supported_service(service_principal): + return service_principal in FakeDelegatedAdministrator.SUPPORTED_SERVICES + class OrganizationsBackend(BaseBackend): def __init__(self): @@ -179,6 +330,15 @@ class OrganizationsBackend(BaseBackend): self.accounts = [] self.ou = [] self.policies = [] + self.services = [] + self.admins = [] + + def _get_root_by_id(self, root_id): + root = next((ou for ou in self.ou if ou.id == root_id), None) + if not root: + raise RootNotFoundException + + return root def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs["FeatureSet"]) @@ -189,7 +349,7 @@ class OrganizationsBackend(BaseBackend): ) master_account.id = self.org.master_account_id self.accounts.append(master_account) - default_policy = FakeServiceControlPolicy( + default_policy = FakePolicy( self.org, Name="FullAWSAccess", Description="Allows access to every operation", @@ -210,10 +370,7 @@ class OrganizationsBackend(BaseBackend): def describe_organization(self): if not self.org: - raise RESTError( - "AWSOrganizationsNotInUseException", - "Your account is not a member of an organization.", - ) + raise AWSOrganizationsNotInUseException return self.org.describe() def list_roots(self): @@ -276,10 +433,7 @@ class OrganizationsBackend(BaseBackend): (account for account in self.accounts if account.id == account_id), None ) if account is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException return account def get_account_by_attr(self, attr, value): @@ -292,15 +446,12 @@ class OrganizationsBackend(BaseBackend): None, ) if account is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException return account def describe_account(self, **kwargs): account = self.get_account_by_id(kwargs["AccountId"]) - return account.describe() + return dict(Account=account.describe()) def describe_create_account_status(self, **kwargs): account = self.get_account_by_attr( @@ -309,15 +460,13 @@ class OrganizationsBackend(BaseBackend): return account.create_account_status def list_accounts(self): - return dict( - Accounts=[account.describe()["Account"] for account in self.accounts] - ) + return dict(Accounts=[account.describe() for account in self.accounts]) def list_accounts_for_parent(self, **kwargs): parent_id = self.validate_parent_id(kwargs["ParentId"]) return dict( Accounts=[ - account.describe()["Account"] + account.describe() for account in self.accounts if account.parent_id == parent_id ] @@ -350,7 +499,7 @@ class OrganizationsBackend(BaseBackend): elif kwargs["ChildType"] == "ORGANIZATIONAL_UNIT": obj_list = self.ou else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") return dict( Children=[ {"Id": obj.id, "Type": kwargs["ChildType"]} @@ -360,12 +509,15 @@ class OrganizationsBackend(BaseBackend): ) def create_policy(self, **kwargs): - new_policy = FakeServiceControlPolicy(self.org, **kwargs) + new_policy = FakePolicy(self.org, **kwargs) + for policy in self.policies: + if kwargs["Name"] == policy.name: + raise DuplicatePolicyException self.policies.append(new_policy) return new_policy.describe() def describe_policy(self, **kwargs): - if 
re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -375,11 +527,29 @@ class OrganizationsBackend(BaseBackend): "You specified a policy that doesn't exist.", ) else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") + return policy.describe() + + def get_policy_by_id(self, policy_id): + policy = next( + (policy for policy in self.policies if policy.id == policy_id), None + ) + if policy is None: + raise RESTError( + "PolicyNotFoundException", + "We can't find a policy with the PolicyId that you specified.", + ) + return policy + + def update_policy(self, **kwargs): + policy = self.get_policy_by_id(kwargs["PolicyId"]) + policy.name = kwargs.get("Name", policy.name) + policy.description = kwargs.get("Description", policy.description) + policy.content = kwargs.get("Content", policy.content) return policy.describe() def attach_policy(self, **kwargs): - policy = next((p for p in self.policies if p.id == kwargs["PolicyId"]), None) + policy = self.get_policy_by_id(kwargs["PolicyId"]) if re.compile(utils.ROOT_ID_REGEX).match(kwargs["TargetId"]) or re.compile( utils.OU_ID_REGEX ).match(kwargs["TargetId"]): @@ -402,20 +572,38 @@ class OrganizationsBackend(BaseBackend): account.attached_policies.append(policy) policy.attachments.append(account) else: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") def list_policies(self, **kwargs): return dict( Policies=[p.describe()["Policy"]["PolicySummary"] for p in self.policies] ) + def delete_policy(self, **kwargs): + for idx, policy in enumerate(self.policies): + if policy.id == kwargs["PolicyId"]: + if self.list_targets_for_policy(PolicyId=policy.id)["Targets"]: + raise RESTError( + "PolicyInUseException", + "The policy is attached to one or more entities. 
You must detach it from all roots, OUs, and accounts before performing this operation.", + ) + del self.policies[idx] + return + raise RESTError( + "PolicyNotFoundException", + "We can't find a policy with the PolicyId that you specified.", + ) + def list_policies_for_target(self, **kwargs): - if re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): + filter = kwargs["Filter"] + + if re.match(utils.ROOT_ID_REGEX, kwargs["TargetId"]): + obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) + if obj is None: + raise TargetNotFoundException + elif re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) if obj is None: raise RESTError( @@ -425,20 +613,28 @@ class OrganizationsBackend(BaseBackend): elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs["TargetId"]): obj = next((a for a in self.accounts if a.id == kwargs["TargetId"]), None) if obj is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") + + if not FakePolicy.supported_policy_type(filter): + raise InvalidInputException("You specified an invalid value.") + + if filter not in ["AISERVICES_OPT_OUT_POLICY", "SERVICE_CONTROL_POLICY"]: + raise NotImplementedError( + "The {0} policy type has not been implemented".format(filter) + ) + return dict( Policies=[ - p.describe()["Policy"]["PolicySummary"] for p in obj.attached_policies + p.describe()["Policy"]["PolicySummary"] + for p in obj.attached_policies + if p.type == filter ] ) def list_targets_for_policy(self, **kwargs): - if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -448,7 +644,7 @@ class OrganizationsBackend(BaseBackend): "You specified a policy that doesn't exist.", ) else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") objects = [ {"TargetId": obj.id, "Arn": obj.arn, "Name": obj.name, "Type": obj.type} for obj in policy.attachments @@ -459,7 +655,9 @@ class OrganizationsBackend(BaseBackend): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." + ) new_tags = {tag["Key"]: tag["Value"] for tag in kwargs["Tags"]} account.tags.update(new_tags) @@ -468,7 +666,9 @@ class OrganizationsBackend(BaseBackend): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." + ) tags = [{"Key": key, "Value": value} for key, value in account.tags.items()] return dict(Tags=tags) @@ -477,10 +677,180 @@ class OrganizationsBackend(BaseBackend): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." 
+ ) for key in kwargs["TagKeys"]: account.tags.pop(key, None) + def enable_aws_service_access(self, **kwargs): + service = FakeServiceAccess(**kwargs) + + # enabling an existing service results in no changes + if any( + service["ServicePrincipal"] == kwargs["ServicePrincipal"] + for service in self.services + ): + return + + self.services.append(service.describe()) + + def list_aws_service_access_for_organization(self): + return dict(EnabledServicePrincipals=self.services) + + def disable_aws_service_access(self, **kwargs): + if not FakeServiceAccess.trusted_service(kwargs["ServicePrincipal"]): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + service_principal = next( + ( + service + for service in self.services + if service["ServicePrincipal"] == kwargs["ServicePrincipal"] + ), + None, + ) + + if service_principal: + self.services.remove(service_principal) + + def register_delegated_administrator(self, **kwargs): + account_id = kwargs["AccountId"] + + if account_id == ACCOUNT_ID: + raise ConstraintViolationException( + "You cannot register master account/yourself as delegated administrator for your organization." + ) + + account = self.get_account_by_id(account_id) + + admin = next( + (admin for admin in self.admins if admin.account.id == account_id), None + ) + if admin is None: + admin = FakeDelegatedAdministrator(account) + self.admins.append(admin) + + admin.add_service_principal(kwargs["ServicePrincipal"]) + + def list_delegated_administrators(self, **kwargs): + admins = self.admins + service = kwargs.get("ServicePrincipal") + + if service: + if not FakeDelegatedAdministrator.supported_service(service): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + admins = [admin for admin in admins if service in admin.services] + + delegated_admins = [admin.describe() for admin in admins] + + return dict(DelegatedAdministrators=delegated_admins) + + def list_delegated_services_for_account(self, **kwargs): + admin = next( + (admin for admin in self.admins if admin.account.id == kwargs["AccountId"]), + None, + ) + if admin is None: + account = next( + ( + account + for account in self.accounts + if account.id == kwargs["AccountId"] + ), + None, + ) + if account: + raise AccountNotRegisteredException + + raise AWSOrganizationsNotInUseException + + services = [service for service in admin.services.values()] + + return dict(DelegatedServices=services) + + def deregister_delegated_administrator(self, **kwargs): + account_id = kwargs["AccountId"] + service = kwargs["ServicePrincipal"] + + if account_id == ACCOUNT_ID: + raise ConstraintViolationException( + "You cannot register master account/yourself as delegated administrator for your organization." 
+            )
+
+        admin = next(
+            (admin for admin in self.admins if admin.account.id == account_id), None,
+        )
+        if admin is None:
+            account = next(
+                (
+                    account
+                    for account in self.accounts
+                    if account.id == kwargs["AccountId"]
+                ),
+                None,
+            )
+            if account:
+                raise AccountNotRegisteredException
+
+            raise AccountNotFoundException
+
+        admin.remove_service_principal(service)
+
+        # remove the delegated admin entry once no services remain attached
+        if not admin.services:
+            self.admins.remove(admin)
+
+    def enable_policy_type(self, **kwargs):
+        root = self._get_root_by_id(kwargs["RootId"])
+
+        root.add_policy_type(kwargs["PolicyType"])
+
+        return dict(Root=root.describe())
+
+    def disable_policy_type(self, **kwargs):
+        root = self._get_root_by_id(kwargs["RootId"])
+
+        root.remove_policy_type(kwargs["PolicyType"])
+
+        return dict(Root=root.describe())
+
+    def detach_policy(self, **kwargs):
+        policy = self.get_policy_by_id(kwargs["PolicyId"])
+        root_id_regex = utils.ROOT_ID_REGEX
+        ou_id_regex = utils.OU_ID_REGEX
+        account_id_regex = utils.ACCOUNT_ID_REGEX
+        target_id = kwargs["TargetId"]
+
+        if re.match(root_id_regex, target_id) or re.match(ou_id_regex, target_id):
+            ou = next((ou for ou in self.ou if ou.id == target_id), None)
+            if ou is not None:
+                if policy in ou.attached_policies:
+                    ou.attached_policies.remove(policy)
+                    policy.attachments.remove(ou)
+            else:
+                raise RESTError(
+                    "OrganizationalUnitNotFoundException",
+                    "You specified an organizational unit that doesn't exist.",
+                )
+        elif re.match(account_id_regex, target_id):
+            account = next(
+                (account for account in self.accounts if account.id == target_id), None,
+            )
+            if account is not None:
+                if policy in account.attached_policies:
+                    account.attached_policies.remove(policy)
+                    policy.attachments.remove(account)
+            else:
+                raise AccountNotFoundException
+        else:
+            raise InvalidInputException("You specified an invalid value.")
+
 organizations_backend = OrganizationsBackend()
diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py
index ba7dd4453..73e25178a 100644
--- a/moto/organizations/responses.py
+++ b/moto/organizations/responses.py
@@ -105,6 +105,11 @@ class OrganizationsResponse(BaseResponse):
             self.organizations_backend.describe_policy(**self.request_params)
         )
 
+    def update_policy(self):
+        return json.dumps(
+            self.organizations_backend.update_policy(**self.request_params)
+        )
+
     def attach_policy(self):
         return json.dumps(
             self.organizations_backend.attach_policy(**self.request_params)
@@ -115,6 +120,10 @@ class OrganizationsResponse(BaseResponse):
             self.organizations_backend.list_policies(**self.request_params)
         )
 
+    def delete_policy(self):
+        self.organizations_backend.delete_policy(**self.request_params)
+        return json.dumps({})
+
     def list_policies_for_target(self):
         return json.dumps(
             self.organizations_backend.list_policies_for_target(**self.request_params)
@@ -139,3 +148,61 @@ class OrganizationsResponse(BaseResponse):
         return json.dumps(
             self.organizations_backend.untag_resource(**self.request_params)
         )
+
+    def enable_aws_service_access(self):
+        return json.dumps(
+            self.organizations_backend.enable_aws_service_access(**self.request_params)
+        )
+
+    def list_aws_service_access_for_organization(self):
+        return json.dumps(
+            self.organizations_backend.list_aws_service_access_for_organization()
+        )
+
+    def disable_aws_service_access(self):
+        return json.dumps(
+            self.organizations_backend.disable_aws_service_access(**self.request_params)
+        )
+
+    def register_delegated_administrator(self):
+        return json.dumps(
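A companion sketch for the trusted-service and delegated-administrator calls wired up above. The config.amazonaws.com principal is an assumption; it has to be on the backend's supported-services list for both calls to pass validation, and the account name and email are illustrative:

import boto3
from moto import mock_organizations


@mock_organizations
def delegated_admin_roundtrip():
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    account_id = client.create_account(
        AccountName="member", Email="member@example.com"
    )["CreateAccountStatus"]["AccountId"]

    client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com")
    client.register_delegated_administrator(
        AccountId=account_id, ServicePrincipal="config.amazonaws.com"
    )

    admins = client.list_delegated_administrators()["DelegatedAdministrators"]
    assert admins[0]["Id"] == account_id

    # Deregistering the last service drops the admin entry entirely.
    client.deregister_delegated_administrator(
        AccountId=account_id, ServicePrincipal="config.amazonaws.com"
    )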
self.organizations_backend.register_delegated_administrator( + **self.request_params + ) + ) + + def list_delegated_administrators(self): + return json.dumps( + self.organizations_backend.list_delegated_administrators( + **self.request_params + ) + ) + + def list_delegated_services_for_account(self): + return json.dumps( + self.organizations_backend.list_delegated_services_for_account( + **self.request_params + ) + ) + + def deregister_delegated_administrator(self): + return json.dumps( + self.organizations_backend.deregister_delegated_administrator( + **self.request_params + ) + ) + + def enable_policy_type(self): + return json.dumps( + self.organizations_backend.enable_policy_type(**self.request_params) + ) + + def disable_policy_type(self): + return json.dumps( + self.organizations_backend.disable_policy_type(**self.request_params) + ) + + def detach_policy(self): + return json.dumps( + self.organizations_backend.detach_policy(**self.request_params) + ) diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py index e71357ce6..cec34834c 100644 --- a/moto/organizations/utils.py +++ b/moto/organizations/utils.py @@ -14,6 +14,9 @@ ACCOUNT_ARN_FORMAT = "arn:aws:organizations::{0}:account/{1}/{2}" ROOT_ARN_FORMAT = "arn:aws:organizations::{0}:root/{1}/{2}" OU_ARN_FORMAT = "arn:aws:organizations::{0}:ou/{1}/{2}" SCP_ARN_FORMAT = "arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}" +AI_POLICY_ARN_FORMAT = ( + "arn:aws:organizations::{0}:policy/{1}/aiservices_opt_out_policy/{2}" +) CHARSET = string.ascii_lowercase + string.digits ORG_ID_SIZE = 10 @@ -21,7 +24,7 @@ ROOT_ID_SIZE = 4 ACCOUNT_ID_SIZE = 12 OU_ID_SUFFIX_SIZE = 8 CREATE_ACCOUNT_STATUS_ID_SIZE = 8 -SCP_ID_SIZE = 8 +POLICY_ID_SIZE = 8 EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" ORG_ID_REGEX = r"o-[a-z0-9]{%s}" % ORG_ID_SIZE @@ -29,7 +32,7 @@ ROOT_ID_REGEX = r"r-[a-z0-9]{%s}" % ROOT_ID_SIZE OU_ID_REGEX = r"ou-[a-z0-9]{%s}-[a-z0-9]{%s}" % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) ACCOUNT_ID_REGEX = r"[0-9]{%s}" % ACCOUNT_ID_SIZE CREATE_ACCOUNT_STATUS_ID_REGEX = r"car-[a-z0-9]{%s}" % CREATE_ACCOUNT_STATUS_ID_SIZE -SCP_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, SCP_ID_SIZE) +POLICY_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, POLICY_ID_SIZE) def make_random_org_id(): @@ -76,8 +79,8 @@ def make_random_create_account_status_id(): ) -def make_random_service_control_policy_id(): +def make_random_policy_id(): # The regex pattern for a policy ID string requires "p-" followed by # from 8 to 128 lower-case letters or digits. # e.g. 'p-k2av4a8a' - return "p-" + "".join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) + return "p-" + "".join(random.choice(CHARSET) for x in range(POLICY_ID_SIZE)) diff --git a/moto/packages/boto/README.md b/moto/packages/boto/README.md new file mode 100644 index 000000000..f3a247a58 --- /dev/null +++ b/moto/packages/boto/README.md @@ -0,0 +1,18 @@ +## Removing the `boto` Dependency + +In order to rid `moto` of a direct dependency on the long-deprecated `boto` +package, a subset of the `boto` code has been vendored here. + +This directory contains only the `boto` files required for `moto` to run, +which is a very small subset of the original package's contents. Furthermore, +the `boto` models collected here have been stripped of all superfluous +methods/attributes not used by `moto`. (Any copyright headers on the +original files have been left intact.) + +## Next Steps + +Currently, a small number of `moto` models inherit from these `boto` classes. 
+With some additional work, the inheritance can be dropped in favor of simply +adding the required methods/properties from these `boto` models to their +respective `moto` subclasses, which would allow for these files/directories +to be removed entirely. \ No newline at end of file diff --git a/moto/packages/boto/__init__.py b/moto/packages/boto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/boto/cloudformation/__init__.py b/moto/packages/boto/cloudformation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/boto/cloudformation/stack.py b/moto/packages/boto/cloudformation/stack.py new file mode 100644 index 000000000..26c4bfdf7 --- /dev/null +++ b/moto/packages/boto/cloudformation/stack.py @@ -0,0 +1,9 @@ +class Output(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.key = None + self.value = None + + def __repr__(self): + return 'Output:"%s"="%s"' % (self.key, self.value) diff --git a/moto/packages/boto/ec2/__init__.py b/moto/packages/boto/ec2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/boto/ec2/blockdevicemapping.py b/moto/packages/boto/ec2/blockdevicemapping.py new file mode 100644 index 000000000..462060115 --- /dev/null +++ b/moto/packages/boto/ec2/blockdevicemapping.py @@ -0,0 +1,83 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class BlockDeviceType(object): + """ + Represents parameters for a block device. + """ + + def __init__( + self, + connection=None, + ephemeral_name=None, + no_device=False, + volume_id=None, + snapshot_id=None, + status=None, + attach_time=None, + delete_on_termination=False, + size=None, + volume_type=None, + iops=None, + encrypted=None, + ): + self.connection = connection + self.ephemeral_name = ephemeral_name + self.no_device = no_device + self.volume_id = volume_id + self.snapshot_id = snapshot_id + self.status = status + self.attach_time = attach_time + self.delete_on_termination = delete_on_termination + self.size = size + self.volume_type = volume_type + self.iops = iops + self.encrypted = encrypted + + +# for backwards compatibility +EBSBlockDeviceType = BlockDeviceType + + +class BlockDeviceMapping(dict): + """ + Represents a collection of BlockDeviceTypes when creating ec2 instances. 
+ + Example: + dev_sda1 = BlockDeviceType() + dev_sda1.size = 100 # change root volume to 100GB instead of default + bdm = BlockDeviceMapping() + bdm['/dev/sda1'] = dev_sda1 + reservation = image.run(..., block_device_map=bdm, ...) + """ + + def __init__(self, connection=None): + """ + :type connection: :class:`boto.ec2.EC2Connection` + :param connection: Optional connection. + """ + dict.__init__(self) + self.connection = connection + self.current_name = None + self.current_value = None diff --git a/moto/packages/boto/ec2/ec2object.py b/moto/packages/boto/ec2/ec2object.py new file mode 100644 index 000000000..0067f59ce --- /dev/null +++ b/moto/packages/boto/ec2/ec2object.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Object +""" +from moto.packages.boto.ec2.tag import TagSet + + +class EC2Object(object): + def __init__(self, connection=None): + self.connection = connection + self.region = None + + +class TaggedEC2Object(EC2Object): + """ + Any EC2 resource that can be tagged should be represented + by a Python object that subclasses this class. This class + has the mechanism in place to handle the tagSet element in + the Describe* responses. If tags are found, it will create + a TagSet object and allow it to parse and collect the tags + into a dict that is stored in the "tags" attribute of the + object. 
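The docstring example above is upstream boto usage (image.run); with the vendored module the mapping itself is still just a dict. A short, self-contained sketch under that assumption (device name and size are illustrative):

from moto.packages.boto.ec2.blockdevicemapping import (
    BlockDeviceMapping,
    BlockDeviceType,
)

# Describe a 100 GiB gp2 root volume that is deleted on termination.
root = BlockDeviceType(size=100, volume_type="gp2", delete_on_termination=True)

bdm = BlockDeviceMapping()
bdm["/dev/sda1"] = root  # BlockDeviceMapping subclasses dict directly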
+ """ + + def __init__(self, connection=None): + super(TaggedEC2Object, self).__init__(connection) + self.tags = TagSet() diff --git a/moto/packages/boto/ec2/elb/__init__.py b/moto/packages/boto/ec2/elb/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/packages/boto/ec2/elb/attributes.py b/moto/packages/boto/ec2/elb/attributes.py new file mode 100644 index 000000000..fbb387ec6 --- /dev/null +++ b/moto/packages/boto/ec2/elb/attributes.py @@ -0,0 +1,100 @@ +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# Created by Chris Huegle for TellApart, Inc. + + +class ConnectionSettingAttribute(object): + """ + Represents the ConnectionSetting segment of ELB Attributes. + """ + + def __init__(self, connection=None): + self.idle_timeout = None + + def __repr__(self): + return "ConnectionSettingAttribute(%s)" % (self.idle_timeout) + + +class CrossZoneLoadBalancingAttribute(object): + """ + Represents the CrossZoneLoadBalancing segement of ELB Attributes. + """ + + def __init__(self, connection=None): + self.enabled = None + + def __repr__(self): + return "CrossZoneLoadBalancingAttribute(%s)" % (self.enabled) + + +class AccessLogAttribute(object): + """ + Represents the AccessLog segment of ELB attributes. + """ + + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return "AccessLog(%s, %s, %s, %s)" % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval, + ) + + +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. + """ + + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return "ConnectionDraining(%s, %s)" % (self.enabled, self.timeout) + + +class LbAttributes(object): + """ + Represents the Attributes of an Elastic Load Balancer. 
+ """ + + def __init__(self, connection=None): + self.connection = connection + self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( + self.connection + ) + self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) + self.connecting_settings = ConnectionSettingAttribute(self.connection) + + def __repr__(self): + return "LbAttributes(%s, %s, %s, %s)" % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log), + repr(self.connection_draining), + repr(self.connecting_settings), + ) diff --git a/moto/packages/boto/ec2/elb/policies.py b/moto/packages/boto/ec2/elb/policies.py new file mode 100644 index 000000000..a5c216f7e --- /dev/null +++ b/moto/packages/boto/ec2/elb/policies.py @@ -0,0 +1,55 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class AppCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.cookie_name = None + self.policy_name = None + + def __repr__(self): + return "AppCookieStickiness(%s, %s)" % (self.policy_name, self.cookie_name) + + +class OtherPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + + def __repr__(self): + return "OtherPolicy(%s)" % (self.policy_name) + + +class Policies(object): + """ + ELB Policies + """ + + def __init__(self, connection=None): + self.connection = connection + self.app_cookie_stickiness_policies = None + self.lb_cookie_stickiness_policies = None + self.other_policies = None + + def __repr__(self): + app = "AppCookieStickiness%s" % self.app_cookie_stickiness_policies + lb = "LBCookieStickiness%s" % self.lb_cookie_stickiness_policies + other = "Other%s" % self.other_policies + return "Policies(%s,%s,%s)" % (app, lb, other) diff --git a/moto/packages/boto/ec2/image.py b/moto/packages/boto/ec2/image.py new file mode 100644 index 000000000..b1fba4197 --- /dev/null +++ b/moto/packages/boto/ec2/image.py @@ -0,0 +1,25 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class ProductCodes(list): + pass diff --git a/moto/packages/boto/ec2/instance.py b/moto/packages/boto/ec2/instance.py new file mode 100644 index 000000000..3ba81ee95 --- /dev/null +++ b/moto/packages/boto/ec2/instance.py @@ -0,0 +1,217 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Instance +""" +from moto.packages.boto.ec2.ec2object import EC2Object, TaggedEC2Object +from moto.packages.boto.ec2.image import ProductCodes + + +class InstanceState(object): + """ + The state of the instance. + + :ivar code: The low byte represents the state. The high byte is an + opaque internal value and should be ignored. Valid values: + + * 0 (pending) + * 16 (running) + * 32 (shutting-down) + * 48 (terminated) + * 64 (stopping) + * 80 (stopped) + + :ivar name: The name of the state of the instance. Valid values: + + * "pending" + * "running" + * "shutting-down" + * "terminated" + * "stopping" + * "stopped" + """ + + def __init__(self, code=0, name=None): + self.code = code + self.name = name + + def __repr__(self): + return "%s(%d)" % (self.name, self.code) + + +class InstancePlacement(object): + """ + The location where the instance launched. + + :ivar zone: The Availability Zone of the instance. 
+    :ivar group_name: The name of the placement group the instance is
+        in (for cluster compute instances).
+    :ivar tenancy: The tenancy of the instance (if the instance is
+        running within a VPC). An instance with a tenancy of dedicated
+        runs on single-tenant hardware.
+    """
+
+    def __init__(self, zone=None, group_name=None, tenancy=None):
+        self.zone = zone
+        self.group_name = group_name
+        self.tenancy = tenancy
+
+    def __repr__(self):
+        return self.zone
+
+
+class Reservation(EC2Object):
+    """
+    Represents a Reservation response object.
+
+    :ivar id: The unique ID of the Reservation.
+    :ivar owner_id: The unique ID of the owner of the Reservation.
+    :ivar groups: A list of Group objects representing the security
+        groups associated with launched instances.
+    :ivar instances: A list of Instance objects launched in this
+        Reservation.
+    """
+
+    def __init__(self, connection=None):
+        super(Reservation, self).__init__(connection)
+        self.id = None
+        self.owner_id = None
+        self.groups = []
+        self.instances = []
+
+    def __repr__(self):
+        return "Reservation:%s" % self.id
+
+
+class Instance(TaggedEC2Object):
+    """
+    Represents an instance.
+
+    :ivar id: The unique ID of the Instance.
+    :ivar groups: A list of Group objects representing the security
+        groups associated with the instance.
+    :ivar public_dns_name: The public dns name of the instance.
+    :ivar private_dns_name: The private dns name of the instance.
+    :ivar state: The string representation of the instance's current state.
+    :ivar state_code: An integer representation of the instance's
+        current state.
+    :ivar previous_state: The string representation of the instance's
+        previous state.
+    :ivar previous_state_code: An integer representation of the
+        instance's previous state.
+    :ivar key_name: The name of the SSH key associated with the instance.
+    :ivar instance_type: The type of instance (e.g. m1.small).
+    :ivar launch_time: The time the instance was launched.
+    :ivar image_id: The ID of the AMI used to launch this instance.
+    :ivar placement: The availability zone in which the instance is running.
+    :ivar placement_group: The name of the placement group the instance
+        is in (for cluster compute instances).
+    :ivar placement_tenancy: The tenancy of the instance, if the instance
+        is running within a VPC. An instance with a tenancy of dedicated
+        runs on single-tenant hardware.
+    :ivar kernel: The kernel associated with the instance.
+    :ivar ramdisk: The ramdisk associated with the instance.
+    :ivar architecture: The architecture of the image (i386|x86_64).
+    :ivar hypervisor: The hypervisor used.
+    :ivar virtualization_type: The type of virtualization used.
+    :ivar product_codes: A list of product codes associated with this instance.
+    :ivar ami_launch_index: This instance's position within its launch group.
+    :ivar monitored: A boolean indicating whether monitoring is enabled or not.
+    :ivar monitoring_state: A string value that contains the actual value
+        of the monitoring element returned by EC2.
+    :ivar spot_instance_request_id: The ID of the spot instance request
+        if this is a spot instance.
+    :ivar subnet_id: The VPC Subnet ID, if running in VPC.
+    :ivar vpc_id: The VPC ID, if running in VPC.
+    :ivar private_ip_address: The private IP address of the instance.
+    :ivar ip_address: The public IP address of the instance.
+    :ivar platform: Platform of the instance (e.g. Windows)
+    :ivar root_device_name: The name of the root device.
+    :ivar root_device_type: The root device type (ebs|instance-store).
+ :ivar block_device_mapping: The Block Device Mapping for the instance. + :ivar state_reason: The reason for the most recent state transition. + :ivar interfaces: List of Elastic Network Interfaces associated with + this instance. + :ivar ebs_optimized: Whether instance is using optimized EBS volumes + or not. + :ivar instance_profile: A Python dict containing the instance + profile id and arn associated with this instance. + """ + + def __init__(self, connection=None): + super(Instance, self).__init__(connection) + self.id = None + self.dns_name = None + self.public_dns_name = None + self.private_dns_name = None + self.key_name = None + self.instance_type = None + self.launch_time = None + self.image_id = None + self.kernel = None + self.ramdisk = None + self.product_codes = ProductCodes() + self.ami_launch_index = None + self.monitored = False + self.monitoring_state = None + self.spot_instance_request_id = None + self.subnet_id = None + self.vpc_id = None + self.private_ip_address = None + self.ip_address = None + self.requester_id = None + self._in_monitoring_element = False + self.persistent = False + self.root_device_name = None + self.root_device_type = None + self.block_device_mapping = None + self.state_reason = None + self.group_name = None + self.client_token = None + self.eventsSet = None + self.groups = [] + self.platform = None + self.interfaces = [] + self.hypervisor = None + self.virtualization_type = None + self.architecture = None + self.instance_profile = None + self._previous_state = None + self._state = InstanceState() + self._placement = InstancePlacement() + + def __repr__(self): + return "Instance:%s" % self.id + + @property + def state(self): + return self._state.name + + @property + def state_code(self): + return self._state.code + + @property + def placement(self): + return self._placement.zone diff --git a/moto/packages/boto/ec2/instancetype.py b/moto/packages/boto/ec2/instancetype.py new file mode 100644 index 000000000..a84e4879e --- /dev/null +++ b/moto/packages/boto/ec2/instancetype.py @@ -0,0 +1,50 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
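The state, state_code, and placement properties above read through to private InstanceState and InstancePlacement objects, so moto's models can swap those wholesale. A minimal sketch of the vendored classes (the instance ID and zone are illustrative):

from moto.packages.boto.ec2.instance import (
    Instance,
    InstancePlacement,
    InstanceState,
)

inst = Instance()
inst.id = "i-0123456789abcdef0"
inst._state = InstanceState(16, "running")
inst._placement = InstancePlacement(zone="us-east-1a")

# The properties expose the private objects' fields.
assert inst.state == "running"
assert inst.state_code == 16
assert inst.placement == "us-east-1a"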
+ + +from moto.packages.boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return "InstanceType:%s-%s,%s,%s" % ( + self.name, + self.cores, + self.memory, + self.disk, + ) diff --git a/moto/packages/boto/ec2/launchspecification.py b/moto/packages/boto/ec2/launchspecification.py new file mode 100644 index 000000000..df6c99fc5 --- /dev/null +++ b/moto/packages/boto/ec2/launchspecification.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a launch specification for Spot instances. +""" + +from moto.packages.boto.ec2.ec2object import EC2Object + + +class LaunchSpecification(EC2Object): + def __init__(self, connection=None): + super(LaunchSpecification, self).__init__(connection) + self.key_name = None + self.instance_type = None + self.image_id = None + self.groups = [] + self.placement = None + self.kernel = None + self.ramdisk = None + self.monitored = False + self.subnet_id = None + self._in_monitoring_element = False + self.block_device_mapping = None + self.instance_profile = None + self.ebs_optimized = False + + def __repr__(self): + return "LaunchSpecification(%s)" % self.image_id diff --git a/moto/packages/boto/ec2/spotinstancerequest.py b/moto/packages/boto/ec2/spotinstancerequest.py new file mode 100644 index 000000000..c8630e74a --- /dev/null +++ b/moto/packages/boto/ec2/spotinstancerequest.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. 
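The vendored InstanceType above is a plain value object whose repr packs the name and sizing into one string. A tiny sketch (the m1.small figures are illustrative):

from moto.packages.boto.ec2.instancetype import InstanceType

t = InstanceType(name="m1.small", cores=1, memory=1740, disk=160)
print(t)  # InstanceType:m1.small-1,1740,160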
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Spot Instance Request
+"""
+
+from moto.packages.boto.ec2.ec2object import TaggedEC2Object
+
+
+class SpotInstanceRequest(TaggedEC2Object):
+    """
+
+    :ivar id: The ID of the Spot Instance Request.
+    :ivar price: The maximum hourly price for any Spot Instance launched to
+        fulfill the request.
+    :ivar type: The Spot Instance request type.
+    :ivar state: The state of the Spot Instance request.
+    :ivar fault: The fault codes for the Spot Instance request, if any.
+    :ivar valid_from: The start date of the request. If this is a one-time
+        request, the request becomes active at this date and time and remains
+        active until all instances launch, the request expires, or the request is
+        canceled. If the request is persistent, the request becomes active at this
+        date and time and remains active until it expires or is canceled.
+    :ivar valid_until: The end date of the request. If this is a one-time
+        request, the request remains active until all instances launch, the request
+        is canceled, or this date is reached. If the request is persistent, it
+        remains active until it is canceled or this date is reached.
+    :ivar launch_group: The instance launch group. Launch groups are Spot
+        Instances that launch together and terminate together.
+    :ivar launched_availability_zone: The Availability Zone in which the
+        request is launched.
+    :ivar product_description: The product description associated with the
+        Spot Instance request.
+    :ivar availability_zone_group: The Availability Zone group. If you specify
+        the same Availability Zone group for all Spot Instance requests, all Spot
+        Instances are launched in the same Availability Zone.
+    :ivar create_time: The time stamp when the Spot Instance request was
+        created.
+    :ivar launch_specification: Additional information for launching instances.
+    :ivar instance_id: The instance ID, if an instance has been launched to
+        fulfill the Spot Instance request.
+    :ivar status: The status code and status message describing the Spot
+        Instance request.
+ + """ + + def __init__(self, connection=None): + super(SpotInstanceRequest, self).__init__(connection) + self.id = None + self.price = None + self.type = None + self.state = None + self.fault = None + self.valid_from = None + self.valid_until = None + self.launch_group = None + self.launched_availability_zone = None + self.product_description = None + self.availability_zone_group = None + self.create_time = None + self.launch_specification = None + self.instance_id = None + self.status = None + + def __repr__(self): + return "SpotInstanceRequest:%s" % self.id diff --git a/moto/packages/boto/ec2/tag.py b/moto/packages/boto/ec2/tag.py new file mode 100644 index 000000000..9f5c2ef88 --- /dev/null +++ b/moto/packages/boto/ec2/tag.py @@ -0,0 +1,35 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class TagSet(dict): + """ + A TagSet is used to collect the tags associated with a particular + EC2 resource. Not all resources can be tagged but for those that + can, this dict object will be used to collect those values. See + :class:`boto.ec2.ec2object.TaggedEC2Object` for more details. 
+ """ + + def __init__(self, connection=None): + self.connection = connection + self._current_key = None + self._current_value = None diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index 83bd19237..2f48ad567 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -269,13 +269,13 @@ class fakesock(object): _sock=None, ): """ - Matches both the Python 2 API: - def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): - https://github.com/python/cpython/blob/2.7/Lib/socket.py + Matches both the Python 2 API: + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): + https://github.com/python/cpython/blob/2.7/Lib/socket.py - and the Python 3 API: - def __init__(self, family=-1, type=-1, proto=-1, fileno=None): - https://github.com/python/cpython/blob/3.5/Lib/socket.py + and the Python 3 API: + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + https://github.com/python/cpython/blob/3.5/Lib/socket.py """ if httpretty.allow_net_connect: if PY3: diff --git a/moto/packages/httpretty/http.py b/moto/packages/httpretty/http.py index 20c00707e..1b4379f5b 100644 --- a/moto/packages/httpretty/http.py +++ b/moto/packages/httpretty/http.py @@ -134,7 +134,7 @@ def parse_requestline(s): ValueError: Not a Request-Line """ methods = "|".join(HttpBaseClass.METHODS) - m = re.match(r"(" + methods + ")\s+(.*)\s+HTTP/(1.[0|1])", s, re.I) + m = re.match(r"({})\s+(.*)\s+HTTP/(1.[0|1])".format(methods), s, re.I) if m: return m.group(1).upper(), m.group(2), m.group(3) else: diff --git a/moto/ram/__init__.py b/moto/ram/__init__.py new file mode 100644 index 000000000..a4925944f --- /dev/null +++ b/moto/ram/__init__.py @@ -0,0 +1,5 @@ +from .models import ram_backends +from ..core.models import base_decorator + +ram_backend = ram_backends["us-east-1"] +mock_ram = base_decorator(ram_backends) diff --git a/moto/ram/exceptions.py b/moto/ram/exceptions.py new file mode 100644 index 000000000..49e57a61a --- /dev/null +++ b/moto/ram/exceptions.py @@ -0,0 +1,39 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class InvalidParameterException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidParameterException, self).__init__( + "InvalidParameterException", message + ) + + +class MalformedArnException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(MalformedArnException, self).__init__("MalformedArnException", message) + + +class OperationNotPermittedException(JsonRESTError): + code = 400 + + def __init__(self): + super(OperationNotPermittedException, self).__init__( + "OperationNotPermittedException", + "Unable to enable sharing with AWS Organizations. 
" + "Received AccessDeniedException from AWSOrganizations with the following error message: " + "You don't have permissions to access this resource.", + ) + + +class UnknownResourceException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(UnknownResourceException, self).__init__( + "UnknownResourceException", message + ) diff --git a/moto/ram/models.py b/moto/ram/models.py new file mode 100644 index 000000000..d38099374 --- /dev/null +++ b/moto/ram/models.py @@ -0,0 +1,247 @@ +import re +import string +from datetime import datetime +import random +from uuid import uuid4 + +from boto3 import Session +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID +from moto.core.utils import unix_time +from moto.organizations import organizations_backends +from moto.ram.exceptions import ( + MalformedArnException, + InvalidParameterException, + UnknownResourceException, + OperationNotPermittedException, +) + + +def random_resource_id(size): + return "".join(random.choice(string.digits + "abcdef") for _ in range(size)) + + +class ResourceShare(BaseModel): + # List of shareable resources can be found here + # https://docs.aws.amazon.com/ram/latest/userguide/shareable.html + SHAREABLE_RESOURCES = [ + "cluster", # Amazon Aurora cluster + "component", # Amazon EC2 Image Builder component + "group", # AWS Resource Groups + "image", # Amazon EC2 Image Builder image + "image-recipe", # Amazon EC2 Image Builder image recipe + "license-configuration", # AWS License Manager configuration + "mesh", # AWS App Mesh + "prefix-list", # Amazon EC2 prefix list + "project", # AWS CodeBuild project + "report-group", # AWS CodeBuild report group + "resolver-rule", # Amazon Route 53 forwarding rule + "subnet", # Amazon EC2 subnet + "transit-gateway", # Amazon EC2 transit gateway + ] + + def __init__(self, region, **kwargs): + self.region = region + + self.allow_external_principals = kwargs.get("allowExternalPrincipals", True) + self.arn = "arn:aws:ram:{0}:{1}:resource-share/{2}".format( + self.region, ACCOUNT_ID, uuid4() + ) + self.creation_time = datetime.utcnow() + self.feature_set = "STANDARD" + self.last_updated_time = datetime.utcnow() + self.name = kwargs["name"] + self.owning_account_id = ACCOUNT_ID + self.principals = [] + self.resource_arns = [] + self.status = "ACTIVE" + + @property + def organizations_backend(self): + return organizations_backends["global"] + + def add_principals(self, principals): + for principal in principals: + match = re.search( + r"^arn:aws:organizations::\d{12}:organization/(o-\w+)$", principal + ) + if match: + organization = self.organizations_backend.describe_organization() + if principal == organization["Organization"]["Arn"]: + continue + else: + raise UnknownResourceException( + "Organization {} could not be found.".format(match.group(1)) + ) + + match = re.search( + r"^arn:aws:organizations::\d{12}:ou/(o-\w+)/(ou-[\w-]+)$", principal + ) + if match: + roots = self.organizations_backend.list_roots() + root_id = next( + ( + root["Id"] + for root in roots["Roots"] + if root["Name"] == "Root" and match.group(1) in root["Arn"] + ), + None, + ) + + if root_id: + ous = self.organizations_backend.list_organizational_units_for_parent( + ParentId=root_id + ) + if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]): + continue + + raise UnknownResourceException( + "OrganizationalUnit {} in unknown organization could not be found.".format( + match.group(2) + ) + ) + + if not re.match(r"^\d{12}$", principal): + raise InvalidParameterException( + "Principal 
ID {} is malformed. " + "Verify the ID and try again.".format(principal) + ) + + for principal in principals: + self.principals.append(principal) + + def add_resources(self, resource_arns): + for resource in resource_arns: + match = re.search( + r"^arn:aws:[a-z0-9-]+:[a-z0-9-]*:[0-9]{12}:([a-z-]+)[/:].*$", resource + ) + if not match: + raise MalformedArnException( + "The specified resource ARN {} is not valid. " + "Verify the ARN and try again.".format(resource) + ) + + if match.group(1) not in self.SHAREABLE_RESOURCES: + raise MalformedArnException( + "You cannot share the selected resource type." + ) + + for resource in resource_arns: + self.resource_arns.append(resource) + + def delete(self): + self.last_updated_time = datetime.utcnow() + self.status = "DELETED" + + def describe(self): + return { + "allowExternalPrincipals": self.allow_external_principals, + "creationTime": unix_time(self.creation_time), + "featureSet": self.feature_set, + "lastUpdatedTime": unix_time(self.last_updated_time), + "name": self.name, + "owningAccountId": self.owning_account_id, + "resourceShareArn": self.arn, + "status": self.status, + } + + def update(self, **kwargs): + self.allow_external_principals = kwargs.get( + "allowExternalPrincipals", self.allow_external_principals + ) + self.last_updated_time = datetime.utcnow() + self.name = kwargs.get("name", self.name) + + +class ResourceAccessManagerBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceAccessManagerBackend, self).__init__() + self.region_name = region_name + self.resource_shares = [] + + @property + def organizations_backend(self): + return organizations_backends["global"] + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_resource_share(self, **kwargs): + resource = ResourceShare(self.region_name, **kwargs) + resource.add_principals(kwargs.get("principals", [])) + resource.add_resources(kwargs.get("resourceArns", [])) + + self.resource_shares.append(resource) + + response = resource.describe() + response.pop("featureSet") + + return dict(resourceShare=response) + + def get_resource_shares(self, **kwargs): + owner = kwargs["resourceOwner"] + + if owner not in ["SELF", "OTHER-ACCOUNTS"]: + raise InvalidParameterException( + "{} is not a valid resource owner. " + "Specify either SELF or OTHER-ACCOUNTS and try again.".format(owner) + ) + + if owner == "OTHER-ACCOUNTS": + raise NotImplementedError( + "Value 'OTHER-ACCOUNTS' for parameter 'resourceOwner' not implemented." 
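Pulling the RAM pieces together: a sketch that creates a share and lists it back through boto3, then enables organization-wide sharing, which the backend only permits once an organization exists. The ARNs and account IDs below are illustrative:

import boto3
from moto import mock_organizations, mock_ram


@mock_ram
@mock_organizations
def ram_roundtrip():
    boto3.client("organizations", region_name="us-east-1").create_organization(
        FeatureSet="ALL"
    )
    ram = boto3.client("ram", region_name="us-east-1")

    # "subnet" is on the SHAREABLE_RESOURCES whitelist above, and a bare
    # 12-digit account ID passes the principal validation.
    share = ram.create_resource_share(
        name="my-share",
        resourceArns=[
            "arn:aws:ec2:us-east-1:123456789012:subnet/subnet-0123456789abcdef0"
        ],
        principals=["111122223333"],
    )["resourceShare"]

    shares = ram.get_resource_shares(resourceOwner="SELF")["resourceShares"]
    assert shares[0]["resourceShareArn"] == share["resourceShareArn"]

    # Without create_organization this raises OperationNotPermittedException.
    assert ram.enable_sharing_with_aws_organization()["returnValue"] is True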
+            )
+
+        resources = [resource.describe() for resource in self.resource_shares]
+
+        return dict(resourceShares=resources)
+
+    def update_resource_share(self, **kwargs):
+        arn = kwargs["resourceShareArn"]
+
+        resource = next(
+            (resource for resource in self.resource_shares if arn == resource.arn),
+            None,
+        )
+
+        if not resource:
+            raise UnknownResourceException(
+                "ResourceShare {} could not be found.".format(arn)
+            )
+
+        resource.update(**kwargs)
+        response = resource.describe()
+        response.pop("featureSet")
+
+        return dict(resourceShare=response)
+
+    def delete_resource_share(self, arn):
+        resource = next(
+            (resource for resource in self.resource_shares if arn == resource.arn),
+            None,
+        )
+
+        if not resource:
+            raise UnknownResourceException(
+                "ResourceShare {} could not be found.".format(arn)
+            )
+
+        resource.delete()
+
+        return dict(returnValue=True)
+
+    def enable_sharing_with_aws_organization(self):
+        if not self.organizations_backend.org:
+            raise OperationNotPermittedException
+
+        return dict(returnValue=True)
+
+
+ram_backends = {}
+for region in Session().get_available_regions("ram"):
+    ram_backends[region] = ResourceAccessManagerBackend(region)
+for region in Session().get_available_regions("ram", partition_name="aws-us-gov"):
+    ram_backends[region] = ResourceAccessManagerBackend(region)
+for region in Session().get_available_regions("ram", partition_name="aws-cn"):
+    ram_backends[region] = ResourceAccessManagerBackend(region)
diff --git a/moto/ram/responses.py b/moto/ram/responses.py
new file mode 100644
index 000000000..b01254007
--- /dev/null
+++ b/moto/ram/responses.py
@@ -0,0 +1,39 @@
+from __future__ import unicode_literals
+from moto.core.responses import BaseResponse
+from .models import ram_backends
+import json
+
+
+class ResourceAccessManagerResponse(BaseResponse):
+    SERVICE_NAME = "ram"
+
+    @property
+    def ram_backend(self):
+        return ram_backends[self.region]
+
+    @property
+    def request_params(self):
+        try:
+            if self.method == "DELETE":
+                return None
+
+            return json.loads(self.body)
+        except ValueError:
+            return {}
+
+    def create_resource_share(self):
+        return json.dumps(self.ram_backend.create_resource_share(**self.request_params))
+
+    def get_resource_shares(self):
+        return json.dumps(self.ram_backend.get_resource_shares(**self.request_params))
+
+    def update_resource_share(self):
+        return json.dumps(self.ram_backend.update_resource_share(**self.request_params))
+
+    def delete_resource_share(self):
+        return json.dumps(
+            self.ram_backend.delete_resource_share(self._get_param("resourceShareArn"))
+        )
+
+    def enable_sharing_with_aws_organization(self):
+        return json.dumps(self.ram_backend.enable_sharing_with_aws_organization())
diff --git a/moto/ram/urls.py b/moto/ram/urls.py
new file mode 100644
index 000000000..1414b89b0
--- /dev/null
+++ b/moto/ram/urls.py
@@ -0,0 +1,12 @@
+from __future__ import unicode_literals
+from .responses import ResourceAccessManagerResponse
+
+url_bases = ["https?://ram.(.+).amazonaws.com"]
+
+url_paths = {
+    "{0}/createresourceshare$": ResourceAccessManagerResponse.dispatch,
+    "{0}/deleteresourceshare/?$": ResourceAccessManagerResponse.dispatch,
+    "{0}/enablesharingwithawsorganization$": ResourceAccessManagerResponse.dispatch,
+    "{0}/getresourceshares$": ResourceAccessManagerResponse.dispatch,
+    "{0}/updateresourceshare$": ResourceAccessManagerResponse.dispatch,
+}
diff --git a/moto/rds/exceptions.py b/moto/rds/exceptions.py
index cf9b9aac6..6fe30878b 100644
--- a/moto/rds/exceptions.py
+++ b/moto/rds/exceptions.py
@@ -36,3 +36,13 @@
class DBSubnetGroupNotFoundError(RDSClientError): "DBSubnetGroupNotFound", "Subnet Group {0} not found.".format(subnet_group_name), ) + + +class UnformattedGetAttTemplateException(Exception): + """Duplicated from CloudFormation to prevent circular deps.""" + + description = ( + "Template error: resource {0} does not support attribute type {1} in Fn::GetAtt" + ) + + status_code = 400 diff --git a/moto/rds/models.py b/moto/rds/models.py index 421f3784b..5039d9a26 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -1,16 +1,15 @@ from __future__ import unicode_literals -import boto.rds +from boto3 import Session from jinja2 import Template -from moto.cloudformation.exceptions import UnformattedGetAttTemplateException -from moto.core import BaseBackend, BaseModel -from moto.core.utils import get_random_hex +from moto.core import BaseBackend, CloudFormationModel from moto.ec2.models import ec2_backends +from moto.rds.exceptions import UnformattedGetAttTemplateException from moto.rds2.models import rds2_backends -class Database(BaseModel): +class Database(CloudFormationModel): def get_cfn_attribute(self, attribute_name): if attribute_name == "Endpoint.Address": return self.address @@ -18,15 +17,21 @@ class Database(BaseModel): return self.port raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return "DBInstanceIdentifier" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html + return "AWS::RDS::DBInstance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - db_instance_identifier = properties.get("DBInstanceIdentifier") - if not db_instance_identifier: - db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") if not db_security_groups: db_security_groups = [] @@ -39,7 +44,7 @@ class Database(BaseModel): "availability_zone": properties.get("AvailabilityZone"), "backup_retention_period": properties.get("BackupRetentionPeriod"), "db_instance_class": properties.get("DBInstanceClass"), - "db_instance_identifier": db_instance_identifier, + "db_instance_identifier": resource_name, "db_name": properties.get("DBName"), "db_subnet_group_name": db_subnet_group_name, "engine": properties.get("Engine"), @@ -163,7 +168,7 @@ class Database(BaseModel): backend.delete_database(self.db_instance_identifier) -class SecurityGroup(BaseModel): +class SecurityGroup(CloudFormationModel): def __init__(self, group_name, description): self.group_name = group_name self.description = description @@ -206,12 +211,21 @@ class SecurityGroup(BaseModel): def authorize_security_group(self, security_group): self.ec2_security_groups.append(security_group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsecuritygroup.html + return "AWS::RDS::DBSecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - group_name = resource_name.lower() + get_random_hex(12) + group_name = resource_name.lower() description = properties["GroupDescription"] security_group_ingress_rules = properties.get("DBSecurityGroupIngress", []) tags = properties.get("Tags") @@ -239,7 +253,7 @@ 
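The duplicated UnformattedGetAttTemplateException above keeps moto.rds importable without dragging in moto.cloudformation. A sketch of how the class carries its message template; the resource and attribute names are illustrative:

from moto.rds.exceptions import UnformattedGetAttTemplateException

try:
    raise UnformattedGetAttTemplateException()
except UnformattedGetAttTemplateException as err:
    # -> Template error: resource MyDB does not support attribute type
    #    Foo in Fn::GetAtt
    print(err.description.format("MyDB", "Foo"))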
class SecurityGroup(BaseModel): backend.delete_security_group(self.group_name) -class SubnetGroup(BaseModel): +class SubnetGroup(CloudFormationModel): def __init__(self, subnet_name, description, subnets): self.subnet_name = subnet_name self.description = description @@ -271,13 +285,21 @@ class SubnetGroup(BaseModel): ) return template.render(subnet_group=self) + @staticmethod + def cloudformation_name_type(): + return "DBSubnetGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsubnetgroup.html + return "AWS::RDS::DBSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - - subnet_name = resource_name.lower() + get_random_hex(12) + subnet_name = resource_name.lower() description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") @@ -313,6 +335,10 @@ class RDSBackend(BaseBackend): return rds2_backends[self.region] -rds_backends = dict( - (region.name, RDSBackend(region.name)) for region in boto.rds.regions() -) +rds_backends = {} +for region in Session().get_available_regions("rds"): + rds_backends[region] = RDSBackend(region) +for region in Session().get_available_regions("rds", partition_name="aws-us-gov"): + rds_backends[region] = RDSBackend(region) +for region in Session().get_available_regions("rds", partition_name="aws-cn"): + rds_backends[region] = RDSBackend(region) diff --git a/moto/rds/urls.py b/moto/rds/urls.py index 9c7570167..86e6ec00b 100644 --- a/moto/rds/urls.py +++ b/moto/rds/urls.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .responses import RDSResponse -url_bases = ["https?://rds(\..+)?.amazonaws.com"] +url_bases = [r"https?://rds(\..+)?.amazonaws.com"] url_paths = {"{0}/$": RDSResponse.dispatch} diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 963af1c63..eb4159025 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -8,10 +8,9 @@ from collections import defaultdict from boto3 import Session from jinja2 import Template from re import compile as re_compile -from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel -from moto.core.utils import get_random_hex +from moto.core import BaseBackend, BaseModel, CloudFormationModel, ACCOUNT_ID + from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2.models import ec2_backends from .exceptions import ( @@ -29,7 +28,7 @@ from .exceptions import ( ) -class Database(BaseModel): +class Database(CloudFormationModel): def __init__(self, **kwargs): self.status = "available" self.is_replica = False @@ -159,6 +158,7 @@ class Database(BaseModel): family=db_family, description=description, tags={}, + region=self.region, ) ] else: @@ -282,6 +282,14 @@ class Database(BaseModel): {{ database.port }} {{ database.db_instance_arn }} + + {%- for tag in database.tags -%} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {%- endfor -%} + """ ) return template.render(database=self) @@ -308,6 +316,9 @@ class Database(BaseModel): setattr(self, key, value) def get_cfn_attribute(self, attribute_name): + # Local import to avoid circular dependency with cloudformation.parsing + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == "Endpoint.Address": return self.address elif attribute_name == 
"Endpoint.Port": @@ -354,15 +365,21 @@ class Database(BaseModel): "sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20}, }[engine][storage_type] + @staticmethod + def cloudformation_name_type(): + return "DBInstanceIdentifier" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html + return "AWS::RDS::DBInstance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - db_instance_identifier = properties.get("DBInstanceIdentifier") - if not db_instance_identifier: - db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") if not db_security_groups: db_security_groups = [] @@ -375,7 +392,7 @@ class Database(BaseModel): "availability_zone": properties.get("AvailabilityZone"), "backup_retention_period": properties.get("BackupRetentionPeriod"), "db_instance_class": properties.get("DBInstanceClass"), - "db_instance_identifier": db_instance_identifier, + "db_instance_identifier": resource_name, "db_name": properties.get("DBName"), "db_subnet_group_name": db_subnet_group_name, "engine": properties.get("Engine"), @@ -562,7 +579,7 @@ class Snapshot(BaseModel): self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys] -class SecurityGroup(BaseModel): +class SecurityGroup(CloudFormationModel): def __init__(self, group_name, description, tags): self.group_name = group_name self.description = description @@ -625,12 +642,21 @@ class SecurityGroup(BaseModel): def authorize_security_group(self, security_group): self.ec2_security_groups.append(security_group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsecuritygroup.html + return "AWS::RDS::DBSecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - group_name = resource_name.lower() + get_random_hex(12) + group_name = resource_name.lower() description = properties["GroupDescription"] security_group_ingress_rules = properties.get("DBSecurityGroupIngress", []) tags = properties.get("Tags") @@ -669,7 +695,7 @@ class SecurityGroup(BaseModel): backend.delete_security_group(self.group_name) -class SubnetGroup(BaseModel): +class SubnetGroup(CloudFormationModel): def __init__(self, subnet_name, description, subnets, tags): self.subnet_name = subnet_name self.description = description @@ -724,13 +750,21 @@ class SubnetGroup(BaseModel): ) return template.render(subnet_group=self) + @staticmethod + def cloudformation_name_type(): + return "DBSubnetGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsubnetgroup.html + return "AWS::RDS::DBSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - subnet_name = resource_name.lower() + get_random_hex(12) description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") @@ -739,7 +773,7 @@ class SubnetGroup(BaseModel): subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] rds2_backend = 
rds2_backends[region_name] subnet_group = rds2_backend.create_subnet_group( - subnet_name, description, subnets, tags + resource_name, description, subnets, tags ) return subnet_group @@ -865,7 +899,10 @@ class RDS2Backend(BaseBackend): def stop_database(self, db_instance_identifier, db_snapshot_identifier=None): database = self.describe_databases(db_instance_identifier)[0] # todo: certain rds types not allowed to be stopped at this time. - if database.is_replica or database.multi_az: + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html#USER_StopInstance.Limitations + if database.is_replica or ( + database.multi_az and database.engine.lower().startswith("sqlserver") + ): # todo: more db types not supported by stop/start instance api raise InvalidDBClusterStateFaultError(db_instance_identifier) if database.status != "available": @@ -1137,7 +1174,7 @@ class RDS2Backend(BaseBackend): "InvalidParameterValue", "The parameter DBParameterGroupName must be provided and must not be blank.", ) - + db_parameter_group_kwargs["region"] = self.region db_parameter_group = DBParameterGroup(**db_parameter_group_kwargs) self.db_parameter_groups[db_parameter_group_id] = db_parameter_group return db_parameter_group @@ -1436,13 +1473,18 @@ class OptionGroupOptionSetting(object): return template.render(option_group_option_setting=self) -class DBParameterGroup(object): - def __init__(self, name, description, family, tags): +def make_rds_arn(region, name): + return "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, name) + + +class DBParameterGroup(CloudFormationModel): + def __init__(self, name, description, family, tags, region): self.name = name self.description = description self.family = family self.tags = tags self.parameters = defaultdict(dict) + self.arn = make_rds_arn(region, name) def to_xml(self): template = Template( @@ -1450,6 +1492,7 @@ class DBParameterGroup(object): {{ param_group.name }} {{ param_group.family }} {{ param_group.description }} + {{ param_group.arn }} """ ) return template.render(param_group=self) @@ -1475,6 +1518,15 @@ class DBParameterGroup(object): backend = rds2_backends[region_name] backend.delete_db_parameter_group(self.name) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html + return "AWS::RDS::DBParameterGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 0a17e8aab..eb6cea99e 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -136,3 +136,24 @@ class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): cluster_identifier ), ) + + +class ClusterAlreadyExistsFaultError(RedshiftClientError): + def __init__(self): + super(ClusterAlreadyExistsFaultError, self).__init__( + "ClusterAlreadyExists", "Cluster already exists" + ) + + +class InvalidParameterCombinationError(RedshiftClientError): + def __init__(self, message): + super(InvalidParameterCombinationError, self).__init__( + "InvalidParameterCombination", message + ) + + +class UnknownSnapshotCopyRegionFaultError(RedshiftClientError): + def __init__(self, message): + super(UnknownSnapshotCopyRegionFaultError, self).__init__( + "UnknownSnapshotCopyRegionFault", message + ) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 17840fb86..bb28af029 
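A worked example for the make_rds_arn helper introduced above ("pg" is the RDS resource-type segment for parameter groups; 123456789012 is moto's default ACCOUNT_ID, inlined here so the sketch is self-contained):

```python
def make_rds_arn(region, name):
    # Mirrors the helper in the diff, with the account id inlined.
    return "arn:aws:rds:{0}:{1}:pg:{2}".format(region, "123456789012", name)


assert (make_rds_arn("us-east-1", "default.mysql5.7")
        == "arn:aws:rds:us-east-1:123456789012:pg:default.mysql5.7")
```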
100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -4,18 +4,20 @@ import copy import datetime from boto3 import Session -from botocore.exceptions import ClientError + from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends from .exceptions import ( + ClusterAlreadyExistsFaultError, ClusterNotFoundError, ClusterParameterGroupNotFoundError, ClusterSecurityGroupNotFoundError, ClusterSnapshotAlreadyExistsError, ClusterSnapshotNotFoundError, ClusterSubnetGroupNotFoundError, + InvalidParameterCombinationError, InvalidParameterValueError, InvalidSubnetError, ResourceNotFoundFaultError, @@ -24,6 +26,7 @@ from .exceptions import ( SnapshotCopyDisabledFaultError, SnapshotCopyGrantAlreadyExistsFaultError, SnapshotCopyGrantNotFoundFaultError, + UnknownSnapshotCopyRegionFaultError, ) @@ -62,7 +65,7 @@ class TaggableResourceMixin(object): return self.tags -class Cluster(TaggableResourceMixin, BaseModel): +class Cluster(TaggableResourceMixin, CloudFormationModel): resource_type = "cluster" @@ -156,6 +159,15 @@ class Cluster(TaggableResourceMixin, BaseModel): self.iam_roles_arn = iam_roles_arn or [] self.restored_from_snapshot = restored_from_snapshot + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html + return "AWS::Redshift::Cluster" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -169,6 +181,7 @@ class Cluster(TaggableResourceMixin, BaseModel): ].cluster_subnet_group_name else: subnet_group_name = None + cluster = redshift_backend.create_cluster( cluster_identifier=resource_name, node_type=properties.get("NodeType"), @@ -320,7 +333,7 @@ class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): } -class SubnetGroup(TaggableResourceMixin, BaseModel): +class SubnetGroup(TaggableResourceMixin, CloudFormationModel): resource_type = "subnetgroup" @@ -341,6 +354,15 @@ class SubnetGroup(TaggableResourceMixin, BaseModel): if not self.subnets: raise InvalidSubnetError(subnet_ids) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersubnetgroup.html + return "AWS::Redshift::ClusterSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -411,7 +433,7 @@ class SecurityGroup(TaggableResourceMixin, BaseModel): } -class ParameterGroup(TaggableResourceMixin, BaseModel): +class ParameterGroup(TaggableResourceMixin, CloudFormationModel): resource_type = "parametergroup" @@ -428,6 +450,15 @@ class ParameterGroup(TaggableResourceMixin, BaseModel): self.group_family = group_family self.description = description + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clusterparametergroup.html + return "AWS::Redshift::ClusterParameterGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -544,10 +575,12 @@ class RedshiftBackend(BaseBackend): cluster.encrypted == "true" and 
kwargs["snapshot_copy_grant_name"] is None ): - raise ClientError( - "InvalidParameterValue", - "SnapshotCopyGrantName is required for Snapshot Copy " - "on KMS encrypted clusters.", + raise InvalidParameterValueError( + "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." + ) + if kwargs["destination_region"] == self.region: + raise UnknownSnapshotCopyRegionFaultError( + "Invalid region {}".format(self.region) ) status = { "DestinationRegion": kwargs["destination_region"], @@ -580,6 +613,8 @@ class RedshiftBackend(BaseBackend): def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs["cluster_identifier"] + if cluster_identifier in self.clusters: + raise ClusterAlreadyExistsFaultError() cluster = Cluster(self, **cluster_kwargs) self.clusters[cluster_identifier] = cluster return cluster @@ -626,10 +661,8 @@ class RedshiftBackend(BaseBackend): cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None ): - raise ClientError( - "InvalidParameterValue", - "FinalSnapshotIdentifier is required for Snapshot copy " - "when SkipFinalSnapshot is False", + raise InvalidParameterCombinationError( + "FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified." ) elif ( cluster_skip_final_snapshot is False @@ -748,7 +781,6 @@ class RedshiftBackend(BaseBackend): cluster_snapshots.append(snapshot) if cluster_snapshots: return cluster_snapshots - raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: if snapshot_identifier in self.snapshots: diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py index b40179145..3e5f7b7f5 100644 --- a/moto/resourcegroups/urls.py +++ b/moto/resourcegroups/urls.py @@ -4,9 +4,14 @@ from .responses import ResourceGroupsResponse url_bases = ["https?://resource-groups(-fips)?.(.+).amazonaws.com"] url_paths = { + "{0}/delete-group$": ResourceGroupsResponse.dispatch, + "{0}/get-group$": ResourceGroupsResponse.dispatch, + "{0}/get-group-query$": ResourceGroupsResponse.dispatch, "{0}/groups$": ResourceGroupsResponse.dispatch, "{0}/groups/(?P[^/]+)$": ResourceGroupsResponse.dispatch, "{0}/groups/(?P[^/]+)/query$": ResourceGroupsResponse.dispatch, "{0}/groups-list$": ResourceGroupsResponse.dispatch, "{0}/resources/(?P[^/]+)/tags$": ResourceGroupsResponse.dispatch, + "{0}/update-group$": ResourceGroupsResponse.dispatch, + "{0}/update-group-query$": ResourceGroupsResponse.dispatch, } diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index 850ab5c04..1cf38e8d1 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -3,6 +3,7 @@ import uuid import six from boto3 import Session +from moto.core import ACCOUNT_ID from moto.core import BaseBackend from moto.core.exceptions import RESTError @@ -113,42 +114,42 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html # TODO move these to their respective backends - filters = [lambda t, v: True] + filters = [] for tag_filter_dict in tag_filters: values = tag_filter_dict.get("Values", []) if len(values) == 0: # Check key matches - filters.append(lambda t, v: t == tag_filter_dict["Key"]) + filters.append(lambda t, v, key=tag_filter_dict["Key"]: t == key) elif len(values) == 1: # Check its exactly the same as key, value filters.append( - lambda t, v: t == tag_filter_dict["Key"] and v == values[0] + lambda t, v, key=tag_filter_dict["Key"], value=values[0]: t 
== key + and v == value ) else: # Check key matches and value is one of the provided values - filters.append(lambda t, v: t == tag_filter_dict["Key"] and v in values) + filters.append( + lambda t, v, key=tag_filter_dict["Key"], vl=values: t == key + and v in vl + ) def tag_filter(tag_list): result = [] if tag_filters: - for tag in tag_list: + for f in filters: temp_result = [] - for f in filters: + for tag in tag_list: f_result = f(tag["Key"], tag["Value"]) temp_result.append(f_result) - result.append(all(temp_result)) - - return any(result) + result.append(any(temp_result)) + return all(result) else: return True # Do S3, resource type s3 if not resource_type_filters or "s3" in resource_type_filters: for bucket in self.s3_backend.buckets.values(): - tags = [] - for tag in bucket.tags.tag_set.tags: - tags.append({"Key": tag.key, "Value": tag.value}) - + tags = self.s3_backend.tagger.list_tags_for_resource(bucket.arn)["Tags"] if not tags or not tag_filter( tags ): # Skip if no tags, or invalid filter @@ -289,8 +290,7 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): } # TODO add these to the keys and values functions / combine functions - # ELB - + # ELB, resource type elasticloadbalancing:loadbalancer def get_elbv2_tags(arn): result = [] for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items(): @@ -299,8 +299,8 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): if ( not resource_type_filters - or "elasticloadbalancer" in resource_type_filters - or "elasticloadbalancer:loadbalancer" in resource_type_filters + or "elasticloadbalancing" in resource_type_filters + or "elasticloadbalancing:loadbalancer" in resource_type_filters ): for elb in self.elbv2_backend.load_balancers.values(): tags = get_elbv2_tags(elb.arn) @@ -309,6 +309,27 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): yield {"ResourceARN": "{0}".format(elb.arn), "Tags": tags} + # ELB Target Group, resource type elasticloadbalancing:targetgroup + def get_target_group_tags(arn): + result = [] + for key, value in self.elbv2_backend.target_groups[ + target_group.arn + ].tags.items(): + result.append({"Key": key, "Value": value}) + return result + + if ( + not resource_type_filters + or "elasticloadbalancing" in resource_type_filters + or "elasticloadbalancing:targetgroup" in resource_type_filters + ): + for target_group in self.elbv2_backend.target_groups.values(): + tags = get_target_group_tags(target_group.arn) + if not tag_filter(tags): # Skip if no tags, or invalid filter + continue + + yield {"ResourceARN": "{0}".format(target_group.arn), "Tags": tags} + # EMR Cluster # Glacier Vault @@ -318,7 +339,7 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # KMS def get_kms_tags(kms_key_id): result = [] - for tag in self.kms_backend.list_resource_tags(kms_key_id): + for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags", []): result.append({"Key": tag["TagKey"], "Value": tag["TagValue"]}) return result @@ -347,6 +368,23 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # RedShift Subnet group # VPC + if ( + not resource_type_filters + or "ec2" in resource_type_filters + or "ec2:vpc" in resource_type_filters + ): + for vpc in self.ec2_backend.vpcs.values(): + tags = get_ec2_tags(vpc.id) + if not tags or not tag_filter( + tags + ): # Skip if no tags, or invalid filter + continue + yield { + "ResourceARN": "arn:aws:ec2:{0}:{1}:vpc/{2}".format( + self.region_name, ACCOUNT_ID, vpc.id + ), + "Tags": tags, + } # VPC Customer Gateway # VPC DHCP Option Set # VPC Internet Gateway @@ -362,8 
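Two Python subtleties in the filter rewrite above deserve a note. First, closures capture variables rather than values, so the default-argument binding (key=tag_filter_dict["Key"]) freezes each filter's key at definition time. Second, swapping the loop nesting flips the semantics to all-of-any: every filter must be satisfied by at least one tag, rather than some tag having to satisfy every filter at once. A condensed sketch of both:

```python
# 1) Late binding: without a default argument, every lambda built in a loop
#    sees the final loop value.
late = [lambda: i for i in range(3)]
bound = [lambda i=i: i for i in range(3)]
print([f() for f in late])   # [2, 2, 2]
print([f() for f in bound])  # [0, 1, 2]


# 2) Corrected matching: all filters must each match some tag (all-of-any).
def tag_filter(tag_list, filters):
    return all(
        any(f(t["Key"], t["Value"]) for t in tag_list) for f in filters
    )
```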
+400,9 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # Do S3, resource type s3 for bucket in self.s3_backend.buckets.values(): - for tag in bucket.tags.tag_set.tags: - yield tag.key + tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn) + for key, _ in tags.items(): + yield key # EC2 tags def get_ec2_keys(res_id): @@ -414,9 +453,10 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # Do S3, resource type s3 for bucket in self.s3_backend.buckets.values(): - for tag in bucket.tags.tag_set.tags: - if tag.key == tag_key: - yield tag.value + tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn) + for key, value in tags.items(): + if key == tag_key: + yield value # EC2 tags def get_ec2_values(res_id): diff --git a/moto/route53/models.py b/moto/route53/models.py index 2ae03e54d..f4303c2ae 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -7,7 +7,7 @@ import random import uuid from jinja2 import Template -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel ROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits @@ -18,7 +18,7 @@ def create_route53_zone_id(): return "".join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)]) -class HealthCheck(BaseModel): +class HealthCheck(CloudFormationModel): def __init__(self, health_check_id, health_check_args): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") @@ -34,6 +34,15 @@ class HealthCheck(BaseModel): def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-healthcheck.html + return "AWS::Route53::HealthCheck" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -75,7 +84,7 @@ class HealthCheck(BaseModel): return template.render(health_check=self) -class RecordSet(BaseModel): +class RecordSet(CloudFormationModel): def __init__(self, kwargs): self.name = kwargs.get("Name") self.type_ = kwargs.get("Type") @@ -88,6 +97,17 @@ class RecordSet(BaseModel): self.hosted_zone_name = kwargs.get("HostedZoneName") self.hosted_zone_id = kwargs.get("HostedZoneId") self.alias_target = kwargs.get("AliasTarget") + self.failover = kwargs.get("Failover") + self.geo_location = kwargs.get("GeoLocation") + + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-recordset.html + return "AWS::Route53::RecordSet" @classmethod def create_from_cloudformation_json( @@ -154,6 +174,16 @@ class RecordSet(BaseModel): {% if record_set.ttl %} {{ record_set.ttl }} {% endif %} + {% if record_set.failover %} + {{ record_set.failover }} + {% endif %} + {% if record_set.geo_location %} + + {% for geo_key in ['ContinentCode','CountryCode','SubdivisionCode'] %} + {% if record_set.geo_location[geo_key] %}<{{ geo_key }}>{{ record_set.geo_location[geo_key] }}{% endif %} + {% endfor %} + + {% endif %} {% if record_set.alias_target %} {{ record_set.alias_target['HostedZoneId'] }} @@ -164,7 +194,7 @@ class RecordSet(BaseModel): {% for record in record_set.records %} - {{ record }} + {{ record|e }} {% endfor %} @@ -190,7 +220,7 @@ def reverse_domain_name(domain_name): return ".".join(reversed(domain_name.split("."))) -class FakeZone(BaseModel): +class 
FakeZone(CloudFormationModel): def __init__(self, name, id_, private_zone, comment=None): self.name = name self.id = id_ @@ -255,18 +285,26 @@ class FakeZone(BaseModel): def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html + return "AWS::Route53::HostedZone" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - name = properties["Name"] - - hosted_zone = route53_backend.create_hosted_zone(name, private_zone=False) + hosted_zone = route53_backend.create_hosted_zone( + resource_name, private_zone=False + ) return hosted_zone -class RecordSetGroup(BaseModel): +class RecordSetGroup(CloudFormationModel): def __init__(self, hosted_zone_id, record_sets): self.hosted_zone_id = hosted_zone_id self.record_sets = record_sets @@ -275,6 +313,15 @@ class RecordSetGroup(BaseModel): def physical_resource_id(self): return "arn:aws:route53:::hostedzone/{0}".format(self.hosted_zone_id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-recordsetgroup.html + return "AWS::Route53::RecordSetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 077c89a2c..e831820e1 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -243,6 +243,15 @@ class Route53(BaseResponse): return 200, headers, template.render() + def get_change(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if request.method == "GET": + parsed_url = urlparse(full_url) + change_id = parsed_url.path.rstrip("/").rsplit("/", 1)[1] + template = Template(GET_CHANGE_RESPONSE) + return 200, headers, template.render(change_id=change_id) + LIST_TAGS_FOR_RESOURCE_RESPONSE = """ @@ -382,3 +391,12 @@ LIST_HEALTH_CHECKS_RESPONSE = """ DELETE_HEALTH_CHECK_RESPONSE = """ """ + +GET_CHANGE_RESPONSE = """ + + + INSYNC + 2010-09-10T01:36:41.958Z + {{ change_id }} + +""" diff --git a/moto/route53/urls.py b/moto/route53/urls.py index a697d258a..3bca32715 100644 --- a/moto/route53/urls.py +++ b/moto/route53/urls.py @@ -13,12 +13,13 @@ def tag_response2(*args, **kwargs): url_paths = { - "{0}/(?P[\d_-]+)/hostedzone$": Route53().list_or_create_hostzone_response, - "{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$": Route53().get_or_delete_hostzone_response, - "{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$": Route53().rrset_response, - "{0}/(?P[\d_-]+)/hostedzonesbyname": Route53().list_hosted_zones_by_name_response, - "{0}/(?P[\d_-]+)/healthcheck": Route53().health_check_response, - "{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$": tag_response1, - "{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$": tag_response2, - "{0}/(?P[\d_-]+)/trafficpolicyinstances/*": Route53().not_implemented_response, + r"{0}/(?P[\d_-]+)/hostedzone$": Route53().list_or_create_hostzone_response, + r"{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$": Route53().get_or_delete_hostzone_response, + r"{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$": Route53().rrset_response, + r"{0}/(?P[\d_-]+)/hostedzonesbyname": Route53().list_hosted_zones_by_name_response, + r"{0}/(?P[\d_-]+)/healthcheck": 
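A usage sketch for the new GetChange endpoint above: moto applies record changes synchronously, so any change id reports INSYNC immediately (the id below is an arbitrary example):

```python
import boto3
from moto import mock_route53


@mock_route53
def poll_change():
    client = boto3.client("route53", region_name="us-east-1")
    resp = client.get_change(Id="C2682N5HXP0BZ4")
    assert resp["ChangeInfo"]["Status"] == "INSYNC"
```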
Route53().health_check_response, + r"{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$": tag_response1, + r"{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$": tag_response2, + r"{0}/(?P[\d_-]+)/trafficpolicyinstances/*": Route53().not_implemented_response, + r"{0}/(?P[\d_-]+)/change/(?P[^/]+)$": Route53().get_change, } diff --git a/moto/s3/cloud_formation.py b/moto/s3/cloud_formation.py new file mode 100644 index 000000000..0bf6022ef --- /dev/null +++ b/moto/s3/cloud_formation.py @@ -0,0 +1,33 @@ +from collections import OrderedDict + + +def cfn_to_api_encryption(bucket_encryption_properties): + + sse_algorithm = bucket_encryption_properties["ServerSideEncryptionConfiguration"][ + 0 + ]["ServerSideEncryptionByDefault"]["SSEAlgorithm"] + kms_master_key_id = bucket_encryption_properties[ + "ServerSideEncryptionConfiguration" + ][0]["ServerSideEncryptionByDefault"].get("KMSMasterKeyID") + apply_server_side_encryption_by_default = OrderedDict() + apply_server_side_encryption_by_default["SSEAlgorithm"] = sse_algorithm + if kms_master_key_id: + apply_server_side_encryption_by_default["KMSMasterKeyID"] = kms_master_key_id + rule = OrderedDict( + {"ApplyServerSideEncryptionByDefault": apply_server_side_encryption_by_default} + ) + bucket_encryption = OrderedDict( + {"@xmlns": "http://s3.amazonaws.com/doc/2006-03-01/"} + ) + bucket_encryption["Rule"] = rule + return bucket_encryption + + +def is_replacement_update(properties): + properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) diff --git a/moto/s3/config.py b/moto/s3/config.py index 8098addfc..932ebc3be 100644 --- a/moto/s3/config.py +++ b/moto/s3/config.py @@ -1,8 +1,13 @@ +import datetime import json +import time + +from boto3 import Session from moto.core.exceptions import InvalidNextTokenException from moto.core.models import ConfigQueryModel from moto.s3 import s3_backends +from moto.s3.models import get_moto_s3_account_id class S3ConfigQuery(ConfigQueryModel): @@ -14,6 +19,7 @@ class S3ConfigQuery(ConfigQueryModel): next_token, backend_region=None, resource_region=None, + aggregator=None, ): # The resource_region only matters for aggregated queries as you can filter on bucket regions for them. # For other resource types, you would need to iterate appropriately for the backend_region. @@ -118,4 +124,147 @@ class S3ConfigQuery(ConfigQueryModel): return config_data +class S3AccountPublicAccessBlockConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + aggregator=None, + ): + # For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID + # There is no resource name -- it should be a blank string "" if provided. 
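A worked example for cfn_to_api_encryption in the new moto/s3/cloud_formation.py above (the KMS key id is a hypothetical value): CloudFormation's BucketEncryption property block becomes the XML-ready dict that the backend stores via put_bucket_encryption.

```python
from moto.s3.cloud_formation import cfn_to_api_encryption

cfn_props = {
    "ServerSideEncryptionConfiguration": [{
        "ServerSideEncryptionByDefault": {
            "SSEAlgorithm": "aws:kms",
            "KMSMasterKeyID": "1234abcd-12ab-34cd-56ef-1234567890ab",
        }
    }]
}
print(cfn_to_api_encryption(cfn_props))
# -> {'@xmlns': 'http://s3.amazonaws.com/doc/2006-03-01/',
#     'Rule': {'ApplyServerSideEncryptionByDefault':
#              {'SSEAlgorithm': 'aws:kms',
#               'KMSMasterKeyID': '1234abcd-...'}}}  (as OrderedDicts)
```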
+ + # The resource name can only ever be None or an empty string: + if resource_name is not None and resource_name != "": + return [], None + + pab = None + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # If a resource ID was passed in, then filter accordingly: + if resource_ids: + for id in resource_ids: + if account_id == id: + pab = self.backends["global"].account_public_access_block + break + + # Otherwise, just grab the one from the backend: + if not resource_ids: + pab = self.backends["global"].account_public_access_block + + # If it's not present, then return nothing + if not pab: + return [], None + + # Filter on regions (and paginate on them as well): + if backend_region: + pab_list = [backend_region] + elif resource_region: + # Invalid region? + if resource_region not in regions: + return [], None + + pab_list = [resource_region] + + # Aggregated query where no regions were supplied so return them all: + else: + pab_list = regions + + # Pagination logic: + sorted_regions = sorted(pab_list) + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + # Tokens for this moto feature is just the region-name: + # For OTHER non-global resource types, it's the region concatenated with the resource ID. + if next_token not in sorted_regions: + raise InvalidNextTokenException() + + start = sorted_regions.index(next_token) + + # Get the list of items to collect: + pab_list = sorted_regions[start : (start + limit)] + + if len(sorted_regions) > (start + limit): + new_token = sorted_regions[start + limit] + + return ( + [ + { + "type": "AWS::S3::AccountPublicAccessBlock", + "id": account_id, + "region": region, + } + for region in pab_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + # Do we even have this defined? + if not self.backends["global"].account_public_access_block: + return None + + # Resource name can only ever be "" if it's supplied: + if resource_name is not None and resource_name != "": + return None + + # Are we filtering based on region? + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # Is the resource ID correct?: + if account_id == resource_id: + if backend_region: + pab_region = backend_region + + # Invalid region? 
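A reduced sketch of the pagination scheme above: the continuation token is simply the next region name in sorted order, so no opaque token state is needed.

```python
def paginate_regions(regions, limit, next_token=None):
    sorted_regions = sorted(regions)
    # An unknown token raises ValueError here; the real code raises
    # InvalidNextTokenException instead.
    start = 0 if not next_token else sorted_regions.index(next_token)
    page = sorted_regions[start:start + limit]
    new_token = (sorted_regions[start + limit]
                 if len(sorted_regions) > start + limit else None)
    return page, new_token


page, token = paginate_regions(["us-east-1", "eu-west-1", "ap-south-1"], limit=2)
# page == ['ap-south-1', 'eu-west-1'], token == 'us-east-1'
```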
+ elif resource_region not in regions: + return None + + else: + pab_region = resource_region + + else: + return None + + # Format the PAB to the AWS Config format: + creation_time = datetime.datetime.utcnow() + config_data = { + "version": "1.3", + "accountId": account_id, + "configurationItemCaptureTime": str(creation_time), + "configurationItemStatus": "OK", + "configurationStateId": str( + int(time.mktime(creation_time.timetuple())) + ), # PY2 and 3 compatible + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": account_id, + "awsRegion": pab_region, + "availabilityZone": "Not Applicable", + "configuration": self.backends[ + "global" + ].account_public_access_block.to_config_dict(), + "supplementaryConfiguration": {}, + } + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + return config_data + + s3_config_query = S3ConfigQuery(s3_backends) +s3_account_public_access_block_query = S3AccountPublicAccessBlockConfigQuery( + s3_backends +) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 1f2ead639..3b33791c5 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +from moto.core.exceptions import RESTError ERROR_WITH_BUCKET_NAME = """{% extends 'single_error' %} {% block extra %}{{ bucket }}{% endblock %} @@ -10,6 +10,15 @@ ERROR_WITH_KEY_NAME = """{% extends 'single_error' %} {% block extra %}{{ key_name }}{% endblock %} """ +ERROR_WITH_CONDITION_NAME = """{% extends 'single_error' %} +{% block extra %}{{ condition }}{% endblock %} +""" + +ERROR_WITH_RANGE = """{% extends 'single_error' %} +{% block extra %}{{ actual_size }} +{{ range_requested }}{% endblock %} +""" + class S3ClientError(RESTError): def __init__(self, *args, **kwargs): @@ -127,6 +136,18 @@ class InvalidRequest(S3ClientError): ) +class IllegalLocationConstraintException(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(IllegalLocationConstraintException, self).__init__( + "IllegalLocationConstraintException", + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.", + *args, + **kwargs + ) + + class MalformedXML(S3ClientError): code = 400 @@ -347,3 +368,59 @@ class InvalidPublicAccessBlockConfiguration(S3ClientError): *args, **kwargs ) + + +class WrongPublicAccessBlockAccountIdError(S3ClientError): + code = 403 + + def __init__(self): + super(WrongPublicAccessBlockAccountIdError, self).__init__( + "AccessDenied", "Access Denied" + ) + + +class NoSystemTags(S3ClientError): + code = 400 + + def __init__(self): + super(NoSystemTags, self).__init__( + "InvalidTag", "System tags cannot be added/updated by requester" + ) + + +class NoSuchUpload(S3ClientError): + code = 404 + + def __init__(self): + super(NoSuchUpload, self).__init__( + "NoSuchUpload", "The specified multipart upload does not exist." 
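A sketch of the double encoding used above: AWS Config delivers the "configuration" field as a JSON string nested inside the outer JSON item, which is why the backend json.dumps() it before returning.

```python
import json

configuration = {"blockPublicAcls": True, "ignorePublicAcls": False,
                 "blockPublicPolicy": True, "restrictPublicBuckets": True}
item = {
    "resourceType": "AWS::S3::AccountPublicAccessBlock",
    "configuration": json.dumps(configuration),  # a string, not a dict
}
# Consumers decode the inner document back out:
assert json.loads(item["configuration"])["blockPublicAcls"] is True
```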
+ ) + + +class PreconditionFailed(S3ClientError): + code = 412 + + def __init__(self, failed_condition, **kwargs): + kwargs.setdefault("template", "condition_error") + self.templates["condition_error"] = ERROR_WITH_CONDITION_NAME + super(PreconditionFailed, self).__init__( + "PreconditionFailed", + "At least one of the pre-conditions you specified did not hold", + condition=failed_condition, + **kwargs + ) + + +class InvalidRange(S3ClientError): + code = 416 + + def __init__(self, range_requested, actual_size, **kwargs): + kwargs.setdefault("template", "range_error") + self.templates["range_error"] = ERROR_WITH_RANGE + super(InvalidRange, self).__init__( + "InvalidRange", + "The requested range is not satisfiable", + range_requested=range_requested, + actual_size=actual_size, + **kwargs + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index fe8e908ef..17282739a 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -5,6 +5,7 @@ import json import os import base64 import datetime +import pytz import hashlib import copy import itertools @@ -12,6 +13,7 @@ import codecs import random import string import tempfile +import threading import sys import time import uuid @@ -19,8 +21,10 @@ import uuid import six from bisect import insort -from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel +from moto.core.utils import iso_8601_datetime_without_milliseconds_s3, rfc_1123_datetime +from moto.cloudwatch.models import MetricDatum +from moto.utilities.tagging_service import TaggingService from .exceptions import ( BucketAlreadyExists, MissingBucket, @@ -33,11 +37,13 @@ from .exceptions import ( MalformedXML, InvalidStorageClass, InvalidTargetBucketForLogging, - DuplicateTagKeys, CrossLocationLoggingProhibitted, NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, + WrongPublicAccessBlockAccountIdError, + NoSuchUpload, ) +from .cloud_formation import cfn_to_api_encryption, is_replacement_update from .utils import clean_key_name, _VersionedKeyStore MAX_BUCKET_NAME_LENGTH = 63 @@ -58,6 +64,13 @@ DEFAULT_TEXT_ENCODING = sys.getdefaultencoding() OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a" +def get_moto_s3_account_id(): + """This makes it easy for mocking AWS Account IDs when using AWS Config + -- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free. 
+ """ + return ACCOUNT_ID + + class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key @@ -67,7 +80,7 @@ class FakeDeleteMarker(BaseModel): @property def last_modified_ISO8601(self): - return iso_8601_datetime_with_milliseconds(self.last_modified) + return iso_8601_datetime_without_milliseconds_s3(self.last_modified) @property def version_id(self): @@ -85,6 +98,7 @@ class FakeKey(BaseModel): version_id=0, max_buffer_size=DEFAULT_KEY_BUFFER_SIZE, multipart=None, + bucket_name=None, ): self.name = name self.last_modified = datetime.datetime.utcnow() @@ -96,12 +110,13 @@ class FakeKey(BaseModel): self._etag = etag self._version_id = version_id self._is_versioned = is_versioned - self._tagging = FakeTagging() self.multipart = multipart + self.bucket_name = bucket_name self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size) self._max_buffer_size = max_buffer_size self.value = value + self.lock = threading.Lock() @property def version_id(self): @@ -109,8 +124,19 @@ class FakeKey(BaseModel): @property def value(self): + self.lock.acquire() self._value_buffer.seek(0) - return self._value_buffer.read() + r = self._value_buffer.read() + r = copy.copy(r) + self.lock.release() + return r + + @property + def arn(self): + # S3 Objects don't have an ARN, but we do need something unique when creating tags against this resource + return "arn:aws:s3:::{}/{}/{}".format( + self.bucket_name, self.name, self.version_id + ) @value.setter def value(self, new_value): @@ -122,6 +148,7 @@ class FakeKey(BaseModel): if isinstance(new_value, six.text_type): new_value = new_value.encode(DEFAULT_TEXT_ENCODING) self._value_buffer.write(new_value) + self.contentsize = len(new_value) def copy(self, new_name=None, new_is_versioned=None): r = copy.deepcopy(self) @@ -137,18 +164,19 @@ class FakeKey(BaseModel): self._metadata = {} self._metadata.update(metadata) - def set_tagging(self, tagging): - self._tagging = tagging - def set_storage_class(self, storage): if storage is not None and storage not in STORAGE_CLASS: raise InvalidStorageClass(storage=storage) self._storage_class = storage + def set_expiry(self, expiry): + self._expiry = expiry + def set_acl(self, acl): self.acl = acl def append_to_value(self, value): + self.contentsize += len(value) self._value_buffer.seek(0, os.SEEK_END) self._value_buffer.write(value) @@ -182,7 +210,7 @@ class FakeKey(BaseModel): @property def last_modified_ISO8601(self): - return iso_8601_datetime_with_milliseconds(self.last_modified) + return iso_8601_datetime_without_milliseconds_s3(self.last_modified) @property def last_modified_RFC1123(self): @@ -194,10 +222,6 @@ class FakeKey(BaseModel): def metadata(self): return self._metadata - @property - def tagging(self): - return self._tagging - @property def response_dict(self): res = { @@ -221,8 +245,7 @@ class FakeKey(BaseModel): @property def size(self): - self._value_buffer.seek(0, os.SEEK_END) - return self._value_buffer.tell() + return self.contentsize @property def storage_class(self): @@ -241,6 +264,7 @@ class FakeKey(BaseModel): state = self.__dict__.copy() state["value"] = self.value del state["_value_buffer"] + del state["lock"] return state def __setstate__(self, state): @@ -250,6 +274,7 @@ class FakeKey(BaseModel): max_size=self._max_buffer_size ) self.value = state["value"] + self.lock = threading.Lock() class FakeMultipart(BaseModel): @@ -259,7 +284,9 @@ class FakeMultipart(BaseModel): self.parts = {} self.partlist = [] # ordered list of part ID's rand_b64 = 
base64.b64encode(os.urandom(UPLOAD_ID_BYTES)) - self.id = rand_b64.decode("utf-8").replace("=", "").replace("+", "") + self.id = ( + rand_b64.decode("utf-8").replace("=", "").replace("+", "").replace("/", "") + ) def complete(self, body): decode_hex = codecs.getdecoder("hex_codec") @@ -276,7 +303,7 @@ class FakeMultipart(BaseModel): etag = etag.replace('"', "") if part is None or part_etag != etag: raise InvalidPart() - if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE: + if last is not None and last.contentsize < UPLOAD_PART_MIN_SIZE: raise EntityTooSmall() md5s.extend(decode_hex(part_etag)[0]) total.extend(part.value) @@ -454,26 +481,10 @@ def get_canned_acl(acl): return FakeAcl(grants=grants) -class FakeTagging(BaseModel): - def __init__(self, tag_set=None): - self.tag_set = tag_set or FakeTagSet() - - -class FakeTagSet(BaseModel): - def __init__(self, tags=None): - self.tags = tags or [] - - -class FakeTag(BaseModel): - def __init__(self, key, value=None): - self.key = key - self.value = value - - class LifecycleFilter(BaseModel): def __init__(self, prefix=None, tag=None, and_filter=None): self.prefix = prefix - self.tag = tag + (self.tag_key, self.tag_value) = tag if tag else (None, None) self.and_filter = and_filter def to_config_dict(self): @@ -482,11 +493,11 @@ class LifecycleFilter(BaseModel): "predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix} } - elif self.tag: + elif self.tag_key: return { "predicate": { "type": "LifecycleTagPredicate", - "tag": {"key": self.tag.key, "value": self.tag.value}, + "tag": {"key": self.tag_key, "value": self.tag_value}, } } @@ -510,12 +521,9 @@ class LifecycleAndFilter(BaseModel): if self.prefix is not None: data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix}) - for tag in self.tags: + for key, value in self.tags.items(): data.append( - { - "type": "LifecycleTagPredicate", - "tag": {"key": tag.key, "value": tag.value}, - } + {"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},} ) return data @@ -759,7 +767,7 @@ class PublicAccessBlock(BaseModel): } -class FakeBucket(BaseModel): +class FakeBucket(CloudFormationModel): def __init__(self, name, region_name): self.name = name self.region_name = region_name @@ -770,19 +778,23 @@ class FakeBucket(BaseModel): self.policy = None self.website_configuration = None self.acl = get_canned_acl("private") - self.tags = FakeTagging() self.cors = [] self.logging = {} self.notification_configuration = None self.accelerate_configuration = None self.payer = "BucketOwner" - self.creation_date = datetime.datetime.utcnow() + self.creation_date = datetime.datetime.now(tz=pytz.utc) self.public_access_block = None + self.encryption = None @property def location(self): return self.region_name + @property + def creation_date_ISO8601(self): + return iso_8601_datetime_without_milliseconds_s3(self.creation_date) + @property def is_versioned(self): return self.versioning_status == "Enabled" @@ -862,7 +874,7 @@ class FakeBucket(BaseModel): and_filter = None if rule["Filter"].get("And"): filters += 1 - and_tags = [] + and_tags = {} if rule["Filter"]["And"].get("Tag"): if not isinstance(rule["Filter"]["And"]["Tag"], list): rule["Filter"]["And"]["Tag"] = [ @@ -870,7 +882,7 @@ class FakeBucket(BaseModel): ] for t in rule["Filter"]["And"]["Tag"]: - and_tags.append(FakeTag(t["Key"], t.get("Value", ""))) + and_tags[t["Key"]] = t.get("Value", "") try: and_prefix = ( @@ -884,7 +896,7 @@ class FakeBucket(BaseModel): filter_tag = None if rule["Filter"].get("Tag"): filters 
+= 1 - filter_tag = FakeTag( + filter_tag = ( rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", ""), ) @@ -971,16 +983,6 @@ class FakeBucket(BaseModel): def delete_cors(self): self.cors = [] - def set_tags(self, tagging): - self.tags = tagging - - def delete_tags(self): - self.tags = FakeTagging() - - @property - def tagging(self): - return self.tags - def set_logging(self, logging_config, bucket_backend): if not logging_config: self.logging = {} @@ -1059,26 +1061,107 @@ class FakeBucket(BaseModel): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException - if attribute_name == "DomainName": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') + if attribute_name == "Arn": + return self.arn + elif attribute_name == "DomainName": + return self.domain_name + elif attribute_name == "DualStackDomainName": + return self.dual_stack_domain_name + elif attribute_name == "RegionalDomainName": + return self.regional_domain_name elif attribute_name == "WebsiteURL": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') + return self.website_url raise UnformattedGetAttTemplateException() def set_acl(self, acl): self.acl = acl + @property + def arn(self): + return "arn:aws:s3:::{}".format(self.name) + + @property + def domain_name(self): + return "{}.s3.amazonaws.com".format(self.name) + + @property + def dual_stack_domain_name(self): + return "{}.s3.dualstack.{}.amazonaws.com".format(self.name, self.region_name) + + @property + def regional_domain_name(self): + return "{}.s3.{}.amazonaws.com".format(self.name, self.region_name) + + @property + def website_url(self): + return "http://{}.s3-website.{}.amazonaws.com".format( + self.name, self.region_name + ) + @property def physical_resource_id(self): return self.name + @staticmethod + def cloudformation_name_type(): + return "BucketName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-s3-bucket.html + return "AWS::S3::Bucket" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): bucket = s3_backend.create_bucket(resource_name, region_name) + + properties = cloudformation_json.get("Properties", {}) + + if "BucketEncryption" in properties: + bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"]) + s3_backend.put_bucket_encryption( + bucket_name=resource_name, encryption=[bucket_encryption] + ) + return bucket + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "BucketEncryption" in properties: + bucket_encryption = cfn_to_api_encryption( + properties["BucketEncryption"] + ) + s3_backend.put_bucket_encryption( + bucket_name=original_resource.name, encryption=[bucket_encryption] + ) + return original_resource + + 
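A usage sketch for the expanded Fn::GetAtt support above, assuming stack outputs resolve through the new domain_name property as the diff suggests (bucket name is an arbitrary example):

```python
import json

import boto3
from moto import mock_cloudformation, mock_s3

template = {
    "Resources": {"Bucket": {"Type": "AWS::S3::Bucket",
                             "Properties": {"BucketName": "my-bucket"}}},
    "Outputs": {"Domain": {"Value": {"Fn::GetAtt": ["Bucket", "DomainName"]}}},
}


@mock_s3
@mock_cloudformation
def show_output():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="s", TemplateBody=json.dumps(template))
    outputs = cf.describe_stacks(StackName="s")["Stacks"][0]["Outputs"]
    # Previously this raised NotImplementedError; now it resolves.
    assert outputs[0]["OutputValue"] == "my-bucket.s3.amazonaws.com"
```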
@classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + s3_backend.delete_bucket(resource_name) + def to_config_dict(self): """Return the AWS Config JSON format of this S3 bucket. @@ -1093,7 +1176,7 @@ class FakeBucket(BaseModel): int(time.mktime(self.creation_date.timetuple())) ), # PY2 and 3 compatible "configurationItemMD5Hash": "", - "arn": "arn:aws:s3:::{}".format(self.name), + "arn": self.arn, "resourceType": "AWS::S3::Bucket", "resourceId": self.name, "resourceName": self.name, @@ -1102,7 +1185,7 @@ class FakeBucket(BaseModel): "resourceCreationTime": str(self.creation_date), "relatedEvents": [], "relationships": [], - "tags": {tag.key: tag.value for tag in self.tagging.tag_set.tags}, + "tags": s3_backend.tagger.get_tag_dict_for_resource(self.arn), "configuration": { "name": self.name, "owner": {"id": OWNER}, @@ -1163,6 +1246,43 @@ class FakeBucket(BaseModel): class S3Backend(BaseBackend): def __init__(self): self.buckets = {} + self.account_public_access_block = None + self.tagger = TaggingService() + + # TODO: This is broken! DO NOT IMPORT MUTABLE DATA TYPES FROM OTHER AREAS -- THIS BREAKS UNMOCKING! + # WRAP WITH A GETTER/SETTER FUNCTION + # Register this class as a CloudWatch Metric Provider + # Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available + # metric_providers["S3"] = self + + def get_cloudwatch_metrics(self): + metrics = [] + for name, bucket in self.buckets.items(): + metrics.append( + MetricDatum( + namespace="AWS/S3", + name="BucketSizeBytes", + value=bucket.keys.item_size(), + dimensions=[ + {"Name": "StorageType", "Value": "StandardStorage"}, + {"Name": "BucketName", "Value": name}, + ], + timestamp=datetime.datetime.now(), + ) + ) + metrics.append( + MetricDatum( + namespace="AWS/S3", + name="NumberOfObjects", + value=len(bucket.keys), + dimensions=[ + {"Name": "StorageType", "Value": "AllStorageTypes"}, + {"Name": "BucketName", "Value": name}, + ], + timestamp=datetime.datetime.now(), + ) + ) + return metrics def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: @@ -1196,6 +1316,9 @@ class S3Backend(BaseBackend): def get_bucket_versioning(self, bucket_name): return self.get_bucket(bucket_name).versioning_status + def get_bucket_encryption(self, bucket_name): + return self.get_bucket(bucket_name).encryption + def get_bucket_latest_versions(self, bucket_name): versions = self.get_bucket_versions(bucket_name) latest_modified_per_key = {} @@ -1244,6 +1367,12 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.policy = None + def put_bucket_encryption(self, bucket_name, encryption): + self.get_bucket(bucket_name).encryption = encryption + + def delete_bucket_encryption(self, bucket_name): + self.get_bucket(bucket_name).encryption = None + def set_bucket_lifecycle(self, bucket_name, rules): bucket = self.get_bucket(bucket_name) bucket.set_lifecycle(rules) @@ -1264,7 +1393,17 @@ class S3Backend(BaseBackend): return bucket.public_access_block - def set_key( + def get_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not self.account_public_access_block: + raise NoSuchPublicAccessBlockConfiguration() + + return self.account_public_access_block + + def set_object( self, bucket_name, key_name, value, storage=None, etag=None, multipart=None ): key_name = 
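A usage sketch for the account-level Public Access Block handling above: the AccountId must equal moto's fixed ACCOUNT_ID (123456789012 by default), otherwise the new WrongPublicAccessBlockAccountIdError surfaces as AccessDenied.

```python
import boto3
from moto import mock_s3


@mock_s3
def put_account_pab():
    client = boto3.client("s3control", region_name="us-east-1")
    client.put_public_access_block(
        AccountId="123456789012",
        PublicAccessBlockConfiguration={"BlockPublicAcls": True,
                                        "IgnorePublicAcls": True,
                                        "BlockPublicPolicy": True,
                                        "RestrictPublicBuckets": True},
    )
    resp = client.get_public_access_block(AccountId="123456789012")
    assert resp["PublicAccessBlockConfiguration"]["BlockPublicAcls"] is True
```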
clean_key_name(key_name) @@ -1295,11 +1434,11 @@ class S3Backend(BaseBackend): def append_to_key(self, bucket_name, key_name, value): key_name = clean_key_name(key_name) - key = self.get_key(bucket_name, key_name) + key = self.get_object(bucket_name, key_name) key.append_to_value(value) return key - def get_key(self, bucket_name, key_name, version_id=None, part_number=None): + def get_object(self, bucket_name, key_name, version_id=None, part_number=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None @@ -1322,23 +1461,32 @@ class S3Backend(BaseBackend): else: return None - def set_key_tagging(self, bucket_name, key_name, tagging, version_id=None): - key = self.get_key(bucket_name, key_name, version_id) + def get_key_tags(self, key): + return self.tagger.list_tags_for_resource(key.arn) + + def set_key_tags(self, key, tags, key_name=None): if key is None: raise MissingKey(key_name) - key.set_tagging(tagging) + self.tagger.delete_all_tags_for_resource(key.arn) + self.tagger.tag_resource( + key.arn, [{"Key": k, "Value": v} for (k, v) in tags.items()], + ) return key - def put_bucket_tagging(self, bucket_name, tagging): - tag_keys = [tag.key for tag in tagging.tag_set.tags] - if len(tag_keys) != len(set(tag_keys)): - raise DuplicateTagKeys() + def get_bucket_tagging(self, bucket_name): bucket = self.get_bucket(bucket_name) - bucket.set_tags(tagging) + return self.tagger.list_tags_for_resource(bucket.arn) + + def put_bucket_tagging(self, bucket_name, tags): + bucket = self.get_bucket(bucket_name) + self.tagger.delete_all_tags_for_resource(bucket.arn) + self.tagger.tag_resource( + bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()], + ) def delete_bucket_tagging(self, bucket_name): bucket = self.get_bucket(bucket_name) - bucket.delete_tags() + self.tagger.delete_all_tags_for_resource(bucket.arn) def put_bucket_cors(self, bucket_name, cors_rules): bucket = self.get_bucket(bucket_name) @@ -1356,6 +1504,13 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.public_access_block = None + def delete_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + self.account_public_access_block = None + def put_bucket_notification_configuration(self, bucket_name, notification_config): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) @@ -1384,6 +1539,21 @@ class S3Backend(BaseBackend): pub_block_config.get("RestrictPublicBuckets"), ) + def put_account_public_access_block(self, account_id, pub_block_config): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not pub_block_config: + raise InvalidPublicAccessBlockConfiguration() + + self.account_public_access_block = PublicAccessBlock( + pub_block_config.get("BlockPublicAcls"), + pub_block_config.get("IgnorePublicAcls"), + pub_block_config.get("BlockPublicPolicy"), + pub_block_config.get("RestrictPublicBuckets"), + ) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) @@ -1399,7 +1569,7 @@ class S3Backend(BaseBackend): return del bucket.multiparts[multipart_id] - key = self.set_key( + key = self.set_object( bucket_name, multipart.key_name, value, etag=etag, multipart=multipart ) 
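A sketch of the centralized TaggingService the S3 backend now delegates to, using only the three methods exercised in the diff: tags live in one store keyed by ARN, as lists of {"Key": ..., "Value": ...} dicts.

```python
from moto.utilities.tagging_service import TaggingService

tagger = TaggingService()
tagger.tag_resource("arn:aws:s3:::mybucket",
                    [{"Key": "env", "Value": "prod"}])
print(tagger.list_tags_for_resource("arn:aws:s3:::mybucket"))
# {'Tags': [{'Key': 'env', 'Value': 'prod'}]}
tagger.delete_all_tags_for_resource("arn:aws:s3:::mybucket")
```

This is why FakeKey gains an arn property above: S3 objects have no real ARN, but the tagger needs a unique key per bucket/key/version.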
key.set_metadata(multipart.metadata) @@ -1407,6 +1577,9 @@ class S3Backend(BaseBackend): def cancel_multipart(self, bucket_name, multipart_id): bucket = self.get_bucket(bucket_name) + multipart_data = bucket.multiparts.get(multipart_id, None) + if not multipart_data: + raise NoSuchUpload() del bucket.multiparts[multipart_id] def list_multipart(self, bucket_name, multipart_id): @@ -1436,7 +1609,7 @@ class S3Backend(BaseBackend): dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = self.get_key( + src_value = self.get_object( src_bucket_name, src_key_name, version_id=src_version_id ).value if start_byte is not None: @@ -1478,21 +1651,38 @@ class S3Backend(BaseBackend): def _set_delete_marker(self, bucket_name, key_name): bucket = self.get_bucket(bucket_name) - bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name]) + delete_marker = FakeDeleteMarker(key=bucket.keys[key_name]) + bucket.keys[key_name] = delete_marker + return delete_marker - def delete_key(self, bucket_name, key_name, version_id=None): + def delete_object_tagging(self, bucket_name, key_name, version_id=None): + key = self.get_object(bucket_name, key_name, version_id=version_id) + self.tagger.delete_all_tags_for_resource(key.arn) + + def delete_object(self, bucket_name, key_name, version_id=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) + response_meta = {} + try: if not bucket.is_versioned: bucket.keys.pop(key_name) else: if version_id is None: - self._set_delete_marker(bucket_name, key_name) + delete_marker = self._set_delete_marker(bucket_name, key_name) + response_meta["version-id"] = delete_marker.version_id else: if key_name not in bucket.keys: raise KeyError + + response_meta["delete-marker"] = "false" + for key in bucket.keys.getlist(key_name): + if str(key.version_id) == str(version_id): + if type(key) is FakeDeleteMarker: + response_meta["delete-marker"] = "true" + break + bucket.keys.setlist( key_name, [ @@ -1504,9 +1694,9 @@ class S3Backend(BaseBackend): if not bucket.keys.getlist(key_name): bucket.keys.pop(key_name) - return True + return True, response_meta except KeyError: - return False + return False, None def copy_key( self, @@ -1521,14 +1711,18 @@ class S3Backend(BaseBackend): src_key_name = clean_key_name(src_key_name) dest_key_name = clean_key_name(dest_key_name) dest_bucket = self.get_bucket(dest_bucket_name) - key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) + key = self.get_object(src_bucket_name, src_key_name, version_id=src_version_id) new_key = key.copy(dest_key_name, dest_bucket.is_versioned) + self.tagger.copy_tags(key.arn, new_key.arn) if storage is not None: new_key.set_storage_class(storage) if acl is not None: new_key.set_acl(acl) + if key.storage_class in "GLACIER": + # Object copied from Glacier object should not have expiry + new_key.set_expiry(None) dest_bucket.keys[dest_key_name] = new_key @@ -1540,5 +1734,17 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) return bucket.acl + def get_bucket_cors(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.cors + + def get_bucket_logging(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.logging + + def get_bucket_notification_configuration(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.notification_configuration + s3_backend = S3Backend() diff --git a/moto/s3/responses.py b/moto/s3/responses.py index a04427172..b01bed1fb 
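A usage sketch for the reworked delete_object above, assuming the response layer (not shown in this excerpt) forwards response_meta as the usual x-amz-* headers: deleting from a versioned bucket now reports the delete marker's version id.

```python
import boto3
from moto import mock_s3


@mock_s3
def delete_in_versioned_bucket():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="b")
    s3.put_bucket_versioning(
        Bucket="b", VersioningConfiguration={"Status": "Enabled"}
    )
    s3.put_object(Bucket="b", Key="k", Body=b"v1")
    resp = s3.delete_object(Bucket="b", Key="k")
    assert "VersionId" in resp  # the delete marker's version id
```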
100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,9 +4,10 @@ import re import sys import six +from botocore.awsrequest import AWSPreparedRequest from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys -from six.moves.urllib.parse import parse_qs, urlparse, unquote +from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl import xmltodict @@ -23,15 +24,20 @@ from moto.s3bucket_path.utils import ( from .exceptions import ( BucketAlreadyExists, + DuplicateTagKeys, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, MalformedACLError, + IllegalLocationConstraintException, InvalidNotificationARN, InvalidNotificationEvent, ObjectNotInActiveTierError, + NoSystemTags, + PreconditionFailed, + InvalidRange, ) from .models import ( s3_backend, @@ -40,9 +46,6 @@ from .models import ( FakeGrant, FakeAcl, FakeKey, - FakeTagging, - FakeTagSet, - FakeTag, ) from .utils import ( bucket_name_from_url, @@ -122,16 +125,27 @@ ACTION_MAP = { "uploadId": "PutObject", }, }, + "CONTROL": { + "GET": {"publicAccessBlock": "GetPublicAccessBlock"}, + "PUT": {"publicAccessBlock": "PutPublicAccessBlock"}, + "DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"}, + }, } def parse_key_name(pth): - return pth.lstrip("/") + # strip the first '/' left by urlparse + return pth[1:] if pth.startswith("/") else pth def is_delete_keys(request, path, bucket_name): - return path == "/?delete" or ( - path == "/" and getattr(request, "query_string", "") == "delete" + # Golang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty) + # Python sends a request as url/?delete (treating it as a flag) + # https://github.com/spulec/moto/issues/2937 + return ( + path == "/?delete" + or path == "/?delete=" + or (path == "/" and getattr(request, "query_string", "") == "delete") ) @@ -167,7 +181,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): or host.startswith("localhost") or host.startswith("localstack") or re.match(r"^[^.]+$", host) - or re.match(r"^.*\.svc\.cluster\.local$", host) + or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host) ): # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), # (3) local host names that do not contain a "."
(e.g., Docker container host names), or @@ -219,7 +233,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): # Depending on which calling format the client is using, we don't know # if this is a bucket or key request so we have to check if self.subdomain_based_buckets(request): - return self.key_response(request, full_url, headers) + return self.key_or_control_response(request, full_url, headers) else: # Using path-based buckets return self.bucket_response(request, full_url, headers) @@ -286,7 +300,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return self._bucket_response_post(request, body, bucket_name) else: raise NotImplementedError( - "Method {0} has not been impelemented in the S3 backend yet".format( + "Method {0} has not been implemented in the S3 backend yet".format( method ) ) @@ -370,33 +384,35 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) elif "tagging" in querystring: - bucket = self.backend.get_bucket(bucket_name) + tags = self.backend.get_bucket_tagging(bucket_name)["Tags"] # "Special Error" if no tags: - if len(bucket.tagging.tag_set.tags) == 0: + if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) return 404, {}, template.render(bucket_name=bucket_name) - template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) - return template.render(bucket=bucket) + template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) + return template.render(tags=tags) elif "logging" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.logging: + logging = self.backend.get_bucket_logging(bucket_name) + if not logging: template = self.response_template(S3_NO_LOGGING_CONFIG) return 200, {}, template.render() template = self.response_template(S3_LOGGING_CONFIG) - return 200, {}, template.render(logging=bucket.logging) + return 200, {}, template.render(logging=logging) elif "cors" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if len(bucket.cors) == 0: + cors = self.backend.get_bucket_cors(bucket_name) + if len(cors) == 0: template = self.response_template(S3_NO_CORS_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) - return template.render(bucket=bucket) + return template.render(cors=cors) elif "notification" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.notification_configuration: + notification_configuration = self.backend.get_bucket_notification_configuration( + bucket_name + ) + if not notification_configuration: return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) - return template.render(bucket=bucket) + return template.render(config=notification_configuration) elif "accelerate" in querystring: bucket = self.backend.get_bucket(bucket_name) if bucket.accelerate_configuration is None: @@ -454,6 +470,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): is_truncated="false", ), ) + elif "encryption" in querystring: + encryption = self.backend.get_bucket_encryption(bucket_name) + if not encryption: + template = self.response_template(S3_NO_ENCRYPTION) + return 404, {}, template.render(bucket_name=bucket_name) + template = self.response_template(S3_ENCRYPTION_CONFIG) + return 200, {}, template.render(encryption=encryption) elif querystring.get("list-type", [None])[0] == "2": 
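# "list-type=2" in the querystring identifies a ListObjectsV2 request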
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring) @@ -585,6 +608,42 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): next_continuation_token = None return result_keys, is_truncated, next_continuation_token + def _body_contains_location_constraint(self, body): + if body: + try: + xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"] + return True + except KeyError: + pass + return False + + def _create_bucket_configuration_is_empty(self, body): + if body: + try: + create_bucket_configuration = xmltodict.parse(body)[ + "CreateBucketConfiguration" + ] + del create_bucket_configuration["@xmlns"] + if len(create_bucket_configuration) == 0: + return True + except KeyError: + pass + return False + + def _parse_pab_config(self, body): + parsed_xml = xmltodict.parse(body) + parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) + + # If Python 2, fix the unicode strings: + if sys.version_info[0] < 3: + parsed_xml = { + "PublicAccessBlockConfiguration": py2_strip_unicode_keys( + dict(parsed_xml["PublicAccessBlockConfiguration"]) + ) + } + + return parsed_xml + def _bucket_response_put( self, request, body, region_name, bucket_name, querystring ): @@ -663,27 +722,33 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): raise e elif "publicAccessBlock" in querystring: - parsed_xml = xmltodict.parse(body) - parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) - - # If Python 2, fix the unicode strings: - if sys.version_info[0] < 3: - parsed_xml = { - "PublicAccessBlockConfiguration": py2_strip_unicode_keys( - dict(parsed_xml["PublicAccessBlockConfiguration"]) - ) - } - + pab_config = self._parse_pab_config(body) self.backend.put_bucket_public_access_block( - bucket_name, parsed_xml["PublicAccessBlockConfiguration"] + bucket_name, pab_config["PublicAccessBlockConfiguration"] ) return "" - + elif "encryption" in querystring: + try: + self.backend.put_bucket_encryption( + bucket_name, self._encryption_config_from_xml(body) + ) + return "" + except KeyError: + raise MalformedXML() else: + # us-east-1, the default AWS region behaves a bit differently + # - you should not use it as a location constraint --> it fails + # - querying the location constraint returns None + # - LocationConstraint has to be specified if outside us-east-1 + if ( + region_name != DEFAULT_REGION_NAME + and not self._body_contains_location_constraint(body) + ): + raise IllegalLocationConstraintException() if body: - # us-east-1, the default AWS region behaves a bit differently - # - you should not use it as a location constraint --> it fails - # - querying the location constraint returns None + if self._create_bucket_configuration_is_empty(body): + raise MalformedXML() + try: forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][ "LocationConstraint" @@ -737,6 +804,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): elif "publicAccessBlock" in querystring: self.backend.delete_bucket_public_access_block(bucket_name) return 204, {}, "" + elif "encryption" in querystring: + self.backend.delete_bucket_encryption(bucket_name) + return 204, {}, "" removed_bucket = self.backend.delete_bucket(bucket_name) @@ -750,6 +820,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return 409, {}, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name): + response_headers = {} if not
request.headers.get("Content-Length"): return 411, {}, "Content-Length required" @@ -771,11 +842,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: # HTTPretty, build new form object body = body.decode() - - form = {} - for kv in body.split("&"): - k, v = kv.split("=") - form[k] = v + form = dict(parse_qsl(body)) key = form["key"] if "file" in form: @@ -783,13 +850,27 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: f = request.files["file"].stream.read() - new_key = self.backend.set_key(bucket_name, key, f) + if "success_action_redirect" in form: + response_headers["Location"] = form["success_action_redirect"] + + if "success_action_status" in form: + status_code = form["success_action_status"] + elif "success_action_redirect" in form: + status_code = 303 + else: + status_code = 204 + + new_key = self.backend.set_object(bucket_name, key, f) + + if form.get("acl"): + acl = get_canned_acl(form.get("acl")) + new_key.set_acl(acl) # Metadata metadata = metadata_from_headers(form) new_key.set_metadata(metadata) - return 200, {}, "" + return status_code, response_headers, "" @staticmethod def _get_path(request): @@ -805,27 +886,35 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): def _bucket_response_delete_keys(self, request, body, bucket_name): template = self.response_template(S3_DELETE_KEYS_RESPONSE) + body_dict = xmltodict.parse(body) - keys = minidom.parseString(body).getElementsByTagName("Key") - deleted_names = [] - error_names = [] - if len(keys) == 0: + objects = body_dict["Delete"].get("Object", []) + if not isinstance(objects, list): + # We expect a list of objects, but when there is a single node xmltodict does not + # return a list. + objects = [objects] + if len(objects) == 0: raise MalformedXML() - for k in keys: - key_name = k.firstChild.nodeValue - success = self.backend.delete_key( - bucket_name, undo_clean_key_name(key_name) + deleted_objects = [] + error_names = [] + + for object_ in objects: + key_name = object_["Key"] + version_id = object_.get("VersionId", None) + + success, _ = self.backend.delete_object( + bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: - deleted_names.append(key_name) + deleted_objects.append((key_name, version_id)) else: error_names.append(key_name) return ( 200, {}, - template.render(deleted=deleted_names, delete_errors=error_names), + template.render(deleted=deleted_objects, delete_errors=error_names), ) def _handle_range_header(self, request, headers, response_content): @@ -848,21 +937,31 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): - return 416, response_headers, "" + raise InvalidRange( + actual_size=str(length), range_requested=request.headers.get("range") + ) response_headers["content-range"] = "bytes {0}-{1}/{2}".format( begin, end, length ) - return 206, response_headers, response_content[begin : end + 1] + content = response_content[begin : end + 1] + response_headers["content-length"] = len(content) + return 206, response_headers, content - def key_response(self, request, full_url, headers): + def key_or_control_response(self, request, full_url, headers): + # Key and Control are lumped in because splitting out the regex is too much of a pain :/ self.method = request.method self.path = self._get_path(request) self.headers = request.headers if "host" not in self.headers: self.headers["host"] 
= urlparse(full_url).netloc response_headers = {} + try: - response = self._key_response(request, full_url, headers) + # Is this an S3 control response? + if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url: + response = self._control_response(request, full_url, headers) + else: + response = self._key_response(request, full_url, headers) except S3ClientError as s3error: response = s3error.code, {}, s3error.description @@ -873,11 +972,102 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): status_code, response_headers, response_content = response if status_code == 200 and "range" in request.headers: - return self._handle_range_header( - request, response_headers, response_content - ) + try: + return self._handle_range_header( + request, response_headers, response_content + ) + except S3ClientError as s3error: + return s3error.code, {}, s3error.description return status_code, response_headers, response_content + def _control_response(self, request, full_url, headers): + parsed_url = urlparse(full_url) + query = parse_qs(parsed_url.query, keep_blank_values=True) + method = request.method + + if hasattr(request, "body"): + # Boto + body = request.body + if hasattr(body, "read"): + body = body.read() + else: + # Flask server + body = request.data + if body is None: + body = b"" + + if method == "GET": + return self._control_response_get(request, query, headers) + elif method == "PUT": + return self._control_response_put(request, body, query, headers) + elif method == "DELETE": + return self._control_response_delete(request, query, headers) + else: + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format( + method + ) + ) + + def _control_response_get(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "GET", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + public_block_config = self.backend.get_account_public_access_block( + headers["x-amz-account-id"] + ) + template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION) + return ( + 200, + response_headers, + template.render(public_block_config=public_block_config), + ) + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_put(self, request, body, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "PUT", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + pab_config = self._parse_pab_config(body) + self.backend.put_account_public_access_block( + headers["x-amz-account-id"], + pab_config["PublicAccessBlockConfiguration"], + ) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_delete(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. 
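+ # S3 Control operations are account-scoped; the account id is read from the x-amz-account-id header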
+ self._set_action("CONTROL", "DELETE", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + self.backend.delete_account_public_access_block(headers["x-amz-account-id"]) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + def _key_response(self, request, full_url, headers): parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) @@ -897,11 +1087,15 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): signed_url = "Signature=" in request.url elif hasattr(request, "requestline"): signed_url = "Signature=" in request.path - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) if key: if not key.acl.public_read and not signed_url: return 403, {}, "" + elif signed_url: + # coming in from requests.get(s3.generate_presigned_url()) + if self._invalid_headers(request.url, dict(request.headers)): + return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS if hasattr(request, "body"): # Boto @@ -911,6 +1105,11 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: # Flask server body = request.data + # when the data is being passed as a file + if request.files and not body: + for _, value in request.files.items(): + body = value.stream.read() + if body is None: body = b"" @@ -959,19 +1158,35 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) version_id = query.get("versionId", [None])[0] if_modified_since = headers.get("If-Modified-Since", None) - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + if_match = headers.get("If-Match", None) + if_none_match = headers.get("If-None-Match", None) + if_unmodified_since = headers.get("If-Unmodified-Since", None) + + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) if key is None: raise MissingKey(key_name) + + if if_unmodified_since: + if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) + if key.last_modified > if_unmodified_since: + raise PreconditionFailed("If-Unmodified-Since") + if if_match and key.etag != if_match: + raise PreconditionFailed("If-Match") + if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) - if if_modified_since and key.last_modified < if_modified_since: + if key.last_modified < if_modified_since: + return 304, response_headers, "Not Modified" + if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" + if "acl" in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, template.render(obj=key) if "tagging" in query: + tags = self.backend.get_key_tags(key)["Tags"] template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) - return 200, response_headers, template.render(obj=key) + return 200, response_headers, template.render(tags=tags) response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -1004,7 +1219,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): except ValueError: start_byte, end_byte = None, None - if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + if self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ): key = self.backend.copy_part( bucket_name, upload_id, @@ -1033,7 +1250,7 @@ class ResponseObject(_TemplateEnvironmentMixin, 
ActionAuthenticatorMixin): tagging = self._tagging_from_headers(request.headers) if "acl" in query: - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) # TODO: Support the XML-based ACL format key.set_acl(acl) return 200, response_headers, "" @@ -1043,8 +1260,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): version_id = query["versionId"][0] else: version_id = None + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) tagging = self._tagging_from_xml(body) - self.backend.set_key_tagging(bucket_name, key_name, tagging, version_id) + self.backend.set_key_tags(key, tagging, key_name) return 200, response_headers, "" if "x-amz-copy-source" in request.headers: @@ -1060,11 +1278,19 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0] - key = self.backend.get_key(src_bucket, src_key, version_id=src_version_id) + key = self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ) if key is not None: if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]: - raise ObjectNotInActiveTierError(key) + if key.response_dict.get( + "x-amz-restore" + ) is None or 'ongoing-request="true"' in key.response_dict.get( + "x-amz-restore" + ): + raise ObjectNotInActiveTierError(key) + self.backend.copy_key( src_bucket, src_key, @@ -1077,11 +1303,15 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: return 404, response_headers, "" - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) mdirective = request.headers.get("x-amz-metadata-directive") if mdirective is not None and mdirective == "REPLACE": metadata = metadata_from_headers(request.headers) new_key.set_metadata(metadata, replace=True) + tdirective = request.headers.get("x-amz-tagging-directive") + if tdirective == "REPLACE": + tagging = self._tagging_from_headers(request.headers) + self.backend.set_key_tags(new_key, tagging) template = self.response_template(S3_OBJECT_COPY_RESPONSE) response_headers.update(new_key.response_dict) return 200, response_headers, template.render(key=new_key) @@ -1089,27 +1319,27 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): closing_connection = headers.get("connection") == "close" if closing_connection and streaming_request: # Closing the connection of a streaming request. 
No more data - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) elif streaming_request: # Streaming request, more data new_key = self.backend.append_to_key(bucket_name, key_name, body) else: # Initial data - new_key = self.backend.set_key( + new_key = self.backend.set_object( bucket_name, key_name, body, storage=storage_class ) request.streaming = True metadata = metadata_from_headers(request.headers) + metadata.update(metadata_from_headers(query)) new_key.set_metadata(metadata) new_key.set_acl(acl) new_key.website_redirect_location = request.headers.get( "x-amz-website-redirect-location" ) - new_key.set_tagging(tagging) + self.backend.set_key_tags(new_key, tagging) - template = self.response_template(S3_OBJECT_RESPONSE) response_headers.update(new_key.response_dict) - return 200, response_headers, template.render(key=new_key) + return 200, response_headers, "" def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} @@ -1119,20 +1349,32 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): part_number = int(part_number) if_modified_since = headers.get("If-Modified-Since", None) - if if_modified_since: - if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + if_match = headers.get("If-Match", None) + if_none_match = headers.get("If-None-Match", None) + if_unmodified_since = headers.get("If-Unmodified-Since", None) - key = self.backend.get_key( + key = self.backend.get_object( bucket_name, key_name, version_id=version_id, part_number=part_number ) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) - if if_modified_since and key.last_modified < if_modified_since: + if if_unmodified_since: + if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) + if key.last_modified > if_unmodified_since: + return 412, response_headers, "" + if if_match and key.etag != if_match: + return 412, response_headers, "" + + if if_modified_since: + if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + if key.last_modified < if_modified_since: + return 304, response_headers, "Not Modified" + if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" - else: - return 200, response_headers, "" + + return 200, response_headers, "" else: return 404, response_headers, "" @@ -1234,50 +1476,45 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return None def _tagging_from_headers(self, headers): + tags = {} if headers.get("x-amz-tagging"): parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True) - tags = [] for tag in parsed_header.items(): - tags.append(FakeTag(tag[0], tag[1][0])) - - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging - else: - return FakeTagging() + tags[tag[0]] = tag[1][0] + return tags def _tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml, force_list={"Tag": True}) - tags = [] + tags = {} for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: - tags.append(FakeTag(tag["Key"], tag["Value"])) + tags[tag["Key"]] = tag["Value"] - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging + return tags def _bucket_tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) - tags = [] + tags = {} # Optional if no tags are being sent: if parsed_xml["Tagging"].get("TagSet"): # If there is only 1 tag, then it's not a list: if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], 
list): - tags.append( - FakeTag( - parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"], - parsed_xml["Tagging"]["TagSet"]["Tag"]["Value"], - ) - ) + tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[ + "Tagging" + ]["TagSet"]["Tag"]["Value"] else: for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: - tags.append(FakeTag(tag["Key"], tag["Value"])) + if tag["Key"] in tags: + raise DuplicateTagKeys() + tags[tag["Key"]] = tag["Value"] - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging + # Tag keys that use the reserved "aws:" prefix are not allowed: + for key in tags: + if key.startswith("aws:"): + raise NoSystemTags() + + return tags def _cors_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) @@ -1287,6 +1524,22 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def _encryption_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if ( + not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule") + or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get( + "ApplyServerSideEncryptionByDefault" + ) + or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][ + "ApplyServerSideEncryptionByDefault" + ].get("SSEAlgorithm") + ): + raise MalformedXML() + + return [parsed_xml["ServerSideEncryptionConfiguration"]] + def _logging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) @@ -1421,9 +1674,20 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): self.backend.cancel_multipart(bucket_name, upload_id) return 204, {}, "" version_id = query.get("versionId", [None])[0] - self.backend.delete_key(bucket_name, key_name, version_id=version_id) - template = self.response_template(S3_DELETE_OBJECT_SUCCESS) - return 204, {}, template.render() + if "tagging" in query: + self.backend.delete_object_tagging( + bucket_name, key_name, version_id=version_id + ) + template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE) + return 204, {}, template.render(version_id=version_id) + success, response_meta = self.backend.delete_object( + bucket_name, key_name, version_id=version_id + ) + response_headers = {} + if response_meta is not None: + for k in response_meta: + response_headers["x-amz-{}".format(k)] = response_meta[k] + return 204, response_headers, "" def _complete_multipart_body(self, body): ps = minidom.parseString(body).getElementsByTagName("Part") @@ -1459,7 +1723,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): elif "restore" in query: es = minidom.parseString(body).getElementsByTagName("Days") days = es[0].childNodes[0].wholeText - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) r = 202 if key.expiry_date is not None: r = 200 @@ -1470,6 +1734,29 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): "Method POST has only been implemented for multipart uploads and restore operations so far" ) + def _invalid_headers(self, url, headers): + """ + Verify whether the provided metadata in the URL is also present in the headers + :param url: .../file.txt&content-type=app%2Fjson&Signature=..
+ :param headers: Content-Type=app/json + :return: True or False + """ + metadata_to_check = { + "content-disposition": "Content-Disposition", + "content-encoding": "Content-Encoding", + "content-language": "Content-Language", + "content-length": "Content-Length", + "content-md5": "Content-MD5", + "content-type": "Content-Type", + } + for url_key, header_key in metadata_to_check.items(): + metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url) + if metadata_in_url: + url_value = unquote(metadata_in_url.group(1)) + if header_key not in headers or (url_value != headers[header_key]): + return True + return False + S3ResponseInstance = ResponseObject(s3_backend) @@ -1482,7 +1769,7 @@ S3_ALL_BUCKETS = """ {{ prefix }} {% endif %} {{ max_keys }} - {{ delimiter }} + {% if delimiter %} + {{ delimiter }} + {% endif %} {{ is_truncated }} {% if next_marker %} {{ next_marker }} @@ -1598,10 +1887,10 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% if rule.filter.prefix != None %} {{ rule.filter.prefix }} {% endif %} - {% if rule.filter.tag %} + {% if rule.filter.tag_key %} - {{ rule.filter.tag.key }} - {{ rule.filter.tag.value }} + {{ rule.filter.tag_key }} + {{ rule.filter.tag_value }} {% endif %} {% if rule.filter.and_filter %} @@ -1609,10 +1898,10 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% if rule.filter.and_filter.prefix != None %} {{ rule.filter.and_filter.prefix }} {% endif %} - {% for tag in rule.filter.and_filter.tags %} + {% for key, value in rule.filter.and_filter.tags.items() %} - {{ tag.key }} - {{ tag.value }} + {{ key }} + {{ value }} {% endfor %} @@ -1726,9 +2015,10 @@ S3_BUCKET_GET_VERSIONS = """ S3_DELETE_KEYS_RESPONSE = """ -{% for k in deleted %} +{% for k, v in deleted %} {{k}} +{% if v %}{{v}}{% endif %} {% endfor %} {% for k in delete_errors %} @@ -1738,19 +2028,11 @@ S3_DELETE_KEYS_RESPONSE = """ {% endfor %} """ -S3_DELETE_OBJECT_SUCCESS = """ - - 200 - OK - -""" - -S3_OBJECT_RESPONSE = """ - - {{ key.etag }} - {{ key.last_modified_ISO8601 }} - - """ +S3_DELETE_KEY_TAGGING_RESPONSE = """ + +{{version_id}} + +""" S3_OBJECT_ACL_RESPONSE = """ @@ -1787,22 +2069,10 @@ S3_OBJECT_TAGGING_RESPONSE = """\ - {% for tag in obj.tagging.tag_set.tags %} + {% for tag in tags %} - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - -""" - -S3_BUCKET_TAGGING_RESPONSE = """ - - - {% for tag in bucket.tagging.tag_set.tags %} - - {{ tag.key }} - {{ tag.value }} + {{ tag.Key }} + {{ tag.Value }} {% endfor %} @@ -1810,7 +2080,7 @@ S3_BUCKET_TAGGING_RESPONSE = """ S3_BUCKET_CORS_RESPONSE = """ - {% for cors in bucket.cors %} + {% for cors in cors %} {% for origin in cors.allowed_origins %} {{ origin }} @@ -2016,9 +2286,43 @@ S3_NO_LOGGING_CONFIG = """ """ +S3_ENCRYPTION_CONFIG = """ + + {% for entry in encryption %} + + + {{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }} + {% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %} + {{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }} + {% endif %} + + + {% endfor %} + +""" + +S3_INVALID_PRESIGNED_PARAMETERS = """ + + SignatureDoesNotMatch + The request signature we calculated does not match the signature you provided. Check your key and signing method. 
+ 0D68A23BB2E2215B + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + +S3_NO_ENCRYPTION = """ + + ServerSideEncryptionConfigurationNotFoundError + The server side encryption configuration was not found + {{ bucket_name }} + 0D68A23BB2E2215B + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + S3_GET_BUCKET_NOTIFICATION_CONFIG = """ - {% for topic in bucket.notification_configuration.topic %} + {% for topic in config.topic %} {{ topic.id }} {{ topic.arn }} @@ -2039,7 +2343,7 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endif %} {% endfor %} - {% for queue in bucket.notification_configuration.queue %} + {% for queue in config.queue %} {{ queue.id }} {{ queue.arn }} @@ -2060,7 +2364,7 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endif %} {% endfor %} - {% for cf in bucket.notification_configuration.cloud_function %} + {% for cf in config.cloud_function %} {{ cf.id }} {{ cf.arn }} diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 7241dbef1..4c4e9ea76 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -13,7 +13,7 @@ url_paths = { # subdomain key of path-based bucket "{0}/(?P[^/]+)/?$": S3ResponseInstance.ambiguous_response, # path-based bucket + key - "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_response, + "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_or_control_response, # subdomain bucket + key with empty first part of path - "{0}//(?P.*)$": S3ResponseInstance.key_response, + "{0}/(?P/.*)$": S3ResponseInstance.key_or_control_response, } diff --git a/moto/s3/utils.py b/moto/s3/utils.py index e7d9e5580..d89997dfd 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -2,10 +2,10 @@ from __future__ import unicode_literals import logging import os -from boto.s3.key import Key import re import six from six.moves.urllib.parse import urlparse, unquote, quote +from requests.structures import CaseInsensitiveDict import sys @@ -13,6 +13,16 @@ log = logging.getLogger(__name__) bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") +user_settable_fields = { + "content-md5", + "content-language", + "content-type", + "content-encoding", + "cache-control", + "expires", + "content-disposition", + "x-robots-tag", +} def bucket_name_from_url(url): @@ -35,9 +45,20 @@ def bucket_name_from_url(url): return None +# 'owi-common-cf', 'snippets/test.json' = bucket_and_name_from_url('s3://owi-common-cf/snippets/test.json') +def bucket_and_name_from_url(url): + prefix = "s3://" + if url.startswith(prefix): + bucket_name = url[len(prefix) : url.index("/", len(prefix))] + key = url[url.index("/", len(prefix)) + 1 :] + return bucket_name, key + else: + return None, None + + REGION_URL_REGEX = re.compile( r"^https?://(s3[-\.](?P.+)\.amazonaws\.com/(.+)|" - r"(.+)\.s3-(?P.+)\.amazonaws\.com)/?" + r"(.+)\.s3[-\.](?P.+)\.amazonaws\.com)/?" 
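+ # s3[-\.] accepts both the dash-style (s3-us-west-2) and the dot-style (s3.us-west-2) regional endpoints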
) @@ -51,8 +72,8 @@ def parse_region_from_url(url): def metadata_from_headers(headers): - metadata = {} - meta_regex = re.compile("^x-amz-meta-([a-zA-Z0-9\-_]+)$", flags=re.IGNORECASE) + metadata = CaseInsensitiveDict() + meta_regex = re.compile(r"^x-amz-meta-([a-zA-Z0-9\-_]+)$", flags=re.IGNORECASE) for header, value in headers.items(): if isinstance(header, six.string_types): result = meta_regex.match(header) @@ -60,11 +81,15 @@ def metadata_from_headers(headers): if result: # Check for extra metadata meta_key = result.group(0).lower() - elif header.lower() in Key.base_user_settable_fields: + elif header.lower() in user_settable_fields: # Check for special metadata that doesn't start with x-amz-meta meta_key = header if meta_key: - metadata[meta_key] = headers[header] + metadata[meta_key] = ( + headers[header][0] + if type(headers[header]) == list + else headers[header] + ) return metadata @@ -82,7 +107,7 @@ def undo_clean_key_name(key_name): class _VersionedKeyStore(dict): - """ A simplified/modified version of Django's `MultiValueDict` taken from: + """A simplified/modified version of Django's `MultiValueDict` taken from: https://github.com/django/django/blob/70576740b0bb5289873f5a9a9a4e1a26b2c330e5/django/utils/datastructures.py#L282 """ @@ -135,6 +160,12 @@ class _VersionedKeyStore(dict): for key in self: yield key, self.getlist(key) + def item_size(self): + size = 0 + for val in self.values(): + size += sys.getsizeof(val) + return size + items = iteritems = _iteritems lists = iterlists = _iterlists values = itervalues = _itervalues diff --git a/moto/sagemaker/__init__.py b/moto/sagemaker/__init__.py new file mode 100644 index 000000000..85e635380 --- /dev/null +++ b/moto/sagemaker/__init__.py @@ -0,0 +1,5 @@ +from __future__ import unicode_literals +from .models import sagemaker_backends + +sagemaker_backend = sagemaker_backends["us-east-1"] +mock_sagemaker = sagemaker_backend.decorator diff --git a/moto/sagemaker/exceptions.py b/moto/sagemaker/exceptions.py new file mode 100644 index 000000000..0331fee89 --- /dev/null +++ b/moto/sagemaker/exceptions.py @@ -0,0 +1,34 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError, JsonRESTError + +ERROR_WITH_MODEL_NAME = """{% extends 'single_error' %} +{% block extra %}{{ model }}{% endblock %} +""" + + +class SagemakerClientError(RESTError): + def __init__(self, *args, **kwargs): + kwargs.setdefault("template", "single_error") + self.templates["model_error"] = ERROR_WITH_MODEL_NAME + super(SagemakerClientError, self).__init__(*args, **kwargs) + + +class ModelError(RESTError): + def __init__(self, *args, **kwargs): + kwargs.setdefault("template", "model_error") + self.templates["model_error"] = ERROR_WITH_MODEL_NAME + super(ModelError, self).__init__(*args, **kwargs) + + +class MissingModel(ModelError): + code = 404 + + def __init__(self, *args, **kwargs): + super(MissingModel, self).__init__( + "NoSuchModel", "Could not find model", *args, **kwargs + ) + + +class ValidationError(JsonRESTError): + def __init__(self, message, **kwargs): + super(ValidationError, self).__init__("ValidationException", message, **kwargs) diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py new file mode 100644 index 000000000..f53cc3eec --- /dev/null +++ b/moto/sagemaker/models.py @@ -0,0 +1,940 @@ +from __future__ import unicode_literals + +import os +from boto3 import Session +from copy import deepcopy +from datetime import datetime + +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel +from 
moto.core.exceptions import RESTError +from moto.sagemaker import validators +from .exceptions import MissingModel, ValidationError + + +class BaseObject(BaseModel): + def camelCase(self, key): + words = [] + for i, word in enumerate(key.split("_")): + words.append(word.title()) + return "".join(words) + + def gen_response_object(self): + response_object = dict() + for key, value in self.__dict__.items(): + if "_" in key: + response_object[self.camelCase(key)] = value + else: + response_object[key[0].upper() + key[1:]] = value + return response_object + + @property + def response_object(self): + return self.gen_response_object() + + +class FakeTrainingJob(BaseObject): + def __init__( + self, + region_name, + training_job_name, + hyper_parameters, + algorithm_specification, + role_arn, + input_data_config, + output_data_config, + resource_config, + vpc_config, + stopping_condition, + tags, + enable_network_isolation, + enable_inter_container_traffic_encryption, + enable_managed_spot_training, + checkpoint_config, + debug_hook_config, + debug_rule_configurations, + tensor_board_output_config, + experiment_config, + ): + self.training_job_name = training_job_name + self.hyper_parameters = hyper_parameters + self.algorithm_specification = algorithm_specification + self.role_arn = role_arn + self.input_data_config = input_data_config + self.output_data_config = output_data_config + self.resource_config = resource_config + self.vpc_config = vpc_config + self.stopping_condition = stopping_condition + self.tags = tags + self.enable_network_isolation = enable_network_isolation + self.enable_inter_container_traffic_encryption = ( + enable_inter_container_traffic_encryption + ) + self.enable_managed_spot_training = enable_managed_spot_training + self.checkpoint_config = checkpoint_config + self.debug_hook_config = debug_hook_config + self.debug_rule_configurations = debug_rule_configurations + self.tensor_board_output_config = tensor_board_output_config + self.experiment_config = experiment_config + self.training_job_arn = FakeTrainingJob.arn_formatter( + training_job_name, region_name + ) + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.model_artifacts = { + "S3ModelArtifacts": os.path.join( + self.output_data_config["S3OutputPath"], + self.training_job_name, + "output", + "model.tar.gz", + ) + } + self.training_job_status = "Completed" + self.secondary_status = "Completed" + self.algorithm_specification["MetricDefinitions"] = [ + { + "Name": "test:dcg", + "Regex": "#quality_metric: host=\\S+, test dcg =(\\S+)", + } + ] + now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.creation_time = now_string + self.last_modified_time = now_string + self.training_start_time = now_string + self.training_end_time = now_string + self.secondary_status_transitions = [ + { + "Status": "Starting", + "StartTime": self.creation_time, + "EndTime": self.creation_time, + "StatusMessage": "Preparing the instances for training", + } + ] + self.final_metric_data_list = [ + { + "MetricName": "train:progress", + "Value": 100.0, + "Timestamp": self.creation_time, + } + ] + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"TrainingJobArn": self.training_job_arn} + + @staticmethod + def arn_formatter(endpoint_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + 
+ ":" + + str(ACCOUNT_ID) + + ":training-job/" + + endpoint_name + ) + + +class FakeEndpoint(BaseObject): + def __init__( + self, + region_name, + endpoint_name, + endpoint_config_name, + production_variants, + data_capture_config, + tags, + ): + self.endpoint_name = endpoint_name + self.endpoint_arn = FakeEndpoint.arn_formatter(endpoint_name, region_name) + self.endpoint_config_name = endpoint_config_name + self.production_variants = production_variants + self.data_capture_config = data_capture_config + self.tags = tags or [] + self.endpoint_status = "InService" + self.failure_reason = None + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"EndpointArn": self.endpoint_arn} + + @staticmethod + def arn_formatter(endpoint_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":endpoint/" + + endpoint_name + ) + + +class FakeEndpointConfig(BaseObject): + def __init__( + self, + region_name, + endpoint_config_name, + production_variants, + data_capture_config, + tags, + kms_key_id, + ): + self.validate_production_variants(production_variants) + + self.endpoint_config_name = endpoint_config_name + self.endpoint_config_arn = FakeEndpointConfig.arn_formatter( + endpoint_config_name, region_name + ) + self.production_variants = production_variants or [] + self.data_capture_config = data_capture_config or {} + self.tags = tags or [] + self.kms_key_id = kms_key_id + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + def validate_production_variants(self, production_variants): + for production_variant in production_variants: + self.validate_instance_type(production_variant["InstanceType"]) + + def validate_instance_type(self, instance_type): + VALID_INSTANCE_TYPES = [ + "ml.r5d.12xlarge", + "ml.r5.12xlarge", + "ml.p2.xlarge", + "ml.m5.4xlarge", + "ml.m4.16xlarge", + "ml.r5d.24xlarge", + "ml.r5.24xlarge", + "ml.p3.16xlarge", + "ml.m5d.xlarge", + "ml.m5.large", + "ml.t2.xlarge", + "ml.p2.16xlarge", + "ml.m5d.12xlarge", + "ml.inf1.2xlarge", + "ml.m5d.24xlarge", + "ml.c4.2xlarge", + "ml.c5.2xlarge", + "ml.c4.4xlarge", + "ml.inf1.6xlarge", + "ml.c5d.2xlarge", + "ml.c5.4xlarge", + "ml.g4dn.xlarge", + "ml.g4dn.12xlarge", + "ml.c5d.4xlarge", + "ml.g4dn.2xlarge", + "ml.c4.8xlarge", + "ml.c4.large", + "ml.c5d.xlarge", + "ml.c5.large", + "ml.g4dn.4xlarge", + "ml.c5.9xlarge", + "ml.g4dn.16xlarge", + "ml.c5d.large", + "ml.c5.xlarge", + "ml.c5d.9xlarge", + "ml.c4.xlarge", + "ml.inf1.xlarge", + "ml.g4dn.8xlarge", + "ml.inf1.24xlarge", + "ml.m5d.2xlarge", + "ml.t2.2xlarge", + "ml.c5d.18xlarge", + "ml.m5d.4xlarge", + "ml.t2.medium", + "ml.c5.18xlarge", + "ml.r5d.2xlarge", + "ml.r5.2xlarge", + "ml.p3.2xlarge", + "ml.m5d.large", + "ml.m5.xlarge", + "ml.m4.10xlarge", + "ml.t2.large", + "ml.r5d.4xlarge", + "ml.r5.4xlarge", + "ml.m5.12xlarge", + "ml.m4.xlarge", + "ml.m5.24xlarge", + "ml.m4.2xlarge", + "ml.p2.8xlarge", + "ml.m5.2xlarge", + "ml.r5d.xlarge", + "ml.r5d.large", + "ml.r5.xlarge", + "ml.r5.large", + "ml.p3.8xlarge", + "ml.m4.4xlarge", + ] + if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES): + message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( + instance_type, VALID_INSTANCE_TYPES + ) + 
raise ValidationError(message=message) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"EndpointConfigArn": self.endpoint_config_arn} + + @staticmethod + def arn_formatter(model_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":endpoint-config/" + + model_name + ) + + +class Model(BaseObject): + def __init__( + self, + region_name, + model_name, + execution_role_arn, + primary_container, + vpc_config, + containers=[], + tags=[], + ): + self.model_name = model_name + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.containers = containers + self.tags = tags + self.enable_network_isolation = False + self.vpc_config = vpc_config + self.primary_container = primary_container + self.execution_role_arn = execution_role_arn or "arn:test" + self.model_arn = self.arn_for_model_name(self.model_name, region_name) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"ModelArn": self.model_arn} + + @staticmethod + def arn_for_model_name(model_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":model/" + + model_name + ) + + +class VpcConfig(BaseObject): + def __init__(self, security_group_ids, subnets): + self.security_group_ids = security_group_ids + self.subnets = subnets + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + +class Container(BaseObject): + def __init__(self, **kwargs): + self.container_hostname = kwargs.get("container_hostname", "localhost") + self.model_data_url = kwargs.get("data_url", "") + self.model_package_name = kwargs.get("package_name", "pkg") + self.image = kwargs.get("image", "") + self.environment = kwargs.get("environment", {}) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + +class FakeSagemakerNotebookInstance: + def __init__( + self, + region_name, + notebook_instance_name, + instance_type, + role_arn, + subnet_id, + security_group_ids, + kms_key_id, + tags, + lifecycle_config_name, + direct_internet_access, + volume_size_in_gb, + accelerator_types, + default_code_repository, + additional_code_repositories, + root_access, + ): + self.validate_volume_size_in_gb(volume_size_in_gb) + self.validate_instance_type(instance_type) + + self.region_name = region_name + self.notebook_instance_name = notebook_instance_name + self.instance_type = instance_type + self.role_arn = role_arn + self.subnet_id = subnet_id + self.security_group_ids = security_group_ids + self.kms_key_id = kms_key_id + self.tags = tags or [] + self.lifecycle_config_name = lifecycle_config_name + self.direct_internet_access = direct_internet_access + self.volume_size_in_gb = volume_size_in_gb + self.accelerator_types = accelerator_types + self.default_code_repository = default_code_repository + self.additional_code_repositories = additional_code_repositories + self.root_access = root_access + self.status = None + self.creation_time = self.last_modified_time = 
datetime.now() + self.start() + + def validate_volume_size_in_gb(self, volume_size_in_gb): + if not validators.is_integer_between(volume_size_in_gb, mn=5, optional=True): + message = "Invalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf" + raise ValidationError(message=message) + + def validate_instance_type(self, instance_type): + VALID_INSTANCE_TYPES = [ + "ml.p2.xlarge", + "ml.m5.4xlarge", + "ml.m4.16xlarge", + "ml.t3.xlarge", + "ml.p3.16xlarge", + "ml.t2.xlarge", + "ml.p2.16xlarge", + "ml.c4.2xlarge", + "ml.c5.2xlarge", + "ml.c4.4xlarge", + "ml.c5d.2xlarge", + "ml.c5.4xlarge", + "ml.c5d.4xlarge", + "ml.c4.8xlarge", + "ml.c5d.xlarge", + "ml.c5.9xlarge", + "ml.c5.xlarge", + "ml.c5d.9xlarge", + "ml.c4.xlarge", + "ml.t2.2xlarge", + "ml.c5d.18xlarge", + "ml.t3.2xlarge", + "ml.t3.medium", + "ml.t2.medium", + "ml.c5.18xlarge", + "ml.p3.2xlarge", + "ml.m5.xlarge", + "ml.m4.10xlarge", + "ml.t2.large", + "ml.m5.12xlarge", + "ml.m4.xlarge", + "ml.t3.large", + "ml.m5.24xlarge", + "ml.m4.2xlarge", + "ml.p2.8xlarge", + "ml.m5.2xlarge", + "ml.p3.8xlarge", + "ml.m4.4xlarge", + ] + if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES): + message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( + instance_type, VALID_INSTANCE_TYPES + ) + raise ValidationError(message=message) + + @property + def arn(self): + return ( + "arn:aws:sagemaker:" + + self.region_name + + ":" + + str(ACCOUNT_ID) + + ":notebook-instance/" + + self.notebook_instance_name + ) + + @property + def url(self): + return "{}.notebook.{}.sagemaker.aws".format( + self.notebook_instance_name, self.region_name + ) + + def start(self): + self.status = "InService" + + @property + def is_deletable(self): + return self.status in ["Stopped", "Failed"] + + def stop(self): + self.status = "Stopped" + + +class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject): + def __init__( + self, region_name, notebook_instance_lifecycle_config_name, on_create, on_start + ): + self.region_name = region_name + self.notebook_instance_lifecycle_config_name = ( + notebook_instance_lifecycle_config_name + ) + self.on_create = on_create + self.on_start = on_start + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + self.notebook_instance_lifecycle_config_name, self.region_name + ) + + @staticmethod + def arn_formatter(notebook_instance_lifecycle_config_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":notebook-instance-lifecycle-configuration/" + + notebook_instance_lifecycle_config_name + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"TrainingJobArn": self.training_job_arn} + + +class SageMakerModelBackend(BaseBackend): + def __init__(self, region_name=None): + self._models = {} + self.notebook_instances = {} + self.endpoint_configs = {} + self.endpoints = {} + self.training_jobs = {} + self.notebook_instance_lifecycle_configurations = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_model(self, **kwargs): + model_obj = Model( + region_name=self.region_name, + 
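+ # fields omitted from the request default to empty containers +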
model_name=kwargs.get("ModelName"), + execution_role_arn=kwargs.get("ExecutionRoleArn"), + primary_container=kwargs.get("PrimaryContainer", {}), + vpc_config=kwargs.get("VpcConfig", {}), + containers=kwargs.get("Containers", []), + tags=kwargs.get("Tags", []), + ) + + self._models[kwargs.get("ModelName")] = model_obj + return model_obj.response_create + + def describe_model(self, model_name=None): + model = self._models.get(model_name) + if model: + return model.response_object + message = "Could not find model '{}'.".format( + Model.arn_for_model_name(model_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", message=message, template="error_json", + ) + + def list_models(self): + models = [] + for model in self._models.values(): + model_response = deepcopy(model.response_object) + models.append(model_response) + return {"Models": models} + + def delete_model(self, model_name=None): + for model in self._models.values(): + if model.model_name == model_name: + self._models.pop(model.model_name) + break + else: + raise MissingModel(model=model_name) + + def create_notebook_instance( + self, + notebook_instance_name, + instance_type, + role_arn, + subnet_id=None, + security_group_ids=None, + kms_key_id=None, + tags=None, + lifecycle_config_name=None, + direct_internet_access="Enabled", + volume_size_in_gb=5, + accelerator_types=None, + default_code_repository=None, + additional_code_repositories=None, + root_access=None, + ): + self._validate_unique_notebook_instance_name(notebook_instance_name) + + notebook_instance = FakeSagemakerNotebookInstance( + region_name=self.region_name, + notebook_instance_name=notebook_instance_name, + instance_type=instance_type, + role_arn=role_arn, + subnet_id=subnet_id, + security_group_ids=security_group_ids, + kms_key_id=kms_key_id, + tags=tags, + lifecycle_config_name=lifecycle_config_name, + direct_internet_access=direct_internet_access + if direct_internet_access is not None + else "Enabled", + volume_size_in_gb=volume_size_in_gb if volume_size_in_gb is not None else 5, + accelerator_types=accelerator_types, + default_code_repository=default_code_repository, + additional_code_repositories=additional_code_repositories, + root_access=root_access, + ) + self.notebook_instances[notebook_instance_name] = notebook_instance + return notebook_instance + + def _validate_unique_notebook_instance_name(self, notebook_instance_name): + if notebook_instance_name in self.notebook_instances: + duplicate_arn = self.notebook_instances[notebook_instance_name].arn + message = "Cannot create a duplicate Notebook Instance ({})".format( + duplicate_arn + ) + raise ValidationError(message=message) + + def get_notebook_instance(self, notebook_instance_name): + try: + return self.notebook_instances[notebook_instance_name] + except KeyError: + raise ValidationError(message="RecordNotFound") + + def get_notebook_instance_by_arn(self, arn): + instances = [ + notebook_instance + for notebook_instance in self.notebook_instances.values() + if notebook_instance.arn == arn + ] + if len(instances) == 0: + raise ValidationError(message="RecordNotFound") + return instances[0] + + def start_notebook_instance(self, notebook_instance_name): + notebook_instance = self.get_notebook_instance(notebook_instance_name) + notebook_instance.start() + + def stop_notebook_instance(self, notebook_instance_name): + notebook_instance = self.get_notebook_instance(notebook_instance_name) + notebook_instance.stop() + + def delete_notebook_instance(self, notebook_instance_name): + 
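# a notebook instance can only be deleted once it is in the Stopped or Failed state (see is_deletable) +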
notebook_instance = self.get_notebook_instance(notebook_instance_name) + if not notebook_instance.is_deletable: + message = "Status ({}) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format( + notebook_instance.status, notebook_instance.arn + ) + raise ValidationError(message=message) + del self.notebook_instances[notebook_instance_name] + + def get_notebook_instance_tags(self, arn): + try: + notebook_instance = self.get_notebook_instance_by_arn(arn) + return notebook_instance.tags or [] + except RESTError: + return [] + + def create_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name, on_create, on_start + ): + if ( + notebook_instance_lifecycle_config_name + in self.notebook_instance_lifecycle_configurations + ): + message = "Unable to create Notebook Instance Lifecycle Config {}. (Details: Notebook Instance Lifecycle Config already exists.)".format( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + notebook_instance_lifecycle_config_name, self.region_name + ) + ) + raise ValidationError(message=message) + lifecycle_config = FakeSageMakerNotebookInstanceLifecycleConfig( + region_name=self.region_name, + notebook_instance_lifecycle_config_name=notebook_instance_lifecycle_config_name, + on_create=on_create, + on_start=on_start, + ) + self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ] = lifecycle_config + return lifecycle_config + + def describe_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name + ): + try: + return self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ].response_object + except KeyError: + message = "Unable to describe Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + notebook_instance_lifecycle_config_name, self.region_name + ) + ) + raise ValidationError(message=message) + + def delete_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name + ): + try: + del self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ] + except KeyError: + message = "Unable to delete Notebook Instance Lifecycle Config '{}'. 
(Details: Notebook Instance Lifecycle Config does not exist.)".format(
+ FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
+ notebook_instance_lifecycle_config_name, self.region_name
+ )
+ )
+ raise ValidationError(message=message)
+
+ def create_endpoint_config(
+ self,
+ endpoint_config_name,
+ production_variants,
+ data_capture_config,
+ tags,
+ kms_key_id,
+ ):
+ self.validate_production_variants(production_variants)
+
+ endpoint_config = FakeEndpointConfig(
+ region_name=self.region_name,
+ endpoint_config_name=endpoint_config_name,
+ production_variants=production_variants,
+ data_capture_config=data_capture_config,
+ tags=tags,
+ kms_key_id=kms_key_id,
+ )
+ self.endpoint_configs[endpoint_config_name] = endpoint_config
+ return endpoint_config
+
+ def validate_production_variants(self, production_variants):
+ for production_variant in production_variants:
+ if production_variant["ModelName"] not in self._models:
+ message = "Could not find model '{}'.".format(
+ Model.arn_for_model_name(
+ production_variant["ModelName"], self.region_name
+ )
+ )
+ raise ValidationError(message=message)
+
+ def describe_endpoint_config(self, endpoint_config_name):
+ try:
+ return self.endpoint_configs[endpoint_config_name].response_object
+ except KeyError:
+ message = "Could not find endpoint configuration '{}'.".format(
+ FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def delete_endpoint_config(self, endpoint_config_name):
+ try:
+ del self.endpoint_configs[endpoint_config_name]
+ except KeyError:
+ message = "Could not find endpoint configuration '{}'.".format(
+ FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def create_endpoint(
+ self, endpoint_name, endpoint_config_name, tags,
+ ):
+ # describe_endpoint_config already raises a ValidationError with a
+ # "Could not find endpoint configuration" message for a missing config,
+ # so it is left to propagate (the former `except KeyError` re-wrap
+ # was unreachable).
+ endpoint_config = self.describe_endpoint_config(endpoint_config_name)
+
+ endpoint = FakeEndpoint(
+ region_name=self.region_name,
+ endpoint_name=endpoint_name,
+ endpoint_config_name=endpoint_config_name,
+ production_variants=endpoint_config["ProductionVariants"],
+ data_capture_config=endpoint_config["DataCaptureConfig"],
+ tags=tags,
+ )
+
+ self.endpoints[endpoint_name] = endpoint
+ return endpoint
+
+ def describe_endpoint(self, endpoint_name):
+ try:
+ return self.endpoints[endpoint_name].response_object
+ except KeyError:
+ message = "Could not find endpoint '{}'.".format(
+ FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def delete_endpoint(self, endpoint_name):
+ try:
+ del self.endpoints[endpoint_name]
+ except KeyError:
+ message = "Could not find endpoint '{}'.".format(
+ FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def get_endpoint_by_arn(self, arn):
+ endpoints = [
+ endpoint
+ for endpoint in self.endpoints.values()
+ if endpoint.endpoint_arn == arn
+ ]
+ if len(endpoints) == 0:
+ message = "RecordNotFound"
+ raise ValidationError(message=message)
+ return endpoints[0]
+
+ def get_endpoint_tags(self, arn):
+ try:
+ endpoint = self.get_endpoint_by_arn(arn)
+ return endpoint.tags or []
+ except RESTError:
+ return []
+
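+ # A minimal sketch of the flow these endpoint methods back (assumptions:
+ # `mock_sagemaker` is exported by moto, and a model named "my-model" was
+ # already registered via create_model):
+ #
+ #   client.create_endpoint_config(
+ #       EndpointConfigName="my-config",
+ #       ProductionVariants=[{
+ #           "VariantName": "v1",
+ #           "ModelName": "my-model",
+ #           "InitialInstanceCount": 1,
+ #           "InstanceType": "ml.m5.large",
+ #       }],
+ #   )
+ #   client.create_endpoint(
+ #       EndpointName="my-endpoint", EndpointConfigName="my-config",
+ #   )
+ #   client.describe_endpoint(EndpointName="my-endpoint")
+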
+ def create_training_job(
+ self,
+ training_job_name,
+ hyper_parameters,
+ algorithm_specification,
+ role_arn,
+ input_data_config,
+ output_data_config,
+ resource_config,
+ vpc_config,
+ stopping_condition,
+ tags,
+ enable_network_isolation,
+ enable_inter_container_traffic_encryption,
+ enable_managed_spot_training,
+ checkpoint_config,
+ debug_hook_config,
+ debug_rule_configurations,
+ tensor_board_output_config,
+ experiment_config,
+ ):
+ training_job = FakeTrainingJob(
+ region_name=self.region_name,
+ training_job_name=training_job_name,
+ hyper_parameters=hyper_parameters,
+ algorithm_specification=algorithm_specification,
+ role_arn=role_arn,
+ input_data_config=input_data_config,
+ output_data_config=output_data_config,
+ resource_config=resource_config,
+ vpc_config=vpc_config,
+ stopping_condition=stopping_condition,
+ tags=tags,
+ enable_network_isolation=enable_network_isolation,
+ enable_inter_container_traffic_encryption=enable_inter_container_traffic_encryption,
+ enable_managed_spot_training=enable_managed_spot_training,
+ checkpoint_config=checkpoint_config,
+ debug_hook_config=debug_hook_config,
+ debug_rule_configurations=debug_rule_configurations,
+ tensor_board_output_config=tensor_board_output_config,
+ experiment_config=experiment_config,
+ )
+ self.training_jobs[training_job_name] = training_job
+ return training_job
+
+ def describe_training_job(self, training_job_name):
+ try:
+ return self.training_jobs[training_job_name].response_object
+ except KeyError:
+ message = "Could not find training job '{}'.".format(
+ FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def delete_training_job(self, training_job_name):
+ try:
+ del self.training_jobs[training_job_name]
+ except KeyError:
+ message = "Could not find training job '{}'.".format(
+ FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
+ )
+ raise ValidationError(message=message)
+
+ def get_training_job_by_arn(self, arn):
+ training_jobs = [
+ training_job
+ for training_job in self.training_jobs.values()
+ if training_job.training_job_arn == arn
+ ]
+ if len(training_jobs) == 0:
+ raise ValidationError(message="RecordNotFound")
+ return training_jobs[0]
+
+ def get_training_job_tags(self, arn):
+ try:
+ training_job = self.get_training_job_by_arn(arn)
+ return training_job.tags or []
+ except RESTError:
+ return []
+
+
+sagemaker_backends = {}
+for partition in ("aws", "aws-us-gov", "aws-cn"):
+ for region in Session().get_available_regions("sagemaker", partition_name=partition):
+ sagemaker_backends[region] = SageMakerModelBackend(region)
diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py
new file mode 100644
index 000000000..d5d2cab43
--- /dev/null
+++ b/moto/sagemaker/responses.py
@@ -0,0 +1,276 @@
+from __future__ import unicode_literals
+
+import json
+
+from moto.core.exceptions import AWSError
+from moto.core.responses import BaseResponse
+from moto.core.utils import amzn_request_id
+from .models import sagemaker_backends
+
+
+class SageMakerResponse(BaseResponse):
+ @property
+ def sagemaker_backend(self):
+ return sagemaker_backends[self.region]
+
+ @property
+ def request_params(self):
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return {}
+
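+ # Note: SageMaker is a JSON-over-POST API, so parameters arrive in the
+ # request body rather than the querystring; the `_get_param` override
+ # further down this class reads them from `request_params` accordingly.
+ def describe_model(self):
+ model_name = 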
self._get_param("ModelName") + response = self.sagemaker_backend.describe_model(model_name) + return json.dumps(response) + + def create_model(self): + response = self.sagemaker_backend.create_model(**self.request_params) + return json.dumps(response) + + def delete_model(self): + model_name = self._get_param("ModelName") + response = self.sagemaker_backend.delete_model(model_name) + return json.dumps(response) + + def list_models(self): + response = self.sagemaker_backend.list_models(**self.request_params) + return json.dumps(response) + + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) + + @amzn_request_id + def create_notebook_instance(self): + try: + sagemaker_notebook = self.sagemaker_backend.create_notebook_instance( + notebook_instance_name=self._get_param("NotebookInstanceName"), + instance_type=self._get_param("InstanceType"), + subnet_id=self._get_param("SubnetId"), + security_group_ids=self._get_param("SecurityGroupIds"), + role_arn=self._get_param("RoleArn"), + kms_key_id=self._get_param("KmsKeyId"), + tags=self._get_param("Tags"), + lifecycle_config_name=self._get_param("LifecycleConfigName"), + direct_internet_access=self._get_param("DirectInternetAccess"), + volume_size_in_gb=self._get_param("VolumeSizeInGB"), + accelerator_types=self._get_param("AcceleratorTypes"), + default_code_repository=self._get_param("DefaultCodeRepository"), + additional_code_repositories=self._get_param( + "AdditionalCodeRepositories" + ), + root_access=self._get_param("RootAccess"), + ) + response = { + "NotebookInstanceArn": sagemaker_notebook.arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + try: + notebook_instance = self.sagemaker_backend.get_notebook_instance( + notebook_instance_name + ) + response = { + "NotebookInstanceArn": notebook_instance.arn, + "NotebookInstanceName": notebook_instance.notebook_instance_name, + "NotebookInstanceStatus": notebook_instance.status, + "Url": notebook_instance.url, + "InstanceType": notebook_instance.instance_type, + "SubnetId": notebook_instance.subnet_id, + "SecurityGroups": notebook_instance.security_group_ids, + "RoleArn": notebook_instance.role_arn, + "KmsKeyId": notebook_instance.kms_key_id, + # ToDo: NetworkInterfaceId + "LastModifiedTime": str(notebook_instance.last_modified_time), + "CreationTime": str(notebook_instance.creation_time), + "NotebookInstanceLifecycleConfigName": notebook_instance.lifecycle_config_name, + "DirectInternetAccess": notebook_instance.direct_internet_access, + "VolumeSizeInGB": notebook_instance.volume_size_in_gb, + "AcceleratorTypes": notebook_instance.accelerator_types, + "DefaultCodeRepository": notebook_instance.default_code_repository, + "AdditionalCodeRepositories": notebook_instance.additional_code_repositories, + "RootAccess": notebook_instance.root_access, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def start_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.start_notebook_instance(notebook_instance_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def stop_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.stop_notebook_instance(notebook_instance_name) + return 200, {}, 
json.dumps("{}") + + @amzn_request_id + def delete_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.delete_notebook_instance(notebook_instance_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def list_tags(self): + arn = self._get_param("ResourceArn") + try: + if ":notebook-instance/" in arn: + tags = self.sagemaker_backend.get_notebook_instance_tags(arn) + elif ":endpoint/" in arn: + tags = self.sagemaker_backend.get_endpoint_tags(arn) + elif ":training-job/" in arn: + tags = self.sagemaker_backend.get_training_job_tags(arn) + else: + tags = [] + except AWSError: + tags = [] + response = {"Tags": tags} + return 200, {}, json.dumps(response) + + @amzn_request_id + def create_endpoint_config(self): + try: + endpoint_config = self.sagemaker_backend.create_endpoint_config( + endpoint_config_name=self._get_param("EndpointConfigName"), + production_variants=self._get_param("ProductionVariants"), + data_capture_config=self._get_param("DataCaptureConfig"), + tags=self._get_param("Tags"), + kms_key_id=self._get_param("KmsKeyId"), + ) + response = { + "EndpointConfigArn": endpoint_config.endpoint_config_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_endpoint_config(self): + endpoint_config_name = self._get_param("EndpointConfigName") + response = self.sagemaker_backend.describe_endpoint_config(endpoint_config_name) + return json.dumps(response) + + @amzn_request_id + def delete_endpoint_config(self): + endpoint_config_name = self._get_param("EndpointConfigName") + self.sagemaker_backend.delete_endpoint_config(endpoint_config_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_endpoint(self): + try: + endpoint = self.sagemaker_backend.create_endpoint( + endpoint_name=self._get_param("EndpointName"), + endpoint_config_name=self._get_param("EndpointConfigName"), + tags=self._get_param("Tags"), + ) + response = { + "EndpointArn": endpoint.endpoint_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_endpoint(self): + endpoint_name = self._get_param("EndpointName") + response = self.sagemaker_backend.describe_endpoint(endpoint_name) + return json.dumps(response) + + @amzn_request_id + def delete_endpoint(self): + endpoint_name = self._get_param("EndpointName") + self.sagemaker_backend.delete_endpoint(endpoint_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_training_job(self): + try: + training_job = self.sagemaker_backend.create_training_job( + training_job_name=self._get_param("TrainingJobName"), + hyper_parameters=self._get_param("HyperParameters"), + algorithm_specification=self._get_param("AlgorithmSpecification"), + role_arn=self._get_param("RoleArn"), + input_data_config=self._get_param("InputDataConfig"), + output_data_config=self._get_param("OutputDataConfig"), + resource_config=self._get_param("ResourceConfig"), + vpc_config=self._get_param("VpcConfig"), + stopping_condition=self._get_param("StoppingCondition"), + tags=self._get_param("Tags"), + enable_network_isolation=self._get_param( + "EnableNetworkIsolation", False + ), + enable_inter_container_traffic_encryption=self._get_param( + "EnableInterContainerTrafficEncryption", False + ), + enable_managed_spot_training=self._get_param( + "EnableManagedSpotTraining", False + ), + checkpoint_config=self._get_param("CheckpointConfig"), + 
debug_hook_config=self._get_param("DebugHookConfig"), + debug_rule_configurations=self._get_param("DebugRuleConfigurations"), + tensor_board_output_config=self._get_param("TensorBoardOutputConfig"), + experiment_config=self._get_param("ExperimentConfig"), + ) + response = { + "TrainingJobArn": training_job.training_job_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_training_job(self): + training_job_name = self._get_param("TrainingJobName") + response = self.sagemaker_backend.describe_training_job(training_job_name) + return json.dumps(response) + + @amzn_request_id + def delete_training_job(self): + training_job_name = self._get_param("TrainingJobName") + self.sagemaker_backend.delete_training_job(training_job_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_notebook_instance_lifecycle_config(self): + try: + lifecycle_configuration = self.sagemaker_backend.create_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ), + on_create=self._get_param("OnCreate"), + on_start=self._get_param("OnStart"), + ) + response = { + "NotebookInstanceLifecycleConfigArn": lifecycle_configuration.notebook_instance_lifecycle_config_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_notebook_instance_lifecycle_config(self): + response = self.sagemaker_backend.describe_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ) + ) + return json.dumps(response) + + @amzn_request_id + def delete_notebook_instance_lifecycle_config(self): + self.sagemaker_backend.delete_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ) + ) + return 200, {}, json.dumps("{}") diff --git a/moto/sagemaker/urls.py b/moto/sagemaker/urls.py new file mode 100644 index 000000000..9c039d899 --- /dev/null +++ b/moto/sagemaker/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import SageMakerResponse + +url_bases = [ + "https?://api.sagemaker.(.+).amazonaws.com", +] + +url_paths = { + "{0}/$": SageMakerResponse.dispatch, +} diff --git a/moto/sagemaker/validators.py b/moto/sagemaker/validators.py new file mode 100644 index 000000000..69cbee2a5 --- /dev/null +++ b/moto/sagemaker/validators.py @@ -0,0 +1,20 @@ +def is_integer_between(x, mn=None, mx=None, optional=False): + if optional and x is None: + return True + try: + if mn is not None and mx is not None: + return int(x) >= mn and int(x) < mx + elif mn is not None: + return int(x) >= mn + elif mx is not None: + return int(x) < mx + else: + return True + except ValueError: + return False + + +def is_one_of(x, choices, optional=False): + if optional and x is None: + return True + return x in choices diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index bf717e20c..6618cd3ac 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -57,3 +57,8 @@ class InvalidRequestException(SecretsManagerClientError): super(InvalidRequestException, self).__init__( "InvalidRequestException", message ) + + +class ValidationException(SecretsManagerClientError): + def __init__(self, message): + super(ValidationException, self).__init__("ValidationException", message) diff 
--git a/moto/secretsmanager/list_secrets/__init__.py b/moto/secretsmanager/list_secrets/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/secretsmanager/list_secrets/filters.py b/moto/secretsmanager/list_secrets/filters.py new file mode 100644 index 000000000..c888ebe64 --- /dev/null +++ b/moto/secretsmanager/list_secrets/filters.py @@ -0,0 +1,44 @@ +def _matcher(pattern, str): + for word in pattern.split(" "): + if word not in str: + return False + return True + + +def name(secret, names): + for n in names: + if _matcher(n, secret.name): + return True + return False + + +def description(secret, descriptions): + for d in descriptions: + if _matcher(d, secret.description): + return True + return False + + +def tag_key(secret, tag_keys): + for k in tag_keys: + for tag in secret.tags: + if _matcher(k, tag["Key"]): + return True + return False + + +def tag_value(secret, tag_values): + for v in tag_values: + for tag in secret.tags: + if _matcher(v, tag["Value"]): + return True + return False + + +def all(secret, values): + return ( + name(secret, values) + or description(secret, values) + or tag_key(secret, values) + or tag_value(secret, values) + ) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 294a6401e..0aaa2027a 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -14,10 +14,36 @@ from .exceptions import ( SecretHasNoValueException, InvalidParameterException, ResourceExistsException, + ResourceNotFoundException, InvalidRequestException, ClientError, ) from .utils import random_password, secret_arn, get_secret_name_from_arn +from .list_secrets.filters import all, tag_key, tag_value, description, name + + +_filter_functions = { + "all": all, + "name": name, + "description": description, + "tag-key": tag_key, + "tag-value": tag_value, +} + + +def filter_keys(): + return list(_filter_functions.keys()) + + +def _matches(secret, filters): + is_match = True + + for f in filters: + # Filter names are pre-validated in the resource layer + filter_function = _filter_functions.get(f["Key"]) + is_match = is_match and filter_function(secret, f["Values"]) + + return is_match class SecretsManager(BaseModel): @@ -25,6 +51,102 @@ class SecretsManager(BaseModel): self.region = region_name +class FakeSecret: + def __init__( + self, + region_name, + secret_id, + secret_string=None, + secret_binary=None, + description=None, + tags=[], + version_id=None, + version_stages=None, + ): + self.secret_id = secret_id + self.name = secret_id + self.arn = secret_arn(region_name, secret_id) + self.secret_string = secret_string + self.secret_binary = secret_binary + self.description = description + self.tags = tags + self.version_id = version_id + self.version_stages = version_stages + self.rotation_enabled = False + self.rotation_lambda_arn = "" + self.auto_rotate_after_days = 0 + self.deleted_date = None + + def update(self, description=None, tags=[]): + self.description = description + self.tags = tags + + def set_versions(self, versions): + self.versions = versions + + def set_default_version_id(self, version_id): + self.default_version_id = version_id + + def reset_default_version(self, secret_version, version_id): + # remove all old AWSPREVIOUS stages + for old_version in self.versions.values(): + if "AWSPREVIOUS" in old_version["version_stages"]: + old_version["version_stages"].remove("AWSPREVIOUS") + + # set old AWSCURRENT secret to AWSPREVIOUS + previous_current_version_id = self.default_version_id + 
self.versions[previous_current_version_id]["version_stages"] = ["AWSPREVIOUS"] + + self.versions[version_id] = secret_version + self.default_version_id = version_id + + def delete(self, deleted_date): + self.deleted_date = deleted_date + + def restore(self): + self.deleted_date = None + + def is_deleted(self): + return self.deleted_date is not None + + def to_short_dict(self, include_version_stages=False): + dct = { + "ARN": self.arn, + "Name": self.name, + "VersionId": self.default_version_id, + } + if include_version_stages: + dct["VersionStages"] = self.version_stages + return json.dumps(dct) + + def to_dict(self): + version_id_to_stages = self._form_version_ids_to_stages() + + return { + "ARN": self.arn, + "Name": self.name, + "Description": self.description or "", + "KmsKeyId": "", + "RotationEnabled": self.rotation_enabled, + "RotationLambdaARN": self.rotation_lambda_arn, + "RotationRules": {"AutomaticallyAfterDays": self.auto_rotate_after_days}, + "LastRotatedDate": None, + "LastChangedDate": None, + "LastAccessedDate": None, + "DeletedDate": self.deleted_date, + "Tags": self.tags, + "VersionIdsToStages": version_id_to_stages, + "SecretVersionsToStages": version_id_to_stages, + } + + def _form_version_ids_to_stages(self): + version_id_to_stages = {} + for key, value in self.versions.items(): + version_id_to_stages[key] = value["version_stages"] + + return version_id_to_stages + + class SecretsStore(dict): def __setitem__(self, key, value): new_key = get_secret_name_from_arn(key) @@ -38,6 +160,10 @@ class SecretsStore(dict): new_key = get_secret_name_from_arn(key) return dict.__contains__(self, new_key) + def pop(self, key, *args, **kwargs): + new_key = get_secret_name_from_arn(key) + return super(SecretsStore, self).pop(new_key, *args, **kwargs) + class SecretsManagerBackend(BaseBackend): def __init__(self, region_name=None, **kwargs): @@ -63,7 +189,7 @@ class SecretsManagerBackend(BaseBackend): if not version_id and version_stage: # set version_id to match version_stage - versions_dict = self.secrets[secret_id]["versions"] + versions_dict = self.secrets[secret_id].versions for ver_id, ver_val in versions_dict.items(): if version_stage in ver_val["version_stages"]: version_id = ver_id @@ -72,20 +198,27 @@ class SecretsManagerBackend(BaseBackend): raise SecretNotFoundException() # TODO check this part - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ perform the operation on a secret that's currently marked deleted." 
) secret = self.secrets[secret_id] - version_id = version_id or secret["default_version_id"] + version_id = version_id or secret.default_version_id - secret_version = secret["versions"][version_id] + secret_version = secret.versions.get(version_id) + if not secret_version: + raise ResourceNotFoundException( + "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " + "Manager can't find the specified secret value for VersionId: {}".format( + version_id + ) + ) response_data = { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], + "ARN": secret.arn, + "Name": secret.name, "VersionId": secret_version["version_id"], "VersionStages": secret_version["version_stages"], "CreatedDate": secret_version["createdate"], @@ -107,8 +240,42 @@ class SecretsManagerBackend(BaseBackend): return response + def update_secret( + self, secret_id, secret_string=None, secret_binary=None, **kwargs + ): + + # error if secret does not exist + if secret_id not in self.secrets.keys(): + raise SecretNotFoundException() + + if self.secrets[secret_id].is_deleted(): + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: " + "You can't perform this operation on the secret because it was marked for deletion." + ) + + secret = self.secrets[secret_id] + tags = secret.tags + description = secret.description + + secret = self._add_secret( + secret_id, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, + ) + + return secret.to_short_dict() + def create_secret( - self, name, secret_string=None, secret_binary=None, tags=[], **kwargs + self, + name, + secret_string=None, + secret_binary=None, + description=None, + tags=[], + **kwargs ): # error if secret exists @@ -117,25 +284,22 @@ class SecretsManagerBackend(BaseBackend): "A resource with the ID you requested already exists." 
) - version_id = self._add_secret( - name, secret_string=secret_string, secret_binary=secret_binary, tags=tags + secret = self._add_secret( + name, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, ) - response = json.dumps( - { - "ARN": secret_arn(self.region, name), - "Name": name, - "VersionId": version_id, - } - ) - - return response + return secret.to_short_dict() def _add_secret( self, secret_id, secret_string=None, secret_binary=None, + description=None, tags=[], version_id=None, version_stages=None, @@ -152,7 +316,6 @@ class SecretsManagerBackend(BaseBackend): "version_id": version_id, "version_stages": version_stages, } - if secret_string is not None: secret_version["secret_string"] = secret_string @@ -160,53 +323,43 @@ class SecretsManagerBackend(BaseBackend): secret_version["secret_binary"] = secret_binary if secret_id in self.secrets: - # remove all old AWSPREVIOUS stages - for secret_verion_to_look_at in self.secrets[secret_id][ - "versions" - ].values(): - if "AWSPREVIOUS" in secret_verion_to_look_at["version_stages"]: - secret_verion_to_look_at["version_stages"].remove("AWSPREVIOUS") - - # set old AWSCURRENT secret to AWSPREVIOUS - previous_current_version_id = self.secrets[secret_id]["default_version_id"] - self.secrets[secret_id]["versions"][previous_current_version_id][ - "version_stages" - ] = ["AWSPREVIOUS"] - - self.secrets[secret_id]["versions"][version_id] = secret_version - self.secrets[secret_id]["default_version_id"] = version_id + secret = self.secrets[secret_id] + secret.update(description, tags) + secret.reset_default_version(secret_version, version_id) else: - self.secrets[secret_id] = { - "versions": {version_id: secret_version}, - "default_version_id": version_id, - } + secret = FakeSecret( + region_name=self.region, + secret_id=secret_id, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, + ) + secret.set_versions({version_id: secret_version}) + secret.set_default_version_id(version_id) + self.secrets[secret_id] = secret - secret = self.secrets[secret_id] - secret["secret_id"] = secret_id - secret["name"] = secret_id - secret["rotation_enabled"] = False - secret["rotation_lambda_arn"] = "" - secret["auto_rotate_after_days"] = 0 - secret["tags"] = tags - - return version_id + return secret def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages): - version_id = self._add_secret( - secret_id, secret_string, secret_binary, version_stages=version_stages + if not self._is_valid_identifier(secret_id): + raise SecretNotFoundException() + else: + secret = self.secrets[secret_id] + tags = secret.tags + description = secret.description + + secret = self._add_secret( + secret_id, + secret_string, + secret_binary, + description=description, + tags=tags, + version_stages=version_stages, ) - response = json.dumps( - { - "ARN": secret_arn(self.region, secret_id), - "Name": secret_id, - "VersionId": version_id, - "VersionStages": version_stages, - } - ) - - return response + return secret.to_short_dict(include_version_stages=True) def describe_secret(self, secret_id): if not self._is_valid_identifier(secret_id): @@ -214,26 +367,7 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets[secret_id] - response = json.dumps( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], - "Description": "", - "KmsKeyId": "", - "RotationEnabled": secret["rotation_enabled"], - "RotationLambdaARN": 
secret["rotation_lambda_arn"], - "RotationRules": { - "AutomaticallyAfterDays": secret["auto_rotate_after_days"] - }, - "LastRotatedDate": None, - "LastChangedDate": None, - "LastAccessedDate": None, - "DeletedDate": secret.get("deleted_date", None), - "Tags": secret["tags"], - } - ) - - return response + return json.dumps(secret.to_dict()) def rotate_secret( self, @@ -248,7 +382,7 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ perform the operation on a secret that's currently marked deleted." @@ -276,35 +410,28 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets[secret_id] - old_secret_version = secret["versions"][secret["default_version_id"]] + old_secret_version = secret.versions[secret.default_version_id] new_version_id = client_request_token or str(uuid.uuid4()) self._add_secret( secret_id, old_secret_version["secret_string"], - secret["tags"], + description=secret.description, + tags=secret.tags, version_id=new_version_id, version_stages=["AWSCURRENT"], ) - secret["rotation_lambda_arn"] = rotation_lambda_arn or "" + secret.rotation_lambda_arn = rotation_lambda_arn or "" if rotation_rules: - secret["auto_rotate_after_days"] = rotation_rules.get(rotation_days, 0) - if secret["auto_rotate_after_days"] > 0: - secret["rotation_enabled"] = True + secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0) + if secret.auto_rotate_after_days > 0: + secret.rotation_enabled = True if "AWSCURRENT" in old_secret_version["version_stages"]: old_secret_version["version_stages"].remove("AWSCURRENT") - response = json.dumps( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], - "VersionId": new_version_id, - } - ) - - return response + return secret.to_short_dict() def get_random_password( self, @@ -353,7 +480,7 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets[secret_id] version_list = [] - for version_id, version in secret["versions"].items(): + for version_id, version in secret.versions.items(): version_list.append( { "CreatedDate": int(time.time()), @@ -365,8 +492,8 @@ class SecretsManagerBackend(BaseBackend): response = json.dumps( { - "ARN": secret["secret_id"], - "Name": secret["name"], + "ARN": secret.secret_id, + "Name": secret.name, "NextToken": "", "Versions": version_list, } @@ -374,35 +501,13 @@ class SecretsManagerBackend(BaseBackend): return response - def list_secrets(self, max_results, next_token): + def list_secrets(self, filters, max_results, next_token): # TODO implement pagination and limits secret_list = [] for secret in self.secrets.values(): - - versions_to_stages = {} - for version_id, version in secret["versions"].items(): - versions_to_stages[version_id] = version["version_stages"] - - secret_list.append( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "DeletedDate": secret.get("deleted_date", None), - "Description": "", - "KmsKeyId": "", - "LastAccessedDate": None, - "LastChangedDate": None, - "LastRotatedDate": None, - "Name": secret["name"], - "RotationEnabled": secret["rotation_enabled"], - "RotationLambdaARN": secret["rotation_lambda_arn"], - "RotationRules": { - "AutomaticallyAfterDays": secret["auto_rotate_after_days"] - }, - "SecretVersionsToStages": versions_to_stages, - "Tags": 
secret["tags"], - } - ) + if _matches(secret, filters): + secret_list.append(secret.to_dict()) return secret_list, None @@ -413,7 +518,7 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \ perform the operation on a secret that's currently marked deleted." @@ -439,16 +544,14 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets.pop(secret_id, None) else: deletion_date += datetime.timedelta(days=recovery_window_in_days or 30) - self.secrets[secret_id]["deleted_date"] = self._unix_time_secs( - deletion_date - ) + self.secrets[secret_id].delete(self._unix_time_secs(deletion_date)) secret = self.secrets.get(secret_id, None) if not secret: raise SecretNotFoundException() - arn = secret_arn(self.region, secret["secret_id"]) - name = secret["name"] + arn = secret.arn + name = secret.name return arn, name, self._unix_time_secs(deletion_date) @@ -457,14 +560,23 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - self.secrets[secret_id].pop("deleted_date", None) + secret = self.secrets[secret_id] + secret.restore() + + return secret.arn, secret.name + + def tag_resource(self, secret_id, tags): + + if secret_id not in self.secrets.keys(): + raise SecretNotFoundException() secret = self.secrets[secret_id] + old_tags = secret.tags - arn = secret_arn(self.region, secret["secret_id"]) - name = secret["name"] + for tag in tags: + old_tags.append(tag) - return arn, name + return secret_id @staticmethod def get_resource_policy(secret_id): diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index 28af7b91d..e1c0517db 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -1,13 +1,36 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.secretsmanager.exceptions import InvalidRequestException +from moto.secretsmanager.exceptions import ( + InvalidRequestException, + InvalidParameterException, + ValidationException, +) -from .models import secretsmanager_backends +from .models import secretsmanager_backends, filter_keys import json +def _validate_filters(filters): + for idx, f in enumerate(filters): + filter_key = f.get("Key", None) + filter_values = f.get("Values", None) + if filter_key is None: + raise InvalidParameterException("Invalid filter key") + if filter_key not in filter_keys(): + raise ValidationException( + "1 validation error detected: Value '{}' at 'filters.{}.member.key' failed to satisfy constraint: " + "Member must satisfy enum value set: [all, name, tag-key, description, tag-value]".format( + filter_key, idx + 1 + ) + ) + if filter_values is None: + raise InvalidParameterException( + "Invalid filter values for key: {}".format(filter_key) + ) + + class SecretsManagerResponse(BaseResponse): def get_secret_value(self): secret_id = self._get_param("SecretId") @@ -21,14 +44,26 @@ class SecretsManagerResponse(BaseResponse): name = self._get_param("Name") secret_string = self._get_param("SecretString") secret_binary = self._get_param("SecretBinary") + description = self._get_param("Description", if_none="") tags = self._get_param("Tags", if_none=[]) return secretsmanager_backends[self.region].create_secret( name=name, 
secret_string=secret_string, secret_binary=secret_binary, + description=description, tags=tags, ) + def update_secret(self): + secret_id = self._get_param("SecretId") + secret_string = self._get_param("SecretString") + secret_binary = self._get_param("SecretBinary") + return secretsmanager_backends[self.region].update_secret( + secret_id=secret_id, + secret_string=secret_string, + secret_binary=secret_binary, + ) + def get_random_password(self): password_length = self._get_param("PasswordLength", if_none=32) exclude_characters = self._get_param("ExcludeCharacters", if_none="") @@ -90,10 +125,12 @@ class SecretsManagerResponse(BaseResponse): ) def list_secrets(self): + filters = self._get_param("Filters", if_none=[]) + _validate_filters(filters) max_results = self._get_int_param("MaxResults") next_token = self._get_param("NextToken") secret_list, next_token = secretsmanager_backends[self.region].list_secrets( - max_results=max_results, next_token=next_token + filters=filters, max_results=max_results, next_token=next_token ) return json.dumps(dict(SecretList=secret_list, NextToken=next_token)) @@ -120,3 +157,8 @@ class SecretsManagerResponse(BaseResponse): return secretsmanager_backends[self.region].get_resource_policy( secret_id=secret_id ) + + def tag_resource(self): + secret_id = self._get_param("SecretId") + tags = self._get_param("Tags", if_none=[]) + return secretsmanager_backends[self.region].tag_resource(secret_id, tags) diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py index 73275ee05..ab0f584f0 100644 --- a/moto/secretsmanager/utils.py +++ b/moto/secretsmanager/utils.py @@ -51,6 +51,8 @@ def random_password( if include_space: password += " " required_characters += " " + if exclude_characters: + password = _exclude_characters(password, exclude_characters) password = "".join( six.text_type(random.choice(password)) for x in range(password_length) @@ -61,7 +63,6 @@ def random_password( password, required_characters ) - password = _exclude_characters(password, exclude_characters) return password @@ -89,7 +90,7 @@ def _exclude_characters(password, exclude_characters): for c in exclude_characters: if c in string.punctuation: # Escape punctuation regex usage - c = "\{0}".format(c) + c = r"\{0}".format(c) password = re.sub(c, "", str(password)) return password diff --git a/moto/server.py b/moto/server.py index 92fe6f229..28e4ce556 100644 --- a/moto/server.py +++ b/moto/server.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import argparse +import io import json import re import sys @@ -8,13 +9,14 @@ from threading import Lock import six from flask import Flask +from flask_cors import CORS from flask.testing import FlaskClient from six.moves.urllib.parse import urlencode from werkzeug.routing import BaseConverter from werkzeug.serving import run_simple -from moto.backends import BACKENDS +import moto.backends as backends from moto.core.utils import convert_flask_to_httpretty_response @@ -29,6 +31,7 @@ UNSIGNED_REQUESTS = { "AWSCognitoIdentityService": ("cognito-identity", "us-east-1"), "AWSCognitoIdentityProviderService": ("cognito-idp", "us-east-1"), } +UNSIGNED_ACTIONS = {"AssumeRoleWithSAML": ("sts", "us-east-1")} class DomainDispatcherApplication(object): @@ -50,13 +53,15 @@ class DomainDispatcherApplication(object): if self.service: return self.service - if host in BACKENDS: + if host in backends.BACKENDS: return host - for backend_name, backend in BACKENDS.items(): - for url_base in list(backend.values())[0].url_bases: - if re.match(url_base, 
"http://%s" % host): - return backend_name + return backends.search_backend( + lambda backend: any( + re.match(url_base, "http://%s" % host) + for url_base in list(backend.values())[0].url_bases + ) + ) def infer_service_region_host(self, environ): auth = environ.get("HTTP_AUTHORIZATION") @@ -77,13 +82,22 @@ class DomainDispatcherApplication(object): else: # Unsigned request target = environ.get("HTTP_X_AMZ_TARGET") + action = self.get_action_from_body(environ) if target: service, _ = target.split(".", 1) service, region = UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION) + elif action and action in UNSIGNED_ACTIONS: + # See if we can match the Action to a known service + service, region = UNSIGNED_ACTIONS.get(action) else: # S3 is the last resort when the target is also unknown service, region = DEFAULT_SERVICE_REGION + if service == "EventBridge": + # Go SDK uses 'EventBridge' in the SigV4 request instead of 'events' + # see https://github.com/spulec/moto/issues/3494 + service = "events" + if service == "dynamodb": if environ["HTTP_X_AMZ_TARGET"].startswith("DynamoDBStreams"): host = "dynamodbstreams" @@ -94,6 +108,10 @@ class DomainDispatcherApplication(object): # If Newer API version, use dynamodb2 if dynamo_api_version > "20111205": host = "dynamodb2" + elif service == "sagemaker": + host = "api.sagemaker.{region}.amazonaws.com".format( + service=service, region=region + ) else: host = "{service}.{region}.amazonaws.com".format( service=service, region=region @@ -130,6 +148,26 @@ class DomainDispatcherApplication(object): self.app_instances[backend] = app return app + def get_action_from_body(self, environ): + body = None + try: + # AWS requests use querystrings as the body (Action=x&Data=y&...) + simple_form = environ["CONTENT_TYPE"].startswith( + "application/x-www-form-urlencoded" + ) + request_body_size = int(environ["CONTENT_LENGTH"]) + if simple_form and request_body_size: + body = environ["wsgi.input"].read(request_body_size).decode("utf-8") + body_dict = dict(x.split("=") for x in body.split("&")) + return body_dict["Action"] + except (KeyError, ValueError): + pass + finally: + if body: + # We've consumed the body = need to reset it + environ["wsgi.input"] = io.StringIO(body) + return None + def __call__(self, environ, start_response): backend_app = self.get_application(environ) return backend_app(environ, start_response) @@ -173,12 +211,13 @@ def create_backend_app(service): backend_app = Flask(__name__) backend_app.debug = True backend_app.service = service + CORS(backend_app) # Reset view functions to reset the app backend_app.view_functions = {} backend_app.url_map = Map() backend_app.url_map.converters["regex"] = RegexConverter - backend = list(BACKENDS[service].values())[0] + backend = list(backends.get_backend(service).values())[0] for url_path, handler in backend.flask_paths.items(): view_func = convert_flask_to_httpretty_response(handler) if handler.__name__ == "dispatch": diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index a905039e2..d3e60aef5 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -7,3 +7,60 @@ class MessageRejectedError(RESTError): def __init__(self, message): super(MessageRejectedError, self).__init__("MessageRejected", message) + + +class ConfigurationSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(ConfigurationSetDoesNotExist, self).__init__( + "ConfigurationSetDoesNotExist", message + ) + + +class EventDestinationAlreadyExists(RESTError): + code = 400 + + def __init__(self, 
message): + super(EventDestinationAlreadyExists, self).__init__( + "EventDestinationAlreadyExists", message + ) + + +class TemplateNameAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(TemplateNameAlreadyExists, self).__init__( + "TemplateNameAlreadyExists", message + ) + + +class TemplateDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message) + + +class RuleSetNameAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetNameAlreadyExists, self).__init__( + "RuleSetNameAlreadyExists", message + ) + + +class RuleAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleAlreadyExists, self).__init__("RuleAlreadyExists", message) + + +class RuleSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetDoesNotExist, self).__init__("RuleSetDoesNotExist", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index 4b6ce52c8..d9a44a370 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -1,11 +1,21 @@ from __future__ import unicode_literals +import datetime import email from email.utils import parseaddr from moto.core import BaseBackend, BaseModel from moto.sns.models import sns_backends -from .exceptions import MessageRejectedError +from .exceptions import ( + MessageRejectedError, + ConfigurationSetDoesNotExist, + EventDestinationAlreadyExists, + TemplateNameAlreadyExists, + TemplateDoesNotExist, + RuleSetNameAlreadyExists, + RuleSetDoesNotExist, + RuleAlreadyExists, +) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -81,19 +91,29 @@ class SESBackend(BaseBackend): self.domains = [] self.sent_messages = [] self.sent_message_count = 0 + self.rejected_messages_count = 0 self.sns_topics = {} + self.config_set = {} + self.config_set_event_destination = {} + self.event_destinations = {} + self.templates = {} + self.receipt_rule_set = {} def _is_verified_address(self, source): _, address = parseaddr(source) if address in self.addresses: return True + if address in self.email_addresses: + return True user, host = address.split("@", 1) return host in self.domains def verify_email_identity(self, address): + _, address = parseaddr(address) self.addresses.append(address) def verify_email_address(self, address): + _, address = parseaddr(address) self.email_addresses.append(address) def verify_domain(self, domain): @@ -116,6 +136,7 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -133,6 +154,7 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -182,12 +204,12 @@ class SESBackend(BaseBackend): if sns_topic is not None: message = self.__generate_feedback__(msg_type) if message: - sns_backends[region].publish(sns_topic, message) + sns_backends[region].publish(message, arn=sns_topic) def send_raw_email(self, source, destinations, raw_data, region): if source is not None: _, 
source_email_address = parseaddr(source) - if source_email_address not in self.addresses: + if not self._is_verified_address(source_email_address): raise MessageRejectedError( "Did not have authority to send from email %s" % source_email_address @@ -200,7 +222,7 @@ class SESBackend(BaseBackend): raise MessageRejectedError("Source not specified") _, source_email_address = parseaddr(message["from"]) - if source_email_address not in self.addresses: + if not self._is_verified_address(source_email_address): raise MessageRejectedError( "Did not have authority to send from email %s" % source_email_address @@ -235,5 +257,62 @@ class SESBackend(BaseBackend): return {} + def create_configuration_set(self, configuration_set_name): + self.config_set[configuration_set_name] = 1 + return {} + + def create_configuration_set_event_destination( + self, configuration_set_name, event_destination + ): + + if self.config_set.get(configuration_set_name) is None: + raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.") + + if self.event_destinations.get(event_destination["Name"]): + raise EventDestinationAlreadyExists("Duplicate Event destination Name.") + + self.config_set_event_destination[configuration_set_name] = event_destination + self.event_destinations[event_destination["Name"]] = 1 + + return {} + + def get_send_statistics(self): + + statistics = {} + statistics["DeliveryAttempts"] = self.sent_message_count + statistics["Rejects"] = self.rejected_messages_count + statistics["Complaints"] = 0 + statistics["Bounces"] = 0 + statistics["Timestamp"] = datetime.datetime.utcnow() + return statistics + + def add_template(self, template_info): + template_name = template_info["template_name"] + if self.templates.get(template_name, None): + raise TemplateNameAlreadyExists("Duplicate Template Name.") + self.templates[template_name] = template_info + + def get_template(self, template_name): + if not self.templates.get(template_name, None): + raise TemplateDoesNotExist("Invalid Template Name.") + return self.templates[template_name] + + def list_templates(self): + return list(self.templates.values()) + + def create_receipt_rule_set(self, rule_set_name): + if self.receipt_rule_set.get(rule_set_name) is not None: + raise RuleSetNameAlreadyExists("Duplicate receipt rule set Name.") + self.receipt_rule_set[rule_set_name] = [] + + def create_receipt_rule(self, rule_set_name, rule): + rule_set = self.receipt_rule_set.get(rule_set_name) + if rule_set is None: + raise RuleSetDoesNotExist("Invalid Rule Set Name.") + if rule in rule_set: + raise RuleAlreadyExists("Duplicate Rule Name.") + rule_set.append(rule) + self.receipt_rule_set[rule_set_name] = rule_set + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 1034aeb0d..703cd2e7a 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -5,6 +5,7 @@ import six from moto.core.responses import BaseResponse from .models import ses_backend +from datetime import datetime class EmailResponse(BaseResponse): @@ -133,6 +134,84 @@ class EmailResponse(BaseResponse): template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE) return template.render() + def get_send_statistics(self): + statistics = ses_backend.get_send_statistics() + template = self.response_template(GET_SEND_STATISTICS) + return template.render(all_statistics=[statistics]) + + def create_configuration_set(self): + configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0] + ses_backend.create_configuration_set( + 
configuration_set_name=configuration_set_name
+ )
+ template = self.response_template(CREATE_CONFIGURATION_SET)
+ return template.render()
+
+ def create_configuration_set_event_destination(self):
+ configuration_set_name = self._get_param("ConfigurationSetName")
+ is_configuration_event_enabled = self.querystring.get(
+ "EventDestination.Enabled"
+ )[0]
+ configuration_event_name = self.querystring.get("EventDestination.Name")[0]
+ event_topic_arn = self.querystring.get(
+ "EventDestination.SNSDestination.TopicARN"
+ )[0]
+ event_matching_types = self._get_multi_param(
+ "EventDestination.MatchingEventTypes.member"
+ )
+
+ event_destination = {
+ "Name": configuration_event_name,
+ "Enabled": is_configuration_event_enabled,
+ "EventMatchingTypes": event_matching_types,
+ "SNSDestination": event_topic_arn,
+ }
+
+ ses_backend.create_configuration_set_event_destination(
+ configuration_set_name=configuration_set_name,
+ event_destination=event_destination,
+ )
+
+ template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION)
+ return template.render()
+
+ def create_template(self):
+ template_data = self._get_dict_param("Template")
+ template_info = {}
+ template_info["text_part"] = template_data["._text_part"]
+ template_info["html_part"] = template_data["._html_part"]
+ template_info["template_name"] = template_data["._name"]
+ template_info["subject_part"] = template_data["._subject_part"]
+ template_info["Timestamp"] = datetime.utcnow()
+ ses_backend.add_template(template_info=template_info)
+ template = self.response_template(CREATE_TEMPLATE)
+ return template.render()
+
+ def get_template(self):
+ template_name = self._get_param("TemplateName")
+ template_data = ses_backend.get_template(template_name)
+ template = self.response_template(GET_TEMPLATE)
+ return template.render(template_data=template_data)
+
+ def list_templates(self):
+ email_templates = ses_backend.list_templates()
+ template = self.response_template(LIST_TEMPLATES)
+ return template.render(templates=email_templates)
+
+ def create_receipt_rule_set(self):
+ rule_set_name = self._get_param("RuleSetName")
+ ses_backend.create_receipt_rule_set(rule_set_name)
+ template = self.response_template(CREATE_RECEIPT_RULE_SET)
+ return template.render()
+
+ def create_receipt_rule(self):
+ rule_set_name = self._get_param("RuleSetName")
+ rule = self._get_dict_param("Rule")
+ ses_backend.create_receipt_rule(rule_set_name, rule)
+ template = self.response_template(CREATE_RECEIPT_RULE)
+ return template.render()
+
 VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
@@ -248,3 +327,88 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """<SetIdentityNotificationTopicResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
 <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
 </ResponseMetadata>
 </SetIdentityNotificationTopicResponse>"""
+
+GET_SEND_STATISTICS = """<GetSendStatisticsResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <GetSendStatisticsResult>
+ <SendDataPoints>
+ {% for statistics in all_statistics %}
+ <item>
+ <DeliveryAttempts>{{ statistics["DeliveryAttempts"] }}</DeliveryAttempts>
+ <Rejects>{{ statistics["Rejects"] }}</Rejects>
+ <Bounces>{{ statistics["Bounces"] }}</Bounces>
+ <Complaints>{{ statistics["Complaints"] }}</Complaints>
+ <Timestamp>{{ statistics["Timestamp"] }}</Timestamp>
+ </item>
+ {% endfor %}
+ </SendDataPoints>
+ </GetSendStatisticsResult>
+ <ResponseMetadata>
+ <RequestId>e0abcdfa-c866-11e0-b6d0-273d09173z49</RequestId>
+ </ResponseMetadata>
+</GetSendStatisticsResponse>"""
+
+CREATE_CONFIGURATION_SET = """<CreateConfigurationSetResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <CreateConfigurationSetResult/>
+ <ResponseMetadata>
+ <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+ </ResponseMetadata>
+</CreateConfigurationSetResponse>"""
+
+
+CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """<CreateConfigurationSetEventDestinationResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <CreateConfigurationSetEventDestinationResult/>
+ <ResponseMetadata>
+ <RequestId>67e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+ </ResponseMetadata>
+</CreateConfigurationSetEventDestinationResponse>"""
+
+CREATE_TEMPLATE = """<CreateTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <CreateTemplateResult/>
+ <ResponseMetadata>
+ <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+ </ResponseMetadata>
+</CreateTemplateResponse>"""
+
+GET_TEMPLATE = """<GetTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <GetTemplateResult>
+ <Template>
+ <TemplateName>{{ template_data["template_name"] }}</TemplateName>
+ <SubjectPart>{{ template_data["subject_part"] }}</SubjectPart>
+ <HtmlPart>{{ template_data["html_part"] }}</HtmlPart>
+ <TextPart>{{ template_data["text_part"] }}</TextPart>
+ </Template>
+ </GetTemplateResult>
+ <ResponseMetadata>
+ <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+ </ResponseMetadata>
+</GetTemplateResponse>"""
+
+LIST_TEMPLATES = """<ListTemplatesResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <ListTemplatesResult>
+ <TemplatesMetadata>
+ {% for template in templates %}
+ <member>
+ <Name>{{ template["template_name"] }}</Name>
+ <CreatedTimestamp>{{ template["Timestamp"] }}</CreatedTimestamp>
+ </member>
+ {% endfor %}
+ </TemplatesMetadata>
+ </ListTemplatesResult>
+ <ResponseMetadata>
+ <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+ </ResponseMetadata>
+</ListTemplatesResponse>"""
+
+CREATE_RECEIPT_RULE_SET = """<CreateReceiptRuleSetResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <CreateReceiptRuleSetResult/>
+ <ResponseMetadata>
+ <RequestId>47e0ef1a-9bf2-11e1-9279-01ab88cf109a</RequestId>
+ </ResponseMetadata>
+</CreateReceiptRuleSetResponse>"""
+
+CREATE_RECEIPT_RULE = """<CreateReceiptRuleResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+ <CreateReceiptRuleResult/>
+ <ResponseMetadata>
+ <RequestId>15e0ef1a-9bf2-11e1-9279-01ab88cf109a</RequestId>
+ </ResponseMetadata>
+</CreateReceiptRuleResponse>"""
diff --git a/moto/sns/models.py b/moto/sns/models.py
index d6791eecf..7d297fbdc 100644
--- a/moto/sns/models.py
+++ b/moto/sns/models.py
@@ -11,7 +11,7 @@ import re
 from boto3 import Session
 from moto.compat import OrderedDict
-from moto.core import BaseBackend, BaseModel
+from moto.core import BaseBackend, BaseModel, CloudFormationModel
 from moto.core.utils import (
 iso_8601_datetime_with_milliseconds,
 camelcase_to_underscores,
@@ -35,15 +35,17 @@ from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
 DEFAULT_PAGE_SIZE = 100
 MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB
+MAXIMUM_SMS_MESSAGE_BYTES = 1600 # Amazon limit for a single publish SMS action
-class Topic(BaseModel):
+class Topic(CloudFormationModel):
 def __init__(self, name, sns_backend):
 self.name = name
 self.sns_backend = sns_backend
 self.account_id = DEFAULT_ACCOUNT_ID
 self.display_name = ""
 self.delivery_policy = ""
+ self.kms_master_key_id = ""
 self.effective_delivery_policy = json.dumps(DEFAULT_EFFECTIVE_DELIVERY_POLICY)
 self.arn = make_arn_for_topic(self.account_id, name, sns_backend.region_name)
@@ -87,6 +89,15 @@
 def policy(self, policy):
 self._policy_json = json.loads(policy)
+ @staticmethod
+ def cloudformation_name_type():
+ return "TopicName"
+
+ @staticmethod
+ def cloudformation_type():
+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sns-topic.html
+ return "AWS::SNS::Topic"
+
 @classmethod
 def create_from_cloudformation_json(
 cls, resource_name, cloudformation_json, region_name
@@ -94,7 +105,7 @@
 sns_backend = sns_backends[region_name]
 properties = cloudformation_json["Properties"]
- topic = sns_backend.create_topic(properties.get("TopicName"))
+ topic = sns_backend.create_topic(resource_name)
 for subscription in properties.get("Subscription", []):
 sns_backend.subscribe(
 topic.arn, subscription["Endpoint"], subscription["Protocol"]
@@ -146,20 +157,38 @@ class Subscription(BaseModel):
 queue_name = self.endpoint.split(":")[-1]
 region = self.endpoint.split(":")[3]
 if self.attributes.get("RawMessageDelivery") != "true":
- enveloped_message = json.dumps(
- self.get_post_data(
- message,
- message_id,
- subject,
- message_attributes=message_attributes,
+ sqs_backends[region].send_message(
+ queue_name,
+ json.dumps(
+ self.get_post_data(
+ message,
+ message_id,
+ subject,
+ message_attributes=message_attributes,
+ ),
+ sort_keys=True,
+ indent=2,
+ separators=(",", ": "),
 ),
- sort_keys=True,
- indent=2,
- separators=(",", ": "),
 )
 else:
- enveloped_message = message
- sqs_backends[region].send_message(queue_name, enveloped_message)
+ raw_message_attributes = {}
+ for key, value in message_attributes.items():
+ type_key = "string_value"
+ type_value = value["Value"]
+ if value["Type"].startswith("Binary"):
+ type_key = "binary_value"
+ elif value["Type"].startswith("Number"):
+ type_value = "{0:g}".format(value["Value"])
+
+ raw_message_attributes[key] = {
+ "data_type": value["Type"],
+ type_key: type_value,
+ }
+
+ sqs_backends[region].send_message(
+ queue_name, message, message_attributes=raw_message_attributes
+ )
 elif self.protocol in ["http", "https"]:
 post_data = self.get_post_data(message, message_id, subject)
 requests.post(
@@ -338,6 +367,7 @@ class SNSBackend(BaseBackend):
 self.platform_endpoints = {}
 self.region_name = region_name
 self.sms_attributes = {}
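+ # Messages published directly to a phone number (rather than to a topic)
+ # are recorded here, keyed by message id. A test could then assert on them
+ # through the backend internals -- a sketch, assuming in-process mode:
+ #
+ #   from moto.sns.models import sns_backends
+ #   phone, body = sns_backends["us-east-1"].sms_messages[message_id]
+ self.sms_messages = OrderedDict()
 self.opt_out_numbers = [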
"+447420500600", "+447420505401", @@ -396,8 +426,18 @@ class SNSBackend(BaseBackend): def list_topics(self, next_token=None): return self._get_values_nexttoken(self.topics, next_token) + def delete_topic_subscriptions(self, topic): + for key, value in self.subscriptions.items(): + if value.topic == topic: + self.subscriptions.pop(key) + def delete_topic(self, arn): - self.topics.pop(arn) + try: + topic = self.get_topic(arn) + self.delete_topic_subscriptions(topic) + self.topics.pop(arn) + except KeyError: + raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) def get_topic(self, arn): try: @@ -405,12 +445,6 @@ class SNSBackend(BaseBackend): except KeyError: raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) - def get_topic_from_phone_number(self, number): - for subscription in self.subscriptions.values(): - if subscription.protocol == "sms" and subscription.endpoint == number: - return subscription.topic.arn - raise SNSNotFoundError("Could not find valid subscription") - def set_topic_attribute(self, topic_arn, attribute_name, attribute_value): topic = self.get_topic(topic_arn) setattr(topic, attribute_name, attribute_value) @@ -474,11 +508,27 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message, subject=None, message_attributes=None): + def publish( + self, + message, + arn=None, + phone_number=None, + subject=None, + message_attributes=None, + ): if subject is not None and len(subject) > 100: # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError("Subject must be less than 100 characters") + if phone_number: + # This is only an approximation. In fact, we should try to use GSM-7 or UCS-2 encoding to count used bytes + if len(message) > MAXIMUM_SMS_MESSAGE_BYTES: + raise ValueError("SMS message must be less than 1600 bytes") + + message_id = six.text_type(uuid.uuid4()) + self.sms_messages[message_id] = (phone_number, message) + return message_id + if len(message) > MAXIMUM_MESSAGE_LENGTH: raise InvalidParameterValue( "An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long" @@ -562,7 +612,12 @@ class SNSBackend(BaseBackend): return subscription.attributes def set_subscription_attributes(self, arn, name, value): - if name not in ["RawMessageDelivery", "DeliveryPolicy", "FilterPolicy"]: + if name not in [ + "RawMessageDelivery", + "DeliveryPolicy", + "FilterPolicy", + "RedrivePolicy", + ]: raise SNSInvalidParameter("AttributeName") # TODO: should do validation diff --git a/moto/sns/responses.py b/moto/sns/responses.py index c2eb3e7c3..dd30d6517 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,7 +6,7 @@ from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends -from .exceptions import SNSNotFoundError, InvalidParameterValue +from .exceptions import InvalidParameterValue from .utils import is_e164 @@ -158,28 +158,28 @@ class SNSResponse(BaseResponse): topic = self.backend.get_topic(topic_arn) if self.request_json: - return json.dumps( - { - "GetTopicAttributesResponse": { - "GetTopicAttributesResult": { - "Attributes": { - "Owner": topic.account_id, - "Policy": topic.policy, - "TopicArn": topic.arn, - "DisplayName": topic.display_name, - "SubscriptionsPending": topic.subscriptions_pending, - "SubscriptionsConfirmed": topic.subscriptions_confimed, 
- "SubscriptionsDeleted": topic.subscriptions_deleted, - "DeliveryPolicy": topic.delivery_policy, - "EffectiveDeliveryPolicy": topic.effective_delivery_policy, - } - }, - "ResponseMetadata": { - "RequestId": "057f074c-33a7-11df-9540-99d0768312d3" - }, - } + attributes = { + "Owner": topic.account_id, + "Policy": topic.policy, + "TopicArn": topic.arn, + "DisplayName": topic.display_name, + "SubscriptionsPending": topic.subscriptions_pending, + "SubscriptionsConfirmed": topic.subscriptions_confimed, + "SubscriptionsDeleted": topic.subscriptions_deleted, + "DeliveryPolicy": topic.delivery_policy, + "EffectiveDeliveryPolicy": topic.effective_delivery_policy, + } + if topic.kms_master_key_id: + attributes["KmsMasterKeyId"] = topic.kms_master_key_id + response = { + "GetTopicAttributesResponse": { + "GetTopicAttributesResult": {"Attributes": attributes}, + "ResponseMetadata": { + "RequestId": "057f074c-33a7-11df-9540-99d0768312d3" + }, } - ) + } + return json.dumps(response) template = self.response_template(GET_TOPIC_ATTRIBUTES_TEMPLATE) return template.render(topic=topic) @@ -327,6 +327,7 @@ class SNSResponse(BaseResponse): message_attributes = self._parse_message_attributes() + arn = None if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -336,18 +337,6 @@ class SNSResponse(BaseResponse): ), dict(status=400), ) - - # Look up topic arn by phone number - try: - arn = self.backend.get_topic_from_phone_number(phone_number) - except SNSNotFoundError: - return ( - self._error( - "ParameterValueInvalid", - "Could not find topic associated with phone number", - ), - dict(status=400), - ) elif target_arn is not None: arn = target_arn else: @@ -357,7 +346,11 @@ class SNSResponse(BaseResponse): try: message_id = self.backend.publish( - arn, message, subject=subject, message_attributes=message_attributes + message, + arn=arn, + phone_number=phone_number, + subject=subject, + message_attributes=message_attributes, ) except ValueError as err: error_response = self._error("InvalidParameter", str(err)) @@ -834,6 +827,12 @@ GET_TOPIC_ATTRIBUTES_TEMPLATE = """ b'\x00\x00\x00\t' - encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name) - # The datatype is additionally given a final byte - # representing which type it is - encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) - encoded += TRANSPORT_TYPE_ENCODINGS[data_type] + for attrName in sorted(self.message_attributes.keys()): + self.validate_attribute_name(attrName) + attrValue = self.message_attributes[attrName] + # Encode name + self.update_binary_length_and_value(md5, self.utf8(attrName)) + # Encode type + self.update_binary_length_and_value(md5, self.utf8(attrValue["data_type"])) - if data_type == "String" or data_type == "Number": - value = attr["string_value"] - elif data_type == "Binary": - print(data_type, attr["binary_value"], type(attr["binary_value"])) - value = base64.b64decode(attr["binary_value"]) - else: - print( - "Moto hasn't implemented MD5 hashing for {} attributes".format( - data_type - ) + if attrValue.get("string_value"): + md5.update(bytearray([STRING_TYPE_FIELD_INDEX])) + self.update_binary_length_and_value( + md5, self.utf8(attrValue.get("string_value")) ) - # The following should be enough of a clue to users that - # they are not, in fact, looking at a correct MD5 while - # also following the character and length constraints of - # MD5 so as not to break client softwre - return "deadbeefdeadbeefdeadbeefdeadbeef" + elif attrValue.get("binary_value"): + 
md5.update(bytearray([BINARY_TYPE_FIELD_INDEX])) + decoded_binary_value = base64.b64decode(attrValue.get("binary_value")) + self.update_binary_length_and_value(md5, decoded_binary_value) + # string_list_value type is not implemented, reserved for the future use. + # See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_MessageAttributeValue.html + elif len(attrValue["string_list_value"]) > 0: + md5.update(bytearray([STRING_LIST_TYPE_FIELD_INDEX])) + for strListMember in attrValue["string_list_value"]: + self.update_binary_length_and_value(md5, self.utf8(strListMember)) + # binary_list_value type is not implemented, reserved for the future use. + # See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_MessageAttributeValue.html + elif len(attrValue["binary_list_value"]) > 0: + md5.update(bytearray([BINARY_LIST_TYPE_FIELD_INDEX])) + for strListMember in attrValue["binary_list_value"]: + decoded_binary_value = base64.b64decode(strListMember) + self.update_binary_length_and_value(md5, decoded_binary_value) - encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value) - - md5.update(encoded) return md5.hexdigest() + @staticmethod + def update_binary_length_and_value(md5, value): + length_bytes = struct.pack("!I".encode("ascii"), len(value)) + md5.update(length_bytes) + md5.update(value) + + @staticmethod + def validate_attribute_name(name): + if not ATTRIBUTE_NAME_PATTERN.match(name): + raise MessageAttributesInvalid( + "The message attribute name '{0}' is invalid. " + "Attribute name can contain A-Z, a-z, 0-9, " + "underscore (_), hyphen (-), and period (.) characters.".format(name) + ) + + @staticmethod + def utf8(string): + if isinstance(string, six.string_types): + return string.encode("utf-8") + return string + @property def body(self): return escape(self._body) @@ -175,7 +199,7 @@ class Message(BaseModel): return False -class Queue(BaseModel): +class Queue(CloudFormationModel): BASE_ATTRIBUTES = [ "ApproximateNumberOfMessages", "ApproximateNumberOfMessagesDelayed", @@ -230,7 +254,7 @@ class Queue(BaseModel): "FifoQueue": "false", "KmsDataKeyReusePeriodSeconds": 300, # five minutes "KmsMasterKeyId": None, - "MaximumMessageSize": int(64 << 10), + "MaximumMessageSize": MAXIMUM_MESSAGE_LENGTH, "MessageRetentionPeriod": 86400 * 4, # four days "Policy": None, "ReceiveMessageWaitTimeSeconds": 0, @@ -243,9 +267,12 @@ class Queue(BaseModel): # Check some conditions if self.fifo_queue and not self.name.endswith(".fifo"): - raise MessageAttributesInvalid( - "Queue name must end in .fifo for FIFO queues" - ) + raise InvalidParameterValue("Queue name must end in .fifo for FIFO queues") + if ( + self.maximum_message_size < MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND + or self.maximum_message_size > MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND + ): + raise InvalidAttributeValue("MaximumMessageSize") @property def pending_messages(self): @@ -343,15 +370,27 @@ class Queue(BaseModel): ), ) + @staticmethod + def cloudformation_name_type(): + return "QueueName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sqs-queue.html + return "AWS::SQS::Queue" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] + properties = deepcopy(cloudformation_json["Properties"]) + # remove Tags from properties and convert tags list to dict + tags = properties.pop("Tags", []) + tags_dict = 
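
# --- Usage sketch: the bounds check above (the MAXIMUM_MESSAGE_SIZE_ATTR_*
# constants are defined earlier in this patch) should reject out-of-range
# MaximumMessageSize values at queue creation; AWS documents the valid range
# as 1,024 through 262,144 bytes.
import boto3
from botocore.exceptions import ClientError
from moto import mock_sqs

@mock_sqs
def test_maximum_message_size_bounds():
    sqs = boto3.client("sqs", region_name="us-east-1")
    try:
        sqs.create_queue(QueueName="tiny", Attributes={"MaximumMessageSize": "512"})
        raise AssertionError("expected InvalidAttributeValue")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "InvalidAttributeValue"
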
tags_from_cloudformation_tags_list(tags) sqs_backend = sqs_backends[region_name] return sqs_backend.create_queue( - name=properties["QueueName"], region=region_name, **properties + name=resource_name, tags=tags_dict, region=region_name, **properties ) @classmethod @@ -359,7 +398,7 @@ class Queue(BaseModel): cls, original_resource, new_resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - queue_name = properties["QueueName"] + queue_name = original_resource.name sqs_backend = sqs_backends[region_name] queue = sqs_backend.get_queue(queue_name) @@ -376,10 +415,8 @@ class Queue(BaseModel): def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - queue_name = properties["QueueName"] sqs_backend = sqs_backends[region_name] - sqs_backend.delete_queue(queue_name) + sqs_backend.delete_queue(resource_name) @property def approximate_number_of_messages_delayed(self): @@ -500,6 +537,15 @@ class Queue(BaseModel): } +def _filter_message_attributes(message, input_message_attributes): + filtered_message_attributes = {} + return_all = "All" in input_message_attributes + for key, value in message.message_attributes.items(): + if return_all or key in input_message_attributes: + filtered_message_attributes[key] = value + message.message_attributes = filtered_message_attributes + + class SQSBackend(BaseBackend): def __init__(self, region_name): self.region_name = region_name @@ -524,22 +570,10 @@ class SQSBackend(BaseBackend): queue_attributes = queue.attributes new_queue_attributes = new_queue.attributes - static_attributes = ( - "DelaySeconds", - "MaximumMessageSize", - "MessageRetentionPeriod", - "Policy", - "QueueArn", - "ReceiveMessageWaitTimeSeconds", - "RedrivePolicy", - "VisibilityTimeout", - "KmsMasterKeyId", - "KmsDataKeyReusePeriodSeconds", - "FifoQueue", - "ContentBasedDeduplication", - ) - for key in static_attributes: + # only the attributes which are being sent for the queue + # creation have to be compared if the queue is existing. + for key in kwargs: if queue_attributes.get(key) != new_queue_attributes.get(key): raise QueueAlreadyExists("The specified queue already exists.") else: @@ -605,7 +639,8 @@ class SQSBackend(BaseBackend): attributes = queue.attributes else: for name in (name for name in attribute_names if name in queue.attributes): - attributes[name] = queue.attributes.get(name) + if queue.attributes.get(name) is not None: + attributes[name] = queue.attributes.get(name) return attributes @@ -626,6 +661,12 @@ class SQSBackend(BaseBackend): queue = self.get_queue(queue_name) + if len(message_body) > queue.maximum_message_size: + msg = "One or more parameters are invalid. Reason: Message must be shorter than {} bytes.".format( + queue.maximum_message_size + ) + raise InvalidParameterValue(msg) + if delay_seconds: delay_seconds = int(delay_seconds) else: @@ -685,6 +726,8 @@ class SQSBackend(BaseBackend): entry["MessageBody"], message_attributes=entry["MessageAttributes"], delay_seconds=entry["DelaySeconds"], + group_id=entry.get("MessageGroupId"), + deduplication_id=entry.get("MessageDeduplicationId"), ) message.user_id = entry["Id"] @@ -701,7 +744,12 @@ class SQSBackend(BaseBackend): return None def receive_messages( - self, queue_name, count, wait_seconds_timeout, visibility_timeout + self, + queue_name, + count, + wait_seconds_timeout, + visibility_timeout, + message_attribute_names=None, ): """ Attempt to retrieve visible messages from a queue. 
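
# --- Usage sketch: _filter_message_attributes above trims each returned
# message's attributes to the names the caller requested ("All" keeps
# everything). Observable via boto3:
import boto3
from moto import mock_sqs

@mock_sqs
def test_attribute_filtering():
    sqs = boto3.client("sqs", region_name="us-east-1")
    url = sqs.create_queue(QueueName="q")["QueueUrl"]
    sqs.send_message(
        QueueUrl=url,
        MessageBody="body",
        MessageAttributes={
            "first": {"DataType": "String", "StringValue": "1"},
            "second": {"DataType": "String", "StringValue": "2"},
        },
    )
    msg = sqs.receive_message(
        QueueUrl=url, MessageAttributeNames=["first"]
    )["Messages"][0]
    assert list(msg["MessageAttributes"]) == ["first"]
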
@@ -717,11 +765,14 @@ class SQSBackend(BaseBackend): :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds """ + if message_attribute_names is None: + message_attribute_names = [] queue = self.get_queue(queue_name) result = [] previous_result_count = len(result) polling_end = unix_time() + wait_seconds_timeout + currently_pending_groups = deepcopy(queue.pending_message_groups) # queue.messages only contains visible messages while True: @@ -739,11 +790,11 @@ class SQSBackend(BaseBackend): # The message is pending but is visible again, so the # consumer must have timed out. queue.pending_messages.remove(message) + currently_pending_groups = deepcopy(queue.pending_message_groups) if message.group_id and queue.fifo_queue: - if message.group_id in queue.pending_message_groups: - # There is already one active message with the same - # group, so we cannot deliver this one. + if message.group_id in currently_pending_groups: + # A previous call is still processing messages in this group, so we cannot deliver this one. continue queue.pending_messages.add(message) @@ -757,6 +808,7 @@ class SQSBackend(BaseBackend): continue message.mark_received(visibility_timeout=visibility_timeout) + _filter_message_attributes(message, message_attribute_names) result.append(message) if len(result) >= count: break @@ -815,6 +867,7 @@ class SQSBackend(BaseBackend): def purge_queue(self, queue_name): queue = self.get_queue(queue_name) queue._messages = [] + queue._pending_messages = set() def list_dead_letter_source_queues(self, queue_name): dlq = self.get_queue(queue_name) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 8acea0799..016637b4c 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -9,12 +9,11 @@ from six.moves.urllib.parse import urlparse from .exceptions import ( EmptyBatchRequest, InvalidAttributeName, - MessageAttributesInvalid, MessageNotInflight, ReceiptHandleIsInvalid, ) from .models import sqs_backends -from .utils import parse_message_attributes +from .utils import parse_message_attributes, extract_input_message_attributes MAXIMUM_VISIBILTY_TIMEOUT = 43200 MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB @@ -71,7 +70,10 @@ class SQSResponse(BaseResponse): def call_action(self): status_code, headers, body = super(SQSResponse, self).call_action() if status_code == 404: - return 404, headers, ERROR_INEXISTENT_QUEUE + queue_name = self.querystring.get("QueueName", [""])[0] + template = self.response_template(ERROR_INEXISTENT_QUEUE) + response = template.render(queue_name=queue_name) + return 404, headers, response return status_code, headers, body def _error(self, code, message, status=400): @@ -82,12 +84,7 @@ class SQSResponse(BaseResponse): request_url = urlparse(self.uri) queue_name = self._get_param("QueueName") - try: - queue = self.sqs_backend.create_queue( - queue_name, self.tags, **self.attribute - ) - except MessageAttributesInvalid as e: - return self._error("InvalidParameterValue", e.description) + queue = self.sqs_backend.create_queue(queue_name, self.tags, **self.attribute) template = self.response_template(CREATE_QUEUE_RESPONSE) return template.render(queue_url=queue.url(request_url)) @@ -225,13 +222,18 @@ class SQSResponse(BaseResponse): if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, dict(status=400) - try: - message_attributes = parse_message_attributes(self.querystring) - except 
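
# --- Usage sketch: the pending_message_groups snapshot taken above means a
# FIFO queue will not hand out a second message from a group while an earlier
# one is still in flight, even across separate receive calls.
import boto3
from moto import mock_sqs

@mock_sqs
def test_fifo_group_locking():
    sqs = boto3.client("sqs", region_name="us-east-1")
    url = sqs.create_queue(
        QueueName="q.fifo",
        Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"},
    )["QueueUrl"]
    for body in ("first", "second"):
        sqs.send_message(QueueUrl=url, MessageBody=body, MessageGroupId="group-1")
    assert len(sqs.receive_message(QueueUrl=url)["Messages"]) == 1
    # "second" stays locked behind "first" until it is deleted or times out
    assert sqs.receive_message(QueueUrl=url).get("Messages", []) == []
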
MessageAttributesInvalid as e: - return e.description, dict(status=e.status_code) + message_attributes = parse_message_attributes(self.querystring) queue_name = self._get_queue_name() + if not message_group_id: + queue = self.sqs_backend.get_queue(queue_name) + if queue.attributes.get("FifoQueue", False): + return self._error( + "MissingParameter", + "The request must contain the parameter MessageGroupId.", + ) + message = self.sqs_backend.send_message( queue_name, message, @@ -283,8 +285,21 @@ class SQSResponse(BaseResponse): [None], )[0], "MessageAttributes": message_attributes, + "MessageGroupId": self.querystring.get( + "SendMessageBatchRequestEntry.{}.MessageGroupId".format(index), + [None], + )[0], + "MessageDeduplicationId": self.querystring.get( + "SendMessageBatchRequestEntry.{}.MessageDeduplicationId".format( + index + ), + [None], + )[0], } + if entries == {}: + raise EmptyBatchRequest() + messages = self.sqs_backend.send_message_batch(queue_name, entries) template = self.response_template(SEND_MESSAGE_BATCH_RESPONSE) @@ -337,6 +352,9 @@ class SQSResponse(BaseResponse): def receive_message(self): queue_name = self._get_queue_name() + message_attributes = self._get_multi_param("message_attributes") + if not message_attributes: + message_attributes = extract_input_message_attributes(self.querystring,) queue = self.sqs_backend.get_queue(queue_name) @@ -376,7 +394,7 @@ class SQSResponse(BaseResponse): return ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE, dict(status=400) messages = self.sqs_backend.receive_messages( - queue_name, message_count, wait_time, visibility_timeout + queue_name, message_count, wait_time, visibility_timeout, message_attributes ) template = self.response_template(RECEIVE_MESSAGE_RESPONSE) return template.render(messages=messages) @@ -475,10 +493,12 @@ DELETE_QUEUE_RESPONSE = """ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% for key, value in attributes.items() %} - - {{ key }} - {{ value }} - + {% if value is not none %} + + {{ key }} + {{ value }} + + {% endif %} {% endfor %} @@ -706,7 +726,11 @@ ERROR_INEXISTENT_QUEUE = """\d+)/(?P[a-zA-Z0-9\-_\.]+)": dispatch, + r"{0}/(?P\d+)/(?P[a-zA-Z0-9\-_\.]+)": dispatch, } diff --git a/moto/sqs/utils.py b/moto/sqs/utils.py index f3b8bbfe8..876d6b40e 100644 --- a/moto/sqs/utils.py +++ b/moto/sqs/utils.py @@ -11,6 +11,21 @@ def generate_receipt_handle(): return "".join(random.choice(string.ascii_lowercase) for x in range(length)) +def extract_input_message_attributes(querystring): + message_attributes = [] + index = 1 + while True: + # Loop through looking for message attributes + name_key = "MessageAttributeName.{0}".format(index) + name = querystring.get(name_key) + if not name: + # Found all attributes + break + message_attributes.append(name[0]) + index = index + 1 + return message_attributes + + def parse_message_attributes(querystring, base="", value_namespace="Value."): message_attributes = {} index = 1 @@ -34,7 +49,7 @@ def parse_message_attributes(querystring, base="", value_namespace="Value."): ) data_type_parts = data_type[0].split(".") - if len(data_type_parts) > 2 or data_type_parts[0] not in [ + if data_type_parts[0] not in [ "String", "Binary", "Number", diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index 3458fe7d3..f68e47029 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -23,8 +23,95 @@ class InvalidFilterValue(JsonRESTError): super(InvalidFilterValue, self).__init__("InvalidFilterValue", message) +class ParameterNotFound(JsonRESTError): + code = 400 + + def __init__(self, message): 
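
# --- Contract sketch for the new extract_input_message_attributes helper,
# which walks the indexed MessageAttributeName.N querystring keys; values
# arrive as single-element lists, as in moto's querystring parsing:
from moto.sqs.utils import extract_input_message_attributes

querystring = {
    "MessageAttributeName.1": ["first"],
    "MessageAttributeName.2": ["second"],
}
assert extract_input_message_attributes(querystring) == ["first", "second"]
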
+ super(ParameterNotFound, self).__init__("ParameterNotFound", message) + + +class ParameterVersionNotFound(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ParameterVersionNotFound, self).__init__( + "ParameterVersionNotFound", message + ) + + +class ParameterVersionLabelLimitExceeded(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ParameterVersionLabelLimitExceeded, self).__init__( + "ParameterVersionLabelLimitExceeded", message + ) + + class ValidationException(JsonRESTError): code = 400 def __init__(self, message): super(ValidationException, self).__init__("ValidationException", message) + + +class DocumentAlreadyExists(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DocumentAlreadyExists, self).__init__("DocumentAlreadyExists", message) + + +class InvalidDocument(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocument, self).__init__("InvalidDocument", message) + + +class InvalidDocumentOperation(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentOperation, self).__init__( + "InvalidDocumentOperation", message + ) + + +class AccessDeniedException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(AccessDeniedException, self).__init__("AccessDeniedException", message) + + +class InvalidDocumentContent(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentContent, self).__init__("InvalidDocumentContent", message) + + +class InvalidDocumentVersion(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentVersion, self).__init__("InvalidDocumentVersion", message) + + +class DuplicateDocumentVersionName(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DuplicateDocumentVersionName, self).__init__( + "DuplicateDocumentVersionName", message + ) + + +class DuplicateDocumentContent(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DuplicateDocumentContent, self).__init__( + "DuplicateDocumentContent", message + ) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 60c47f021..538e700f8 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -1,17 +1,19 @@ from __future__ import unicode_literals import re +from boto3 import Session from collections import defaultdict -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.ec2 import ec2_backends -from moto.cloudformation import cloudformation_backends import datetime import time import uuid -import itertools +import json +import yaml +import hashlib from .utils import parameter_arn from .exceptions import ( @@ -19,6 +21,17 @@ from .exceptions import ( InvalidFilterValue, InvalidFilterOption, InvalidFilterKey, + ParameterVersionLabelLimitExceeded, + ParameterVersionNotFound, + ParameterNotFound, + DocumentAlreadyExists, + InvalidDocumentOperation, + AccessDeniedException, + InvalidDocument, + InvalidDocumentContent, + InvalidDocumentVersion, + DuplicateDocumentVersionName, + DuplicateDocumentContent, ) @@ -41,6 +54,7 @@ class Parameter(BaseModel): self.keyid = keyid self.last_modified_date = last_modified_date self.version = version + self.labels = [] if self.type == "SecureString": if not self.keyid: @@ -75,7 +89,7 @@ class Parameter(BaseModel): return r - def describe_response_object(self, decrypt=False): + def describe_response_object(self, decrypt=False, include_labels=False): r = 
self.response_object(decrypt) r["LastModifiedDate"] = round(self.last_modified_date, 3) r["LastModifiedUser"] = "N/A" @@ -89,12 +103,117 @@ class Parameter(BaseModel): if self.allowed_pattern: r["AllowedPattern"] = self.allowed_pattern + if include_labels: + r["Labels"] = self.labels + return r MAX_TIMEOUT_SECONDS = 3600 +def generate_ssm_doc_param_list(parameters): + if not parameters: + return None + param_list = [] + for param_name, param_info in parameters.items(): + final_dict = {} + + final_dict["Name"] = param_name + final_dict["Type"] = param_info["type"] + final_dict["Description"] = param_info["description"] + + if ( + param_info["type"] == "StringList" + or param_info["type"] == "StringMap" + or param_info["type"] == "MapList" + ): + final_dict["DefaultValue"] = json.dumps(param_info["default"]) + else: + final_dict["DefaultValue"] = str(param_info["default"]) + + param_list.append(final_dict) + + return param_list + + +class Document(BaseModel): + def __init__( + self, + name, + version_name, + content, + document_type, + document_format, + requires, + attachments, + target_type, + tags, + document_version="1", + ): + self.name = name + self.version_name = version_name + self.content = content + self.document_type = document_type + self.document_format = document_format + self.requires = requires + self.attachments = attachments + self.target_type = target_type + self.tags = tags + + self.status = "Active" + self.document_version = document_version + self.owner = ACCOUNT_ID + self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + + if document_format == "JSON": + try: + content_json = json.loads(content) + except ValueError: + # Python2 + raise InvalidDocumentContent( + "The content for the document is not valid." + ) + except json.decoder.JSONDecodeError: + raise InvalidDocumentContent( + "The content for the document is not valid." + ) + elif document_format == "YAML": + try: + content_json = yaml.safe_load(content) + except yaml.YAMLError: + raise InvalidDocumentContent( + "The content for the document is not valid." 
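
# --- Orientation sketch (illustrative content, not from the patch): a minimal
# schema-2.2 document body of the kind the Document class above parses; the
# "parameters" block is what generate_ssm_doc_param_list flattens into the
# Parameters list of the describe output.
import json

doc_content = json.dumps(
    {
        "schemaVersion": "2.2",
        "description": "Example run-command document",
        "parameters": {
            "Message": {
                "type": "String",
                "description": "Text to echo",
                "default": "Hello",
            }
        },
        "mainSteps": [
            {
                "action": "aws:runShellScript",
                "name": "echo",
                "inputs": {"runCommand": ["echo {{ Message }}"]},
            }
        ],
    }
)
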
+ ) + else: + raise ValidationException("Invalid document format " + str(document_format)) + + self.content_json = content_json + + try: + self.schema_version = str(content_json["schemaVersion"]) + self.description = content_json.get("description") + self.outputs = content_json.get("outputs") + self.files = content_json.get("files") + # TODO add platformType (requires mapping the ssm actions to OS's this isn't well documented) + self.platform_types = ["Not Implemented (moto)"] + self.parameter_list = generate_ssm_doc_param_list( + content_json.get("parameters") + ) + + if ( + self.schema_version == "0.3" + or self.schema_version == "2.0" + or self.schema_version == "2.2" + ): + self.mainSteps = content_json["mainSteps"] + elif self.schema_version == "1.2": + self.runtimeConfig = content_json.get("runtimeConfig") + + except KeyError: + raise InvalidDocumentContent("The content for the document is not valid.") + + class Command(BaseModel): def __init__( self, @@ -126,9 +245,6 @@ class Command(BaseModel): if targets is None: targets = [] - self.error_count = 0 - self.completed_count = len(instance_ids) - self.target_count = len(instance_ids) self.command_id = str(uuid.uuid4()) self.status = "Success" self.status_details = "Details placeholder" @@ -142,7 +258,6 @@ class Command(BaseModel): self.comment = comment self.document_name = document_name - self.instance_ids = instance_ids self.max_concurrency = max_concurrency self.max_errors = max_errors self.notification_config = notification_config @@ -154,14 +269,19 @@ class Command(BaseModel): self.targets = targets self.backend_region = backend_region - # Get instance ids from a cloud formation stack target. - stack_instance_ids = [ - self.get_instance_ids_by_stack_ids(target["Values"]) - for target in self.targets - if target["Key"] == "tag:aws:cloudformation:stack-name" - ] + self.instance_ids = instance_ids + self.instance_ids += self._get_instance_ids_from_targets() + # Ensure no duplicate instance_ids + self.instance_ids = list(set(self.instance_ids)) - self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids)) + # NOTE: All of these counts are 0 in the ssm:SendCommand response + # received from a real AWS backend. The counts are correct when + # making subsequent calls to ssm:DescribeCommand or ssm:ListCommands. + # Not likely to cause any problems, but perhaps an area for future + # improvement. + self.error_count = 0 + self.completed_count = len(instance_ids) + self.target_count = len(instance_ids) # Create invocations with a single run command plugin. 
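
# --- Usage sketch: _get_instance_ids_from_targets above resolves Targets
# through the EC2 backend's reservation filters, so tag-based targeting now
# works without a CloudFormation stack. Assumes mock_ec2/mock_ssm; the AMI id
# is illustrative and not validated by moto here.
import boto3
from moto import mock_ec2, mock_ssm

@mock_ec2
@mock_ssm
def test_send_command_by_tag():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    ssm = boto3.client("ssm", region_name="us-east-1")
    instance_id = ec2.run_instances(
        ImageId="ami-12345678", MinCount=1, MaxCount=1
    )["Instances"][0]["InstanceId"]
    ec2.create_tags(Resources=[instance_id], Tags=[{"Key": "Env", "Value": "dev"}])
    command = ssm.send_command(
        DocumentName="AWS-RunShellScript",
        Targets=[{"Key": "tag:Env", "Values": ["dev"]}],
        Parameters={"commands": ["ls"]},
    )["Command"]
    assert "CommandId" in command
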
self.invocations = [] @@ -170,19 +290,15 @@ class Command(BaseModel): self.invocation_response(instance_id, "aws:runShellScript") ) - def get_instance_ids_by_stack_ids(self, stack_ids): - instance_ids = [] - cloudformation_backend = cloudformation_backends[self.backend_region] - for stack_id in stack_ids: - stack_resources = cloudformation_backend.list_stack_resources(stack_id) - instance_resources = [ - instance.id - for instance in stack_resources - if instance.type == "AWS::EC2::Instance" - ] - instance_ids.extend(instance_resources) - - return instance_ids + def _get_instance_ids_from_targets(self): + target_instance_ids = [] + ec2_backend = ec2_backends[self.backend_region] + ec2_filters = {target["Key"]: target["Values"] for target in self.targets} + reservations = ec2_backend.all_reservations(filters=ec2_filters) + for reservation in reservations: + for instance in reservation.instances: + target_instance_ids.append(instance.id) + return target_instance_ids def response_object(self): r = { @@ -262,8 +378,96 @@ class Command(BaseModel): return invocation +def _validate_document_format(document_format): + aws_doc_formats = ["JSON", "YAML"] + if document_format not in aws_doc_formats: + raise ValidationException("Invalid document format " + str(document_format)) + + +def _validate_document_info(content, name, document_type, document_format, strict=True): + aws_ssm_name_regex = r"^[a-zA-Z0-9_\-.]{3,128}$" + aws_name_reject_list = ["aws-", "amazon", "amzn"] + aws_doc_types = [ + "Command", + "Policy", + "Automation", + "Session", + "Package", + "ApplicationConfiguration", + "ApplicationConfigurationSchema", + "DeploymentStrategy", + "ChangeCalendar", + ] + + _validate_document_format(document_format) + + if not content: + raise ValidationException("Content is required") + + if list(filter(name.startswith, aws_name_reject_list)): + raise ValidationException("Invalid document name " + str(name)) + ssm_name_pattern = re.compile(aws_ssm_name_regex) + if not ssm_name_pattern.match(name): + raise ValidationException("Invalid document name " + str(name)) + + if strict and document_type not in aws_doc_types: + # Update document doesn't use document type + raise ValidationException("Invalid document type " + str(document_type)) + + +def _document_filter_equal_comparator(keyed_value, filter): + for v in filter["Values"]: + if keyed_value == v: + return True + return False + + +def _document_filter_list_includes_comparator(keyed_value_list, filter): + for v in filter["Values"]: + if v in keyed_value_list: + return True + return False + + +def _document_filter_match(filters, ssm_doc): + for filter in filters: + if filter["Key"] == "Name" and not _document_filter_equal_comparator( + ssm_doc.name, filter + ): + return False + + elif filter["Key"] == "Owner": + if len(filter["Values"]) != 1: + raise ValidationException("Owner filter can only have one value.") + if filter["Values"][0] == "Self": + # Update to running account ID + filter["Values"][0] = ACCOUNT_ID + if not _document_filter_equal_comparator(ssm_doc.owner, filter): + return False + + elif filter[ + "Key" + ] == "PlatformTypes" and not _document_filter_list_includes_comparator( + ssm_doc.platform_types, filter + ): + return False + + elif filter["Key"] == "DocumentType" and not _document_filter_equal_comparator( + ssm_doc.document_type, filter + ): + return False + + elif filter["Key"] == "TargetType" and not _document_filter_equal_comparator( + ssm_doc.target_type, filter + ): + return False + + return True + + class 
SimpleSystemManagerBackend(BaseBackend): - def __init__(self): + def __init__(self, region_name=None): + super(SimpleSystemManagerBackend, self).__init__() # each value is a list of all of the versions for a parameter # to get the current value, grab the last item of the list self._parameters = defaultdict(list) @@ -271,17 +475,359 @@ class SimpleSystemManagerBackend(BaseBackend): self._resource_tags = defaultdict(lambda: defaultdict(dict)) self._commands = [] self._errors = [] + self._documents = defaultdict(dict) - # figure out what region we're in - for region, backend in ssm_backends.items(): - if backend == self: - self._region = region + self._region = region_name + + def reset(self): + region_name = self._region + self.__dict__ = {} + self.__init__(region_name) + + def _generate_document_description(self, document): + + latest = self._documents[document.name]["latest_version"] + default_version = self._documents[document.name]["default_version"] + base = { + "Hash": hashlib.sha256(document.content.encode("utf-8")).hexdigest(), + "HashType": "Sha256", + "Name": document.name, + "Owner": document.owner, + "CreatedDate": document.created_date, + "Status": document.status, + "DocumentVersion": document.document_version, + "Description": document.description, + "Parameters": document.parameter_list, + "PlatformTypes": document.platform_types, + "DocumentType": document.document_type, + "SchemaVersion": document.schema_version, + "LatestVersion": latest, + "DefaultVersion": default_version, + "DocumentFormat": document.document_format, + } + if document.version_name: + base["VersionName"] = document.version_name + if document.target_type: + base["TargetType"] = document.target_type + if document.tags: + base["Tags"] = document.tags + + return base + + def _generate_document_information(self, ssm_document, document_format): + base = { + "Name": ssm_document.name, + "DocumentVersion": ssm_document.document_version, + "Status": ssm_document.status, + "Content": ssm_document.content, + "DocumentType": ssm_document.document_type, + "DocumentFormat": document_format, + } + + if document_format == "JSON": + base["Content"] = json.dumps(ssm_document.content_json) + elif document_format == "YAML": + base["Content"] = yaml.dump(ssm_document.content_json) + else: + raise ValidationException("Invalid document format " + str(document_format)) + + if ssm_document.version_name: + base["VersionName"] = ssm_document.version_name + if ssm_document.requires: + base["Requires"] = ssm_document.requires + if ssm_document.attachments: + base["AttachmentsContent"] = ssm_document.attachments + + return base + + def _generate_document_list_information(self, ssm_document): + base = { + "Name": ssm_document.name, + "Owner": ssm_document.owner, + "DocumentVersion": ssm_document.document_version, + "DocumentType": ssm_document.document_type, + "SchemaVersion": ssm_document.schema_version, + "DocumentFormat": ssm_document.document_format, + } + if ssm_document.version_name: + base["VersionName"] = ssm_document.version_name + if ssm_document.platform_types: + base["PlatformTypes"] = ssm_document.platform_types + if ssm_document.target_type: + base["TargetType"] = ssm_document.target_type + if ssm_document.tags: + base["Tags"] = ssm_document.tags + if ssm_document.requires: + base["Requires"] = ssm_document.requires + + return base + + def create_document( + self, + content, + requires, + attachments, + name, + version_name, + document_type, + document_format, + target_type, + tags, + ): + ssm_document = Document( + 
name=name, + version_name=version_name, + content=content, + document_type=document_type, + document_format=document_format, + requires=requires, + attachments=attachments, + target_type=target_type, + tags=tags, + ) + + _validate_document_info( + content=content, + name=name, + document_type=document_type, + document_format=document_format, + ) + + if self._documents.get(ssm_document.name): + raise DocumentAlreadyExists("The specified document already exists.") + + self._documents[ssm_document.name] = { + "documents": {ssm_document.document_version: ssm_document}, + "default_version": ssm_document.document_version, + "latest_version": ssm_document.document_version, + } + + return self._generate_document_description(ssm_document) + + def delete_document(self, name, document_version, version_name, force): + documents = self._documents.get(name, {}).get("documents", {}) + keys_to_delete = set() + + if documents: + default_version = self._documents[name]["default_version"] + + if ( + documents[default_version].document_type + == "ApplicationConfigurationSchema" + and not force + ): + raise InvalidDocumentOperation( + "You attempted to delete a document while it is still shared. " + "You must stop sharing the document before you can delete it." + ) + + if document_version and document_version == default_version: + raise InvalidDocumentOperation( + "Default version of the document can't be deleted." + ) + + if document_version or version_name: + # We delete only a specific version + delete_doc = self._find_document(name, document_version, version_name) + + # we can't delete only the default version + if ( + delete_doc + and delete_doc.document_version == default_version + and len(documents) != 1 + ): + raise InvalidDocumentOperation( + "Default version of the document can't be deleted." 
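
# --- Usage sketch tying create_document together with the describe/delete
# paths that follow, using a minimal JSON body like the one shown earlier:
import json

import boto3
from moto import mock_ssm

@mock_ssm
def test_document_lifecycle():
    ssm = boto3.client("ssm", region_name="us-east-1")
    content = json.dumps({"schemaVersion": "2.2", "mainSteps": []})
    ssm.create_document(Content=content, Name="example-doc", DocumentType="Command")
    doc = ssm.describe_document(Name="example-doc")["Document"]
    assert doc["DocumentVersion"] == "1"  # the first version becomes the default
    ssm.delete_document(Name="example-doc")  # no version given: all versions go
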
+ ) + + if delete_doc: + keys_to_delete.add(delete_doc.document_version) + else: + raise InvalidDocument("The specified document does not exist.") + else: + # We are deleting all versions + keys_to_delete = set(documents.keys()) + + for key in keys_to_delete: + del self._documents[name]["documents"][key] + + if len(self._documents[name]["documents"].keys()) == 0: + del self._documents[name] + else: + old_latest = self._documents[name]["latest_version"] + if old_latest not in self._documents[name]["documents"].keys(): + leftover_keys = self._documents[name]["documents"].keys() + int_keys = [] + for key in leftover_keys: + int_keys.append(int(key)) + self._documents[name]["latest_version"] = str(sorted(int_keys)[-1]) + else: + raise InvalidDocument("The specified document does not exist.") + + def _find_document( + self, name, document_version=None, version_name=None, strict=True + ): + if not self._documents.get(name): + raise InvalidDocument("The specified document does not exist.") + + documents = self._documents[name]["documents"] + ssm_document = None + + if not version_name and not document_version: + # Retrieve default version + default_version = self._documents[name]["default_version"] + ssm_document = documents.get(default_version) + + elif version_name and document_version: + for doc_version, document in documents.items(): + if ( + doc_version == document_version + and document.version_name == version_name + ): + ssm_document = document + break + + else: + for doc_version, document in documents.items(): + if document_version and doc_version == document_version: + ssm_document = document + break + if version_name and document.version_name == version_name: + ssm_document = document + break + + if strict and not ssm_document: + raise InvalidDocument("The specified document does not exist.") + + return ssm_document + + def get_document(self, name, document_version, version_name, document_format): + + ssm_document = self._find_document(name, document_version, version_name) + if not document_format: + document_format = ssm_document.document_format + else: + _validate_document_format(document_format=document_format) + + return self._generate_document_information(ssm_document, document_format) + + def update_document_default_version(self, name, document_version): + + ssm_document = self._find_document(name, document_version=document_version) + self._documents[name]["default_version"] = document_version + base = { + "Name": ssm_document.name, + "DefaultVersion": document_version, + } + + if ssm_document.version_name: + base["DefaultVersionName"] = ssm_document.version_name + + return base + + def update_document( + self, + content, + attachments, + name, + version_name, + document_version, + document_format, + target_type, + ): + _validate_document_info( + content=content, + name=name, + document_type=None, + document_format=document_format, + strict=False, + ) + + if not self._documents.get(name): + raise InvalidDocument("The specified document does not exist.") + if ( + self._documents[name]["latest_version"] != document_version + and document_version != "$LATEST" + ): + raise InvalidDocumentVersion( + "The document version is not valid or does not exist." + ) + if version_name and self._find_document( + name, version_name=version_name, strict=False + ): + raise DuplicateDocumentVersionName( + "The specified version name is a duplicate." 
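
# --- Usage sketch: _find_document above resolves by default version, explicit
# DocumentVersion, or VersionName, and after an update (the path continuing
# just below) older versions stay addressable:
import json

import boto3
from moto import mock_ssm

@mock_ssm
def test_document_versions():
    ssm = boto3.client("ssm", region_name="us-east-1")
    v1 = json.dumps({"schemaVersion": "2.2", "mainSteps": []})
    v2 = json.dumps({"schemaVersion": "2.2", "description": "v2", "mainSteps": []})
    ssm.create_document(Content=v1, Name="versioned-doc", DocumentType="Command")
    ssm.update_document(Content=v2, Name="versioned-doc", DocumentVersion="$LATEST")
    ssm.update_document_default_version(Name="versioned-doc", DocumentVersion="2")
    old = ssm.get_document(Name="versioned-doc", DocumentVersion="1")
    assert old["DocumentVersion"] == "1"
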
+ ) + + old_ssm_document = self._find_document(name) + + new_ssm_document = Document( + name=name, + version_name=version_name, + content=content, + document_type=old_ssm_document.document_type, + document_format=document_format, + requires=old_ssm_document.requires, + attachments=attachments, + target_type=target_type, + tags=old_ssm_document.tags, + document_version=str(int(self._documents[name]["latest_version"]) + 1), + ) + + for doc_version, document in self._documents[name]["documents"].items(): + if document.content == new_ssm_document.content: + raise DuplicateDocumentContent( + "The content of the association document matches another document. " + "Change the content of the document and try again." + ) + + self._documents[name]["latest_version"] = str( + int(self._documents[name]["latest_version"]) + 1 + ) + self._documents[name]["documents"][ + new_ssm_document.document_version + ] = new_ssm_document + + return self._generate_document_description(new_ssm_document) + + def describe_document(self, name, document_version, version_name): + ssm_document = self._find_document(name, document_version, version_name) + return self._generate_document_description(ssm_document) + + def list_documents( + self, document_filter_list, filters, max_results=10, next_token="0" + ): + if document_filter_list: + raise ValidationException( + "DocumentFilterList is deprecated. Instead use Filters." + ) + + next_token = int(next_token) + results = [] + dummy_token_tracker = 0 + # Sort to maintain next token adjacency + for document_name, document_bundle in sorted(self._documents.items()): + if len(results) == max_results: + # There's still more to go so we need a next token + return results, str(next_token + len(results)) + + if dummy_token_tracker < next_token: + dummy_token_tracker = dummy_token_tracker + 1 + continue + + default_version = document_bundle["default_version"] + ssm_doc = self._documents[document_name]["documents"][default_version] + if filters and not _document_filter_match(filters, ssm_doc): + # If we have filters enabled, and we don't match them, + continue + else: + results.append(self._generate_document_list_information(ssm_doc)) + + # If we've fallen out of the loop, theres no more documents. No next token. + return results, "" def delete_parameter(self, name): - try: - del self._parameters[name] - except KeyError: - pass + return self._parameters.pop(name, None) def delete_parameters(self, names): result = [] @@ -416,6 +962,13 @@ class SimpleSystemManagerBackend(BaseBackend): "The following filter key is not valid: Label. Valid filter keys include: [Path, Name, Type, KeyId, Tier]." ) + if by_path and key in ["Name", "Path", "Tier"]: + raise InvalidFilterKey( + "The following filter key is not valid: {key}. Valid filter keys include: [Type, KeyId].".format( + key=key + ) + ) + if not values: raise InvalidFilterValue( "The following filter values are missing : null for filter key Name." @@ -475,7 +1028,10 @@ class SimpleSystemManagerBackend(BaseBackend): ) ) - if key != "Path" and option not in ["Equals", "BeginsWith"]: + allowed_options = ["Equals", "BeginsWith"] + if key == "Name": + allowed_options += ["Contains"] + if key != "Path" and option not in allowed_options: raise InvalidFilterOption( "The following filter option is not valid: {option}. 
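
# --- Paging sketch: list_documents above pages with a numeric NextToken over
# name-sorted documents, rejects the deprecated DocumentFilterList outright,
# and applies the filter comparators from earlier in this patch ("Self" in an
# Owner filter is rewritten to the mocked account id). Assumes an ssm client
# with documents already created:
resp = ssm.list_documents(MaxResults=10, Filters=[{"Key": "Owner", "Values": ["Self"]}])
names = [d["Name"] for d in resp["DocumentIdentifiers"]]
while resp.get("NextToken"):  # an empty token means no more pages
    resp = ssm.list_documents(MaxResults=10, NextToken=resp["NextToken"])
    names += [d["Name"] for d in resp["DocumentIdentifiers"]]
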
Valid options include: [BeginsWith, Equals].".format( option=option @@ -510,6 +1066,16 @@ class SimpleSystemManagerBackend(BaseBackend): def get_parameters(self, names, with_decryption): result = [] + + if len(names) > 10: + raise ValidationException( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(names) + ) + ) + for name in names: if name in self._parameters: result.append(self.get_parameter(name, with_decryption)) @@ -525,6 +1091,9 @@ class SimpleSystemManagerBackend(BaseBackend): max_results=10, ): """Implement the get-parameters-by-path-API in the backend.""" + + self._validate_parameter_filters(filters, by_path=True) + result = [] # path could be with or without a trailing /. we handle this # difference here. @@ -575,7 +1144,8 @@ class SimpleSystemManagerBackend(BaseBackend): what = parameter.keyid elif key == "Name": what = "/" + parameter.name.lstrip("/") - values = ["/" + value.lstrip("/") for value in values] + if option != "Contains": + values = ["/" + value.lstrip("/") for value in values] elif key == "Path": what = "/" + parameter.name.lstrip("/") values = ["/" + value.strip("/") for value in values] @@ -588,6 +1158,8 @@ class SimpleSystemManagerBackend(BaseBackend): what.startswith(value) for value in values ): return False + elif option == "Contains" and not any(value in what for value in values): + return False elif option == "Equals" and not any(what == value for value in values): return False elif option == "OneLevel": @@ -613,13 +1185,114 @@ class SimpleSystemManagerBackend(BaseBackend): return True def get_parameter(self, name, with_decryption): - if name in self._parameters: - return self._parameters[name][-1] + name_parts = name.split(":") + name_prefix = name_parts[0] + + if len(name_parts) > 2: + return None + + if name_prefix in self._parameters: + if len(name_parts) == 1: + return self._parameters[name][-1] + + if len(name_parts) == 2: + version_or_label = name_parts[1] + parameters = self._parameters[name_prefix] + + if version_or_label.isdigit(): + result = list( + filter(lambda x: str(x.version) == version_or_label, parameters) + ) + if len(result) > 0: + return result[-1] + + result = list( + filter(lambda x: version_or_label in x.labels, parameters) + ) + if len(result) > 0: + return result[-1] + return None + def label_parameter_version(self, name, version, labels): + previous_parameter_versions = self._parameters[name] + if not previous_parameter_versions: + raise ParameterNotFound("Parameter %s not found." % name) + found_parameter = None + labels_needing_removal = [] + if not version: + version = 1 + for parameter in previous_parameter_versions: + if parameter.version >= version: + version = parameter.version + for parameter in previous_parameter_versions: + if parameter.version == version: + found_parameter = parameter + else: + for label in labels: + if label in parameter.labels: + labels_needing_removal.append(label) + if not found_parameter: + raise ParameterVersionNotFound( + "Systems Manager could not find version %s of %s. " + "Verify the version and try again." 
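
# --- Usage sketch: get_parameters above now enforces the AWS batch limit of
# ten names per call (botocore only validates minimum lengths client-side, so
# the request reaches the backend):
import boto3
from botocore.exceptions import ClientError
from moto import mock_ssm

@mock_ssm
def test_get_parameters_limit():
    ssm = boto3.client("ssm", region_name="us-east-1")
    try:
        ssm.get_parameters(Names=["param-%d" % i for i in range(11)])
        raise AssertionError("expected ValidationException")
    except ClientError as err:
        assert "length less than or equal to 10" in err.response["Error"]["Message"]
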
% (version, name) + ) + labels_to_append = [] + invalid_labels = [] + for label in labels: + if ( + label.startswith("aws") + or label.startswith("ssm") + or label[:1].isdigit() + or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) + ): + invalid_labels.append(label) + continue + if len(label) > 100: + raise ValidationException( + "1 validation error detected: " + "Value '[%s]' at 'labels' failed to satisfy constraint: " + "Member must satisfy constraint: " + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" + % label + ) + continue + if label not in found_parameter.labels: + labels_to_append.append(label) + if (len(found_parameter.labels) + len(labels_to_append)) > 10: + raise ParameterVersionLabelLimitExceeded( + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." + "Move one or more labels to another version and try again." + ) + found_parameter.labels = found_parameter.labels + labels_to_append + for parameter in previous_parameter_versions: + if parameter.version != version: + for label in parameter.labels[:]: + if label in labels_needing_removal: + parameter.labels.remove(label) + return [invalid_labels, version] + def put_parameter( self, name, description, value, type, allowed_pattern, keyid, overwrite ): + if name.lower().lstrip("/").startswith("aws") or name.lower().lstrip( + "/" + ).startswith("ssm"): + is_path = name.count("/") > 1 + if name.lower().startswith("/aws") and is_path: + raise AccessDeniedException( + "No access to reserved parameter name: {name}.".format(name=name) + ) + if not is_path: + invalid_prefix_error = 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).' + else: + invalid_prefix_error = ( + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). 
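
# --- Usage sketch: the selector parsing in get_parameter plus
# label_parameter_version above let a parameter be addressed as name:version
# or name:label:
import boto3
from moto import mock_ssm

@mock_ssm
def test_parameter_selectors():
    ssm = boto3.client("ssm", region_name="us-east-1")
    ssm.put_parameter(Name="/app/setting", Value="one", Type="String")
    ssm.put_parameter(Name="/app/setting", Value="two", Type="String", Overwrite=True)
    ssm.label_parameter_version(
        Name="/app/setting", ParameterVersion=1, Labels=["stable"]
    )
    assert ssm.get_parameter(Name="/app/setting:1")["Parameter"]["Value"] == "one"
    assert ssm.get_parameter(Name="/app/setting:stable")["Parameter"]["Value"] == "one"
    assert ssm.get_parameter(Name="/app/setting")["Parameter"]["Value"] == "two"
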
' + "If formed as a path, it can consist of sub-paths divided by slash symbol; each sub-path can be " + "formed as a mix of letters, numbers and the following 3 symbols .-_" + ) + raise ValidationException(invalid_prefix_error) previous_parameter_versions = self._parameters[name] if len(previous_parameter_versions) == 0: previous_parameter = None @@ -731,5 +1404,9 @@ class SimpleSystemManagerBackend(BaseBackend): ssm_backends = {} -for region, ec2_backend in ec2_backends.items(): - ssm_backends[region] = SimpleSystemManagerBackend() +for region in Session().get_available_regions("ssm"): + ssm_backends[region] = SimpleSystemManagerBackend(region) +for region in Session().get_available_regions("ssm", partition_name="aws-us-gov"): + ssm_backends[region] = SimpleSystemManagerBackend(region) +for region in Session().get_available_regions("ssm", partition_name="aws-cn"): + ssm_backends[region] = SimpleSystemManagerBackend(region) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 1b13780a8..66606c283 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -17,12 +17,128 @@ class SimpleSystemManagerResponse(BaseResponse): except ValueError: return {} + def create_document(self): + content = self._get_param("Content") + requires = self._get_param("Requires") + attachments = self._get_param("Attachments") + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_type = self._get_param("DocumentType") + document_format = self._get_param("DocumentFormat", "JSON") + target_type = self._get_param("TargetType") + tags = self._get_param("Tags") + + result = self.ssm_backend.create_document( + content=content, + requires=requires, + attachments=attachments, + name=name, + version_name=version_name, + document_type=document_type, + document_format=document_format, + target_type=target_type, + tags=tags, + ) + + return json.dumps({"DocumentDescription": result}) + + def delete_document(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + version_name = self._get_param("VersionName") + force = self._get_param("Force", False) + self.ssm_backend.delete_document( + name=name, + document_version=document_version, + version_name=version_name, + force=force, + ) + + return json.dumps({}) + + def get_document(self): + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_version = self._get_param("DocumentVersion") + document_format = self._get_param("DocumentFormat", "JSON") + + document = self.ssm_backend.get_document( + name=name, + document_version=document_version, + document_format=document_format, + version_name=version_name, + ) + + return json.dumps(document) + + def describe_document(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + version_name = self._get_param("VersionName") + + result = self.ssm_backend.describe_document( + name=name, document_version=document_version, version_name=version_name + ) + + return json.dumps({"Document": result}) + + def update_document(self): + content = self._get_param("Content") + attachments = self._get_param("Attachments") + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_version = self._get_param("DocumentVersion") + document_format = self._get_param("DocumentFormat", "JSON") + target_type = self._get_param("TargetType") + + result = self.ssm_backend.update_document( + content=content, + attachments=attachments, + name=name, + 
version_name=version_name, + document_version=document_version, + document_format=document_format, + target_type=target_type, + ) + + return json.dumps({"DocumentDescription": result}) + + def update_document_default_version(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + + result = self.ssm_backend.update_document_default_version( + name=name, document_version=document_version + ) + return json.dumps({"Description": result}) + + def list_documents(self): + document_filter_list = self._get_param("DocumentFilterList") + filters = self._get_param("Filters") + max_results = self._get_param("MaxResults", 10) + next_token = self._get_param("NextToken", "0") + + documents, token = self.ssm_backend.list_documents( + document_filter_list=document_filter_list, + filters=filters, + max_results=max_results, + next_token=next_token, + ) + + return json.dumps({"DocumentIdentifiers": documents, "NextToken": token}) + def _get_param(self, param, default=None): return self.request_params.get(param, default) def delete_parameter(self): name = self._get_param("Name") - self.ssm_backend.delete_parameter(name) + result = self.ssm_backend.delete_parameter(name) + if result is None: + error = { + "__type": "ParameterNotFound", + "message": "Parameter {0} not found.".format(name), + } + return json.dumps(error), dict(status=400) return json.dumps({}) def delete_parameters(self): @@ -162,12 +278,24 @@ class SimpleSystemManagerResponse(BaseResponse): response = {"Parameters": []} for parameter_version in result: param_data = parameter_version.describe_response_object( - decrypt=with_decryption + decrypt=with_decryption, include_labels=True ) response["Parameters"].append(param_data) return json.dumps(response) + def label_parameter_version(self): + name = self._get_param("Name") + version = self._get_param("ParameterVersion") + labels = self._get_param("Labels") + + invalid_labels, version = self.ssm_backend.label_parameter_version( + name, version, labels + ) + + response = {"InvalidLabels": invalid_labels, "ParameterVersion": version} + return json.dumps(response) + def add_tags_to_resource(self): resource_id = self._get_param("ResourceId") resource_type = self._get_param("ResourceType") diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index 704e4ea83..9598c65f9 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -1,22 +1,11 @@ from __future__ import unicode_literals -import json +from moto.core.exceptions import AWSError -class AWSError(Exception): - TYPE = None +class ExecutionAlreadyExists(AWSError): + TYPE = "ExecutionAlreadyExists" STATUS = 400 - def __init__(self, message, type=None, status=None): - self.message = message - self.type = type if type is not None else self.TYPE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.type, "message": self.message}), - dict(status=self.status), - ) - class ExecutionDoesNotExist(AWSError): TYPE = "ExecutionDoesNotExist" @@ -33,6 +22,27 @@ class InvalidName(AWSError): STATUS = 400 +class InvalidExecutionInput(AWSError): + TYPE = "InvalidExecutionInput" + STATUS = 400 + + class StateMachineDoesNotExist(AWSError): TYPE = "StateMachineDoesNotExist" STATUS = 400 + + +class InvalidToken(AWSError): + TYPE = "InvalidToken" + STATUS = 400 + + def __init__(self, message="Invalid token"): + super(InvalidToken, self).__init__("Invalid Token: {}".format(message)) + + +class 
ResourceNotFound(AWSError): + TYPE = "ResourceNotFound" + STATUS = 400 + + def __init__(self, arn): + super(ResourceNotFound, self).__init__("Resource not found: '{}'".format(arn)) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index de530b863..125e5d807 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -1,28 +1,157 @@ +import json import re from datetime import datetime from boto3 import Session -from moto.core import BaseBackend -from moto.core.utils import iso_8601_datetime_without_milliseconds -from moto.sts.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel +from moto.core.utils import iso_8601_datetime_with_milliseconds from uuid import uuid4 from .exceptions import ( + ExecutionAlreadyExists, ExecutionDoesNotExist, InvalidArn, + InvalidExecutionInput, InvalidName, + ResourceNotFound, StateMachineDoesNotExist, ) +from .utils import paginate, api_to_cfn_tags, cfn_to_api_tags -class StateMachine: +class StateMachine(CloudFormationModel): def __init__(self, arn, name, definition, roleArn, tags=None): - self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now()) + self.update_date = self.creation_date self.arn = arn self.name = name self.definition = definition self.roleArn = roleArn - self.tags = tags + self.tags = [] + if tags: + self.add_tags(tags) + + def update(self, **kwargs): + for key, value in kwargs.items(): + if value is not None: + setattr(self, key, value) + self.update_date = iso_8601_datetime_with_milliseconds(datetime.now()) + + def add_tags(self, tags): + merged_tags = [] + for tag in self.tags: + replacement_index = next( + (index for (index, d) in enumerate(tags) if d["key"] == tag["key"]), + None, + ) + if replacement_index is not None: + replacement = tags.pop(replacement_index) + merged_tags.append(replacement) + else: + merged_tags.append(tag) + for tag in tags: + merged_tags.append(tag) + self.tags = merged_tags + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set["key"] not in tag_keys] + return self.tags + + @property + def physical_resource_id(self): + return self.arn + + def get_cfn_properties(self, prop_overrides): + property_names = [ + "DefinitionString", + "RoleArn", + "StateMachineName", + ] + properties = {} + for prop in property_names: + properties[prop] = prop_overrides.get(prop, self.get_cfn_attribute(prop)) + # Special handling for Tags + overridden_keys = [tag["Key"] for tag in prop_overrides.get("Tags", [])] + original_tags_to_include = [ + tag + for tag in self.get_cfn_attribute("Tags") + if tag["Key"] not in overridden_keys + ] + properties["Tags"] = original_tags_to_include + prop_overrides.get("Tags", []) + return properties + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Name": + return self.name + elif attribute_name == "DefinitionString": + return self.definition + elif attribute_name == "RoleArn": + return self.roleArn + elif attribute_name == "StateMachineName": + return self.name + elif attribute_name == "Tags": + return api_to_cfn_tags(self.tags) + + raise UnformattedGetAttTemplateException() + + @staticmethod + def cloudformation_name_type(): + return "StateMachine" + + @staticmethod + def cloudformation_type(): + return "AWS::StepFunctions::StateMachine" + + @classmethod + def 
create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + name = properties.get("StateMachineName", resource_name) + definition = properties.get("DefinitionString", "") + role_arn = properties.get("RoleArn", "") + tags = cfn_to_api_tags(properties.get("Tags", [])) + sf_backend = stepfunction_backends[region_name] + return sf_backend.create_state_machine(name, definition, role_arn, tags=tags) + + @classmethod + def delete_from_cloudformation_json(cls, resource_name, _, region_name): + sf_backend = stepfunction_backends[region_name] + sf_backend.delete_state_machine(resource_name) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + name = properties.get("StateMachineName", original_resource.name) + + if name != original_resource.name: + # Replacement + new_properties = original_resource.get_cfn_properties(properties) + cloudformation_json["Properties"] = new_properties + new_resource = cls.create_from_cloudformation_json( + name, cloudformation_json, region_name + ) + cls.delete_from_cloudformation_json( + original_resource.arn, cloudformation_json, region_name + ) + return new_resource + + else: + # No Interruption + definition = properties.get("DefinitionString") + role_arn = properties.get("RoleArn") + tags = cfn_to_api_tags(properties.get("Tags", [])) + sf_backend = stepfunction_backends[region_name] + state_machine = sf_backend.update_state_machine( + original_resource.arn, definition=definition, role_arn=role_arn, + ) + state_machine.add_tags(tags) + return state_machine class Execution: @@ -33,6 +162,7 @@ class Execution: state_machine_name, execution_name, state_machine_arn, + execution_input, ): execution_arn = "arn:aws:states:{}:{}:execution:{}:{}" execution_arn = execution_arn.format( @@ -40,14 +170,15 @@ class Execution: ) self.execution_arn = execution_arn self.name = execution_name - self.start_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.start_date = iso_8601_datetime_with_milliseconds(datetime.now()) self.state_machine_arn = state_machine_arn + self.execution_input = execution_input self.status = "RUNNING" self.stop_date = None def stop(self): - self.status = "SUCCEEDED" - self.stop_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.status = "ABORTED" + self.stop_date = iso_8601_datetime_with_milliseconds(datetime.now()) class StepFunctionBackend(BaseBackend): @@ -185,8 +316,10 @@ class StepFunctionBackend(BaseBackend): self.state_machines.append(state_machine) return state_machine + @paginate def list_state_machines(self): - return self.state_machines + state_machines = sorted(self.state_machines, key=lambda x: x.creation_date) + return state_machines def describe_state_machine(self, arn): self._validate_machine_arn(arn) @@ -203,14 +336,26 @@ class StepFunctionBackend(BaseBackend): if sm: self.state_machines.remove(sm) - def start_execution(self, state_machine_arn, name=None): + def update_state_machine(self, arn, definition=None, role_arn=None): + sm = self.describe_state_machine(arn) + updates = { + "definition": definition, + "roleArn": role_arn, + } + sm.update(**updates) + return sm + + def start_execution(self, state_machine_arn, name=None, execution_input=None): state_machine_name = self.describe_state_machine(state_machine_arn).name + self._ensure_execution_name_doesnt_exist(name) + 
self._validate_execution_input(execution_input) execution = Execution( region_name=self.region_name, account_id=self._get_account_id(), state_machine_name=state_machine_name, execution_name=name or str(uuid4()), state_machine_arn=state_machine_arn, + execution_input=execution_input, ) self.executions.append(execution) return execution @@ -226,13 +371,20 @@ class StepFunctionBackend(BaseBackend): execution.stop() return execution - def list_executions(self, state_machine_arn): - return [ + @paginate + def list_executions(self, state_machine_arn, status_filter=None): + executions = [ execution for execution in self.executions if execution.state_machine_arn == state_machine_arn ] + if status_filter: + executions = list(filter(lambda e: e.status == status_filter, executions)) + + executions = sorted(executions, key=lambda x: x.start_date, reverse=True) + return executions + def describe_execution(self, arn): self._validate_execution_arn(arn) exctn = next((x for x in self.executions if x.execution_arn == arn), None) @@ -240,6 +392,20 @@ class StepFunctionBackend(BaseBackend): raise ExecutionDoesNotExist("Execution Does Not Exist: '" + arn + "'") return exctn + def tag_resource(self, resource_arn, tags): + try: + state_machine = self.describe_state_machine(resource_arn) + state_machine.add_tags(tags) + except StateMachineDoesNotExist: + raise ResourceNotFound(resource_arn) + + def untag_resource(self, resource_arn, tag_keys): + try: + state_machine = self.describe_state_machine(resource_arn) + state_machine.remove_tags(tag_keys) + except StateMachineDoesNotExist: + raise ResourceNotFound(resource_arn) + def reset(self): region_name = self.region_name self.__dict__ = {} @@ -278,6 +444,21 @@ class StepFunctionBackend(BaseBackend): if not arn or not match: raise InvalidArn(invalid_msg) + def _ensure_execution_name_doesnt_exist(self, name): + for execution in self.executions: + if execution.name == name: + raise ExecutionAlreadyExists( + "Execution Already Exists: '" + execution.execution_arn + "'" + ) + + def _validate_execution_input(self, execution_input): + try: + json.loads(execution_input) + except Exception as ex: + raise InvalidExecutionInput( + "Invalid State Machine Execution Input: '" + str(ex) + "'" + ) + def _get_account_id(self): return ACCOUNT_ID diff --git a/moto/stepfunctions/responses.py b/moto/stepfunctions/responses.py index 689961d5a..7eae8091b 100644 --- a/moto/stepfunctions/responses.py +++ b/moto/stepfunctions/responses.py @@ -33,19 +33,22 @@ class StepFunctionResponse(BaseResponse): @amzn_request_id def list_state_machines(self): - list_all = self.stepfunction_backend.list_state_machines() - list_all = sorted( - [ - { - "creationDate": sm.creation_date, - "name": sm.name, - "stateMachineArn": sm.arn, - } - for sm in list_all - ], - key=lambda x: x["name"], + max_results = self._get_int_param("maxResults") + next_token = self._get_param("nextToken") + results, next_token = self.stepfunction_backend.list_state_machines( + max_results=max_results, next_token=next_token ) - response = {"stateMachines": list_all} + state_machines = [ + { + "creationDate": sm.creation_date, + "name": sm.name, + "stateMachineArn": sm.arn, + } + for sm in results + ] + response = {"stateMachines": state_machines} + if next_token: + response["nextToken"] = next_token return 200, {}, json.dumps(response) @amzn_request_id @@ -80,6 +83,22 @@ class StepFunctionResponse(BaseResponse): except AWSError as err: return err.response() + @amzn_request_id + def update_state_machine(self): + arn = 
self._get_param("stateMachineArn") + definition = self._get_param("definition") + role_arn = self._get_param("roleArn") + try: + state_machine = self.stepfunction_backend.update_state_machine( + arn=arn, definition=definition, role_arn=role_arn + ) + response = { + "updateDate": state_machine.update_date, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + @amzn_request_id def list_tags_for_resource(self): arn = self._get_param("resourceArn") @@ -91,11 +110,37 @@ class StepFunctionResponse(BaseResponse): response = {"tags": tags} return 200, {}, json.dumps(response) + @amzn_request_id + def tag_resource(self): + arn = self._get_param("resourceArn") + tags = self._get_param("tags", []) + try: + self.stepfunction_backend.tag_resource(arn, tags) + except AWSError as err: + return err.response() + return 200, {}, json.dumps({}) + + @amzn_request_id + def untag_resource(self): + arn = self._get_param("resourceArn") + tag_keys = self._get_param("tagKeys", []) + try: + self.stepfunction_backend.untag_resource(arn, tag_keys) + except AWSError as err: + return err.response() + return 200, {}, json.dumps({}) + @amzn_request_id def start_execution(self): arn = self._get_param("stateMachineArn") name = self._get_param("name") - execution = self.stepfunction_backend.start_execution(arn, name) + execution_input = self._get_param("input", if_none="{}") + try: + execution = self.stepfunction_backend.start_execution( + arn, name, execution_input + ) + except AWSError as err: + return err.response() response = { "executionArn": execution.execution_arn, "startDate": execution.start_date, @@ -104,9 +149,20 @@ class StepFunctionResponse(BaseResponse): @amzn_request_id def list_executions(self): + max_results = self._get_int_param("maxResults") + next_token = self._get_param("nextToken") arn = self._get_param("stateMachineArn") - state_machine = self.stepfunction_backend.describe_state_machine(arn) - executions = self.stepfunction_backend.list_executions(arn) + status_filter = self._get_param("statusFilter") + try: + state_machine = self.stepfunction_backend.describe_state_machine(arn) + results, next_token = self.stepfunction_backend.list_executions( + arn, + status_filter=status_filter, + max_results=max_results, + next_token=next_token, + ) + except AWSError as err: + return err.response() executions = [ { "executionArn": execution.execution_arn, @@ -115,9 +171,12 @@ class StepFunctionResponse(BaseResponse): "stateMachineArn": state_machine.arn, "status": execution.status, } - for execution in executions + for execution in results ] - return 200, {}, json.dumps({"executions": executions}) + response = {"executions": executions} + if next_token: + response["nextToken"] = next_token + return 200, {}, json.dumps(response) @amzn_request_id def describe_execution(self): @@ -126,7 +185,7 @@ class StepFunctionResponse(BaseResponse): execution = self.stepfunction_backend.describe_execution(arn) response = { "executionArn": arn, - "input": "{}", + "input": execution.execution_input, "name": execution.name, "startDate": execution.start_date, "stateMachineArn": execution.state_machine_arn, diff --git a/moto/stepfunctions/utils.py b/moto/stepfunctions/utils.py new file mode 100644 index 000000000..130ffe792 --- /dev/null +++ b/moto/stepfunctions/utils.py @@ -0,0 +1,148 @@ +from functools import wraps + +from botocore.paginate import TokenDecoder, TokenEncoder +from six.moves import reduce + +from .exceptions import InvalidToken + +PAGINATION_MODEL = { + "list_executions": { + 
"input_token": "next_token", + "limit_key": "max_results", + "limit_default": 100, + "page_ending_range_keys": ["start_date", "execution_arn"], + }, + "list_state_machines": { + "input_token": "next_token", + "limit_key": "max_results", + "limit_default": 100, + "page_ending_range_keys": ["creation_date", "arn"], + }, +} + + +def paginate(original_function=None, pagination_model=None): + def pagination_decorator(func): + @wraps(func) + def pagination_wrapper(*args, **kwargs): + method = func.__name__ + model = pagination_model or PAGINATION_MODEL + pagination_config = model.get(method) + if not pagination_config: + raise ValueError( + "No pagination config for backend method: {}".format(method) + ) + # We pop the pagination arguments, so the remaining kwargs (if any) + # can be used to compute the optional parameters checksum. + input_token = kwargs.pop(pagination_config.get("input_token"), None) + limit = kwargs.pop(pagination_config.get("limit_key"), None) + paginator = Paginator( + max_results=limit, + max_results_default=pagination_config.get("limit_default"), + starting_token=input_token, + page_ending_range_keys=pagination_config.get("page_ending_range_keys"), + param_values_to_check=kwargs, + ) + results = func(*args, **kwargs) + return paginator.paginate(results) + + return pagination_wrapper + + if original_function: + return pagination_decorator(original_function) + + return pagination_decorator + + +class Paginator(object): + def __init__( + self, + max_results=None, + max_results_default=None, + starting_token=None, + page_ending_range_keys=None, + param_values_to_check=None, + ): + self._max_results = max_results if max_results else max_results_default + self._starting_token = starting_token + self._page_ending_range_keys = page_ending_range_keys + self._param_values_to_check = param_values_to_check + self._token_encoder = TokenEncoder() + self._token_decoder = TokenDecoder() + self._param_checksum = self._calculate_parameter_checksum() + self._parsed_token = self._parse_starting_token() + + def _parse_starting_token(self): + if self._starting_token is None: + return None + # The starting token is a dict passed as a base64 encoded string. 
+ next_token = self._starting_token + try: + next_token = self._token_decoder.decode(next_token) + except (ValueError, TypeError): + raise InvalidToken("Invalid token") + if next_token.get("parameterChecksum") != self._param_checksum: + raise InvalidToken( + "Input inconsistent with page token: {}".format(str(next_token)) + ) + return next_token + + def _calculate_parameter_checksum(self): + if not self._param_values_to_check: + return None + return reduce( + lambda x, y: x ^ y, + [hash(item) for item in self._param_values_to_check.items()], + ) + + def _check_predicate(self, item): + page_ending_range_key = self._parsed_token["pageEndingRangeKey"] + predicate_values = page_ending_range_key.split("|") + for (index, attr) in enumerate(self._page_ending_range_keys): + if not getattr(item, attr, None) == predicate_values[index]: + return False + return True + + def _build_next_token(self, next_item): + token_dict = {} + if self._param_checksum: + token_dict["parameterChecksum"] = self._param_checksum + range_keys = [] + for (index, attr) in enumerate(self._page_ending_range_keys): + range_keys.append(getattr(next_item, attr)) + token_dict["pageEndingRangeKey"] = "|".join(range_keys) + return TokenEncoder().encode(token_dict) + + def paginate(self, results): + index_start = 0 + if self._starting_token: + try: + index_start = next( + index + for (index, result) in enumerate(results) + if self._check_predicate(result) + ) + except StopIteration: + raise InvalidToken("Resource not found!") + + index_end = index_start + self._max_results + if index_end > len(results): + index_end = len(results) + + results_page = results[index_start:index_end] + + next_token = None + if results_page and index_end < len(results): + page_ending_result = results[index_end] + next_token = self._build_next_token(page_ending_result) + return results_page, next_token + + +def cfn_to_api_tags(cfn_tags_entry): + api_tags = [{k.lower(): v for k, v in d.items()} for d in cfn_tags_entry] + return api_tags + + +def api_to_cfn_tags(api_tags): + cfn_tags_entry = [{k.capitalize(): v for k, v in d.items()} for d in api_tags] + return cfn_tags_entry diff --git a/moto/sts/models.py b/moto/sts/models.py index 12824b2ed..b274b1acd 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +from base64 import b64decode import datetime +import xmltodict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.core import ACCOUNT_ID @@ -79,5 +81,24 @@ class STSBackend(BaseBackend): def assume_role_with_web_identity(self, **kwargs): return self.assume_role(**kwargs) + def assume_role_with_saml(self, **kwargs): + del kwargs["principal_arn"] + saml_assertion_encoded = kwargs.pop("saml_assertion") + saml_assertion_decoded = b64decode(saml_assertion_encoded) + saml_assertion = xmltodict.parse(saml_assertion_decoded.decode("utf-8")) + kwargs["duration"] = int( + saml_assertion["samlp:Response"]["Assertion"]["AttributeStatement"][ + "Attribute" + ][2]["AttributeValue"] + ) + kwargs["role_session_name"] = saml_assertion["samlp:Response"]["Assertion"][ + "AttributeStatement" + ]["Attribute"][0]["AttributeValue"] + kwargs["external_id"] = None + kwargs["policy"] = None + role = AssumedRole(**kwargs) + self.assumed_roles.append(role) + return role + sts_backend = STSBackend() diff --git a/moto/sts/responses.py b/moto/sts/responses.py index f36799b03..9af2c3e12 100644 --- a/moto/sts/responses.py +++ b/moto/sts/responses.py @@ -71,6 
+71,19 @@ class TokenResponse(BaseResponse): template = self.response_template(ASSUME_ROLE_WITH_WEB_IDENTITY_RESPONSE) return template.render(role=role) + def assume_role_with_saml(self): + role_arn = self.querystring.get("RoleArn")[0] + principal_arn = self.querystring.get("PrincipalArn")[0] + saml_assertion = self.querystring.get("SAMLAssertion")[0] + + role = sts_backend.assume_role_with_saml( + role_arn=role_arn, + principal_arn=principal_arn, + saml_assertion=saml_assertion, + ) + template = self.response_template(ASSUME_ROLE_WITH_SAML_RESPONSE) + return template.render(role=role) + def get_caller_identity(self): template = self.response_template(GET_CALLER_IDENTITY_RESPONSE) @@ -168,6 +181,30 @@ ASSUME_ROLE_WITH_WEB_IDENTITY_RESPONSE = """""" +ASSUME_ROLE_WITH_SAML_RESPONSE = """<AssumeRoleWithSAMLResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <AssumeRoleWithSAMLResult>
+    <Audience>https://signin.aws.amazon.com/saml</Audience>
+    <AssumedRoleUser>
+      <AssumedRoleId>{{ role.user_id }}</AssumedRoleId>
+      <Arn>{{ role.arn }}</Arn>
+    </AssumedRoleUser>
+    <Credentials>
+      <AccessKeyId>{{ role.access_key_id }}</AccessKeyId>
+      <SecretAccessKey>{{ role.secret_access_key }}</SecretAccessKey>
+      <SessionToken>{{ role.session_token }}</SessionToken>
+      <Expiration>{{ role.expiration_ISO8601 }}</Expiration>
+    </Credentials>
+    <Subject>{{ role.user_id }}</Subject>
+    <NameQualifier>B64EncodedStringOfHashOfIssuerAccountIdAndUserId=</NameQualifier>
+    <SubjectType>persistent</SubjectType>
+    <Issuer>http://localhost:3000/</Issuer>
+  </AssumeRoleWithSAMLResult>
+  <ResponseMetadata>
+    <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
+  </ResponseMetadata>
+</AssumeRoleWithSAMLResponse>""" + + GET_CALLER_IDENTITY_RESPONSE = """<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/"> <GetCallerIdentityResult> <Arn>{{ arn }}</Arn> diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index e5b285f5b..010c8c734 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -121,6 +121,12 @@ class SWFBackend(BaseBackend): raise SWFDomainDeprecatedFault(name) domain.status = "DEPRECATED" + def undeprecate_domain(self, name): + domain = self._get_domain(name) + if domain.status == "REGISTERED": + raise SWFDomainAlreadyExistsFault(name) + domain.status = "REGISTERED" + def describe_domain(self, name): return self._get_domain(name) @@ -148,6 +154,13 @@ class SWFBackend(BaseBackend): raise SWFTypeDeprecatedFault(_type) _type.status = "DEPRECATED" + def undeprecate_type(self, kind, domain_name, name, version): + domain = self._get_domain(domain_name) + _type = domain.get_type(kind, name, version) + if _type.status == "REGISTERED": + raise SWFTypeAlreadyExistsFault(_type) + _type.status = "REGISTERED" + def describe_type(self, kind, domain_name, name, version): domain = self._get_domain(domain_name) return domain.get_type(kind, name, version) diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py index c8c9824a2..aaf810f08 100644 --- a/moto/swf/models/decision_task.py +++ b/moto/swf/models/decision_task.py @@ -15,7 +15,7 @@ class DecisionTask(BaseModel): self.workflow_type = workflow_execution.workflow_type self.task_token = str(uuid.uuid4()) self.scheduled_event_id = scheduled_event_id - self.previous_started_event_id = 0 + self.previous_started_event_id = None self.started_event_id = None self.started_timestamp = None self.start_to_close_timeout = ( @@ -40,18 +40,20 @@ class DecisionTask(BaseModel): hsh = { "events": [evt.to_dict() for evt in events], "taskToken": self.task_token, - "previousStartedEventId": self.previous_started_event_id, "workflowExecution": self.workflow_execution.to_short_dict(), "workflowType": self.workflow_type.to_short_dict(), } + if self.previous_started_event_id is not None: + hsh["previousStartedEventId"] = self.previous_started_event_id if self.started_event_id: hsh["startedEventId"] = self.started_event_id return hsh - def start(self, started_event_id): + def start(self, started_event_id, previous_started_event_id=None): self.state = "STARTED" self.started_timestamp = unix_time() self.started_event_id = started_event_id + 
self.previous_started_event_id = previous_started_event_id def complete(self): self._check_workflow_execution_open() diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 4d91b1f6f..035a47558 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -82,6 +82,7 @@ class WorkflowExecution(BaseModel): self._events = [] # child workflows self.child_workflow_executions = [] + self._previous_started_event_id = None def __repr__(self): return "WorkflowExecution(run_id: {0})".format(self.run_id) @@ -127,6 +128,10 @@ class WorkflowExecution(BaseModel): "executionInfo": self.to_medium_dict(), "executionConfiguration": {"taskList": {"name": self.task_list}}, } + # info + if self.execution_status == "CLOSED": + hsh["executionInfo"]["closeStatus"] = self.close_status + hsh["executionInfo"]["closeTimestamp"] = self.close_timestamp # configuration for key in self._configuration_keys: attr = camelcase_to_underscores(key) @@ -291,7 +296,8 @@ class WorkflowExecution(BaseModel): scheduled_event_id=dt.scheduled_event_id, identity=identity, ) - dt.start(evt.event_id) + dt.start(evt.event_id, self._previous_started_event_id) + self._previous_started_event_id = evt.event_id def complete_decision_task( self, task_token, decisions=None, execution_context=None diff --git a/moto/swf/models/workflow_type.py b/moto/swf/models/workflow_type.py index ddb2475b2..137f0e221 100644 --- a/moto/swf/models/workflow_type.py +++ b/moto/swf/models/workflow_type.py @@ -8,6 +8,8 @@ class WorkflowType(GenericType): "defaultChildPolicy", "defaultExecutionStartToCloseTimeout", "defaultTaskStartToCloseTimeout", + "defaultTaskPriority", + "defaultLambdaRole", ] @property diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 98b736cda..17ec7281a 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -92,6 +92,17 @@ class SWFResponse(BaseResponse): self.swf_backend.deprecate_type(kind, domain, name, version) return "" + def _undeprecate_type(self, kind): + domain = self._params["domain"] + _type_args = self._params["{0}Type".format(kind)] + name = _type_args["name"] + version = _type_args["version"] + self._check_string(domain) + self._check_string(name) + self._check_string(version) + self.swf_backend.undeprecate_type(kind, domain, name, version) + return "" + # TODO: implement pagination def list_domains(self): status = self._params["registrationStatus"] @@ -219,6 +230,12 @@ class SWFResponse(BaseResponse): self.swf_backend.deprecate_domain(name) return "" + def undeprecate_domain(self): + name = self._params["name"] + self._check_string(name) + self.swf_backend.undeprecate_domain(name) + return "" + def describe_domain(self): name = self._params["name"] self._check_string(name) @@ -278,6 +295,9 @@ class SWFResponse(BaseResponse): def deprecate_activity_type(self): return self._deprecate_type("activity") + def undeprecate_activity_type(self): + return self._undeprecate_type("activity") + def describe_activity_type(self): return self._describe_type("activity") @@ -300,6 +320,8 @@ class SWFResponse(BaseResponse): default_execution_start_to_close_timeout = self._params.get( "defaultExecutionStartToCloseTimeout" ) + default_task_priority = self._params.get("defaultTaskPriority") + default_lambda_role = self._params.get("defaultLambdaRole") description = self._params.get("description") self._check_string(domain) @@ -309,10 +331,10 @@ class SWFResponse(BaseResponse): self._check_none_or_string(default_child_policy) 
self._check_none_or_string(default_task_start_to_close_timeout) self._check_none_or_string(default_execution_start_to_close_timeout) + self._check_none_or_string(default_task_priority) + self._check_none_or_string(default_lambda_role) self._check_none_or_string(description) - # TODO: add defaultTaskPriority when boto gets to support it - # TODO: add defaultLambdaRole when boto gets to support it self.swf_backend.register_type( "workflow", domain, @@ -322,6 +344,8 @@ class SWFResponse(BaseResponse): default_child_policy=default_child_policy, default_task_start_to_close_timeout=default_task_start_to_close_timeout, default_execution_start_to_close_timeout=default_execution_start_to_close_timeout, + default_task_priority=default_task_priority, + default_lambda_role=default_lambda_role, description=description, ) return "" @@ -329,6 +353,9 @@ class SWFResponse(BaseResponse): def deprecate_workflow_type(self): return self._deprecate_type("workflow") + def undeprecate_workflow_type(self): + return self._undeprecate_type("workflow") + def describe_workflow_type(self): return self._describe_type("workflow") @@ -419,7 +446,9 @@ class SWFResponse(BaseResponse): if decision: return json.dumps(decision.to_full_dict(reverse_order=reverse_order)) else: - return json.dumps({"previousStartedEventId": 0, "startedEventId": 0}) + return json.dumps( + {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""} + ) def count_pending_decision_tasks(self): domain_name = self._params["domain"] @@ -453,7 +482,7 @@ class SWFResponse(BaseResponse): if activity_task: return json.dumps(activity_task.to_full_dict()) else: - return json.dumps({"startedEventId": 0}) + return json.dumps({"startedEventId": 0, "taskToken": ""}) def count_pending_activity_tasks(self): domain_name = self._params["domain"] diff --git a/moto/transcribe/__init__.py b/moto/transcribe/__init__.py new file mode 100644 index 000000000..9c4a7ba2e --- /dev/null +++ b/moto/transcribe/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals + +from .models import transcribe_backends + +transcribe_backend = transcribe_backends["us-east-1"] +mock_transcribe = transcribe_backend.decorator diff --git a/moto/transcribe/exceptions.py b/moto/transcribe/exceptions.py new file mode 100644 index 000000000..d80f1e3e2 --- /dev/null +++ b/moto/transcribe/exceptions.py @@ -0,0 +1,13 @@ +from moto.core.exceptions import JsonRESTError + + +class ConflictException(JsonRESTError): + def __init__(self, message, **kwargs): + super(ConflictException, self).__init__("ConflictException", message, **kwargs) + + +class BadRequestException(JsonRESTError): + def __init__(self, message, **kwargs): + super(BadRequestException, self).__init__( + "BadRequestException", message, **kwargs + ) diff --git a/moto/transcribe/models.py b/moto/transcribe/models.py new file mode 100644 index 000000000..bf8e602e6 --- /dev/null +++ b/moto/transcribe/models.py @@ -0,0 +1,387 @@ +import uuid +from datetime import datetime, timedelta + +from moto.core import BaseBackend, BaseModel +from moto.ec2 import ec2_backends +from moto.sts.models import ACCOUNT_ID +from .exceptions import ConflictException, BadRequestException + + +class BaseObject(BaseModel): + def camelCase(self, key): + words = [] + for i, word in enumerate(key.split("_")): + words.append(word.title()) + return "".join(words) + + def gen_response_object(self): + response_object = dict() + for key, value in self.__dict__.items(): + if "_" in key: + response_object[self.camelCase(key)] = value + else: + 
response_object[key[0].upper() + key[1:]] = value + return response_object + + @property + def response_object(self): + return self.gen_response_object() + + +class FakeMedicalTranscriptionJob(BaseObject): + def __init__( + self, + region_name, + medical_transcription_job_name, + language_code, + media_sample_rate_hertz, + media_format, + media, + output_bucket_name, + output_encryption_kms_key_id, + settings, + specialty, + type, + ): + self._region_name = region_name + self.medical_transcription_job_name = medical_transcription_job_name + self.transcription_job_status = None + self.language_code = language_code + self.media_sample_rate_hertz = media_sample_rate_hertz + self.media_format = media_format + self.media = media + self.transcript = None + self.start_time = self.completion_time = None + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.failure_reason = None + self.settings = settings or { + "ChannelIdentification": False, + "ShowAlternatives": False, + } + self.specialty = specialty + self.type = type + self._output_bucket_name = output_bucket_name + self._output_encryption_kms_key_id = output_encryption_kms_key_id + self.output_location_type = "CUSTOMER_BUCKET" + + def response_object(self, response_type): + response_field_dict = { + "CREATE": [ + "MedicalTranscriptionJobName", + "TranscriptionJobStatus", + "LanguageCode", + "MediaFormat", + "Media", + "StartTime", + "CreationTime", + "Specialty", + "Type", + ], + "GET": [ + "MedicalTranscriptionJobName", + "TranscriptionJobStatus", + "LanguageCode", + "MediaSampleRateHertz", + "MediaFormat", + "Media", + "Transcript", + "StartTime", + "CreationTime", + "CompletionTime", + "Settings", + "Specialty", + "Type", + ], + "LIST": [ + "MedicalTranscriptionJobName", + "CreationTime", + "StartTime", + "CompletionTime", + "LanguageCode", + "TranscriptionJobStatus", + "FailureReason", + "OutputLocationType", + "Specialty", + "Type", + ], + } + response_fields = response_field_dict[response_type] + response_object = self.gen_response_object() + if response_type != "LIST": + return { + "MedicalTranscriptionJob": { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + } + else: + return { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + + def advance_job_status(self): + # On each call advances the fake job status + + if not self.transcription_job_status: + self.transcription_job_status = "QUEUED" + elif self.transcription_job_status == "QUEUED": + self.transcription_job_status = "IN_PROGRESS" + self.start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + if not self.media_sample_rate_hertz: + self.media_sample_rate_hertz = 44100 + if not self.media_format: + file_ext = self.media["MediaFileUri"].split(".")[-1].lower() + self.media_format = ( + file_ext if file_ext in ["mp3", "mp4", "wav", "flac"] else "mp3" + ) + elif self.transcription_job_status == "IN_PROGRESS": + self.transcription_job_status = "COMPLETED" + self.completion_time = (datetime.now() + timedelta(seconds=10)).strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.transcript = { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + self._region_name, + self._output_bucket_name, + self.medical_transcription_job_name, + ) + } + + +class FakeMedicalVocabulary(BaseObject): + def __init__( + self, region_name, vocabulary_name, language_code, vocabulary_file_uri, + ): + self._region_name = region_name + self.vocabulary_name 
= vocabulary_name + self.language_code = language_code + self.vocabulary_file_uri = vocabulary_file_uri + self.vocabulary_state = None + self.last_modified_time = None + self.failure_reason = None + self.download_uri = "https://s3.us-east-1.amazonaws.com/aws-transcribe-dictionary-model-{}-prod/{}/medical/{}/{}/input.txt".format( + region_name, ACCOUNT_ID, self.vocabulary_name, uuid.uuid4() + ) + + def response_object(self, response_type): + response_field_dict = { + "CREATE": [ + "VocabularyName", + "LanguageCode", + "VocabularyState", + "LastModifiedTime", + "FailureReason", + ], + "GET": [ + "VocabularyName", + "LanguageCode", + "VocabularyState", + "LastModifiedTime", + "FailureReason", + "DownloadUri", + ], + "LIST": [ + "VocabularyName", + "LanguageCode", + "LastModifiedTime", + "VocabularyState", + ], + } + response_fields = response_field_dict[response_type] + response_object = self.gen_response_object() + return { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + + def advance_job_status(self): + # On each call advances the fake job status + + if not self.vocabulary_state: + self.vocabulary_state = "PENDING" + self.last_modified_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + elif self.vocabulary_state == "PENDING": + self.vocabulary_state = "READY" + self.last_modified_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + +class TranscribeBackend(BaseBackend): + def __init__(self, region_name=None): + self.medical_transcriptions = {} + self.medical_vocabularies = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def start_medical_transcription_job(self, **kwargs): + + name = kwargs.get("medical_transcription_job_name") + + if name in self.medical_transcriptions: + raise ConflictException( + message="The requested job name already exists. Use a different job name." + ) + + settings = kwargs.get("settings") + vocabulary_name = settings.get("VocabularyName") if settings else None + if vocabulary_name and vocabulary_name not in self.medical_vocabularies: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + transcription_job_object = FakeMedicalTranscriptionJob( + region_name=self.region_name, + medical_transcription_job_name=name, + language_code=kwargs.get("language_code"), + media_sample_rate_hertz=kwargs.get("media_sample_rate_hertz"), + media_format=kwargs.get("media_format"), + media=kwargs.get("media"), + output_bucket_name=kwargs.get("output_bucket_name"), + output_encryption_kms_key_id=kwargs.get("output_encryption_kms_key_id"), + settings=settings, + specialty=kwargs.get("specialty"), + type=kwargs.get("type"), + ) + + self.medical_transcriptions[name] = transcription_job_object + + return transcription_job_object.response_object("CREATE") + + def get_medical_transcription_job(self, medical_transcription_job_name): + try: + job = self.medical_transcriptions[medical_transcription_job_name] + job.advance_job_status() # Fakes advancement through statuses. + return job.response_object("GET") + except KeyError: + raise BadRequestException( + message="The requested job couldn't be found. Check the job name and try your request again." 
+ ) + + def delete_medical_transcription_job(self, medical_transcription_job_name): + try: + del self.medical_transcriptions[medical_transcription_job_name] + except KeyError: + raise BadRequestException( + message="The requested job couldn't be found. Check the job name and try your request again.", + ) + + def list_medical_transcription_jobs( + self, status, job_name_contains, next_token, max_results + ): + jobs = list(self.medical_transcriptions.values()) + + if status: + jobs = [job for job in jobs if job.transcription_job_status == status] + + if job_name_contains: + jobs = [ + job + for job in jobs + if job_name_contains in job.medical_transcription_job_name + ] + + start_offset = int(next_token) if next_token else 0 + end_offset = start_offset + ( + max_results if max_results else 100 + ) # Arbitrarily selected... + jobs_paginated = jobs[start_offset:end_offset] + + response = { + "MedicalTranscriptionJobSummaries": [ + job.response_object("LIST") for job in jobs_paginated + ] + } + if end_offset < len(jobs): + response["NextToken"] = str(end_offset) + if status: + response["Status"] = status + return response + + def create_medical_vocabulary(self, **kwargs): + + vocabulary_name = kwargs.get("vocabulary_name") + language_code = kwargs.get("language_code") + vocabulary_file_uri = kwargs.get("vocabulary_file_uri") + + if vocabulary_name in self.medical_vocabularies: + raise ConflictException( + message="The requested vocabulary name already exists. Use a different vocabulary name." + ) + + medical_vocabulary_object = FakeMedicalVocabulary( + region_name=self.region_name, + vocabulary_name=vocabulary_name, + language_code=language_code, + vocabulary_file_uri=vocabulary_file_uri, + ) + + self.medical_vocabularies[vocabulary_name] = medical_vocabulary_object + + return medical_vocabulary_object.response_object("CREATE") + + def get_medical_vocabulary(self, vocabulary_name): + try: + job = self.medical_vocabularies[vocabulary_name] + job.advance_job_status() # Fakes advancement through statuses. + return job.response_object("GET") + except KeyError: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + def delete_medical_vocabulary(self, vocabulary_name): + try: + del self.medical_vocabularies[vocabulary_name] + except KeyError: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + def list_medical_vocabularies( + self, state_equals, name_contains, next_token, max_results + ): + vocabularies = list(self.medical_vocabularies.values()) + + if state_equals: + vocabularies = [ + vocabulary + for vocabulary in vocabularies + if vocabulary.vocabulary_state == state_equals + ] + + if name_contains: + vocabularies = [ + vocabulary + for vocabulary in vocabularies + if name_contains in vocabulary.vocabulary_name + ] + + start_offset = int(next_token) if next_token else 0 + end_offset = start_offset + ( + max_results if max_results else 100 + ) # Arbitrarily selected... 
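+        # For example (numbers illustrative): with 150 vocabularies and no
+        # MaxResults, the first call returns items 0-99 plus NextToken="100";
+        # passing that token back returns the remaining 50 with no NextToken.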
+ vocabularies_paginated = vocabularies[start_offset:end_offset] + + response = { + "Vocabularies": [ + vocabulary.response_object("LIST") + for vocabulary in vocabularies_paginated + ] + } + if end_offset < len(vocabularies): + response["NextToken"] = str(end_offset) + if state_equals: + response["Status"] = state_equals + return response + + +transcribe_backends = {} +for region, ec2_backend in ec2_backends.items(): + transcribe_backends[region] = TranscribeBackend(region_name=region) diff --git a/moto/transcribe/responses.py b/moto/transcribe/responses.py new file mode 100644 index 000000000..54d718b3c --- /dev/null +++ b/moto/transcribe/responses.py @@ -0,0 +1,111 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id +from .models import transcribe_backends + + +class TranscribeResponse(BaseResponse): + @property + def transcribe_backend(self): + return transcribe_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + @amzn_request_id + def start_medical_transcription_job(self): + name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.start_medical_transcription_job( + medical_transcription_job_name=name, + language_code=self._get_param("LanguageCode"), + media_sample_rate_hertz=self._get_param("MediaSampleRateHertz"), + media_format=self._get_param("MediaFormat"), + media=self._get_param("Media"), + output_bucket_name=self._get_param("OutputBucketName"), + output_encryption_kms_key_id=self._get_param("OutputEncryptionKMSKeyId"), + settings=self._get_param("Settings"), + specialty=self._get_param("Specialty"), + type=self._get_param("Type"), + ) + return json.dumps(response) + + @amzn_request_id + def list_medical_transcription_jobs(self): + status = self._get_param("Status") + job_name_contains = self._get_param("JobNameContains") + next_token = self._get_param("NextToken") + max_results = self._get_param("MaxResults") + + response = self.transcribe_backend.list_medical_transcription_jobs( + status=status, + job_name_contains=job_name_contains, + next_token=next_token, + max_results=max_results, + ) + return json.dumps(response) + + @amzn_request_id + def get_medical_transcription_job(self): + medical_transcription_job_name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.get_medical_transcription_job( + medical_transcription_job_name=medical_transcription_job_name + ) + return json.dumps(response) + + @amzn_request_id + def delete_medical_transcription_job(self): + medical_transcription_job_name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.delete_medical_transcription_job( + medical_transcription_job_name=medical_transcription_job_name + ) + return json.dumps(response) + + @amzn_request_id + def create_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + language_code = self._get_param("LanguageCode") + vocabulary_file_uri = self._get_param("VocabularyFileUri") + response = self.transcribe_backend.create_medical_vocabulary( + vocabulary_name=vocabulary_name, + language_code=language_code, + vocabulary_file_uri=vocabulary_file_uri, + ) + return json.dumps(response) + + @amzn_request_id + def get_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + response = self.transcribe_backend.get_medical_vocabulary( + 
vocabulary_name=vocabulary_name + ) + return json.dumps(response) + + @amzn_request_id + def list_medical_vocabularies(self): + state_equals = self._get_param("StateEquals") + name_contains = self._get_param("NameContains") + next_token = self._get_param("NextToken") + max_results = self._get_param("MaxResults") + + response = self.transcribe_backend.list_medical_vocabularies( + state_equals=state_equals, + name_contains=name_contains, + next_token=next_token, + max_results=max_results, + ) + return json.dumps(response) + + @amzn_request_id + def delete_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + response = self.transcribe_backend.delete_medical_vocabulary( + vocabulary_name=vocabulary_name + ) + return json.dumps(response) diff --git a/moto/transcribe/urls.py b/moto/transcribe/urls.py new file mode 100644 index 000000000..175f6fea9 --- /dev/null +++ b/moto/transcribe/urls.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .responses import TranscribeResponse + +url_bases = ["https?://transcribe.(.+).amazonaws.com"] + +url_paths = {"{0}/$": TranscribeResponse.dispatch} diff --git a/moto/utilities/__init__.py b/moto/utilities/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/utilities/docker_utilities.py b/moto/utilities/docker_utilities.py new file mode 100644 index 000000000..576a9df1d --- /dev/null +++ b/moto/utilities/docker_utilities.py @@ -0,0 +1,33 @@ +import docker +import functools +import requests.adapters + + +_orig_adapter_send = requests.adapters.HTTPAdapter.send + + +class DockerModel: + def __init__(self): + self.__docker_client = None + + @property + def docker_client(self): + if self.__docker_client is None: + # We should only initiate the Docker Client at runtime. 
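+            # It is memoised on first use, so merely importing moto does not
+            # require a running Docker daemon.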
+ # The docker.from_env() call will fail if Docker is not running + self.__docker_client = docker.from_env() + + # Unfortunately mocking replaces this method w/o fallback enabled, so we + # need to replace it if we detect it's been mocked + if requests.adapters.HTTPAdapter.send != _orig_adapter_send: + _orig_get_adapter = self.docker_client.api.get_adapter + + def replace_adapter_send(*args, **kwargs): + adapter = _orig_get_adapter(*args, **kwargs) + + if isinstance(adapter, requests.adapters.HTTPAdapter): + adapter.send = functools.partial(_orig_adapter_send, adapter) + return adapter + + self.docker_client.api.get_adapter = replace_adapter_send + return self.__docker_client diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py new file mode 100644 index 000000000..2d6ac99c9 --- /dev/null +++ b/moto/utilities/tagging_service.py @@ -0,0 +1,76 @@ +class TaggingService: + def __init__(self, tagName="Tags", keyName="Key", valueName="Value"): + self.tagName = tagName + self.keyName = keyName + self.valueName = valueName + self.tags = {} + + def get_tag_dict_for_resource(self, arn): + result = {} + if self.has_tags(arn): + for k, v in self.tags[arn].items(): + result[k] = v + return result + + def list_tags_for_resource(self, arn): + result = [] + if self.has_tags(arn): + for k, v in self.tags[arn].items(): + result.append({self.keyName: k, self.valueName: v}) + return {self.tagName: result} + + def delete_all_tags_for_resource(self, arn): + if self.has_tags(arn): + del self.tags[arn] + + def has_tags(self, arn): + return arn in self.tags + + def tag_resource(self, arn, tags): + if arn not in self.tags: + self.tags[arn] = {} + for t in tags: + if self.valueName in t: + self.tags[arn][t[self.keyName]] = t[self.valueName] + else: + self.tags[arn][t[self.keyName]] = None + + def copy_tags(self, from_arn, to_arn): + if self.has_tags(from_arn): + self.tag_resource( + to_arn, self.list_tags_for_resource(from_arn)[self.tagName] + ) + + def untag_resource_using_names(self, arn, tag_names): + for name in tag_names: + if name in self.tags.get(arn, {}): + del self.tags[arn][name] + + def untag_resource_using_tags(self, arn, tags): + m = self.tags.get(arn, {}) + for t in tags: + if self.keyName in t: + if t[self.keyName] in m: + if self.valueName in t: + if m[t[self.keyName]] != t[self.valueName]: + continue + # If both key and value are provided, match both before deletion + del m[t[self.keyName]] + + def extract_tag_names(self, tags): + results = [] + if len(tags) == 0: + return results + for tag in tags: + if self.keyName in tag: + results.append(tag[self.keyName]) + return results + + def flatten_tag_list(self, tags): + result = {} + for t in tags: + if self.valueName in t: + result[t[self.keyName]] = t[self.valueName] + else: + result[t[self.keyName]] = None + return result diff --git a/moto/utilities/utils.py b/moto/utilities/utils.py new file mode 100644 index 000000000..6bd5e8b86 --- /dev/null +++ b/moto/utilities/utils.py @@ -0,0 +1,10 @@ +import random +import string + + +def random_string(length=None): + n = length or 20 + random_str = "".join( + [random.choice(string.ascii_letters + string.digits) for i in range(n)] + ) + return random_str diff --git a/moto/xray/exceptions.py b/moto/xray/exceptions.py index 8b5c87e36..2449cb45d 100644 --- a/moto/xray/exceptions.py +++ b/moto/xray/exceptions.py @@ -1,26 +1,3 @@ -import json - - -class AWSError(Exception): - CODE = None - STATUS = 400 - - def __init__(self, message, code=None, status=None): - self.message = message - 
self.code = code if code is not None else self.CODE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.code, "message": self.message}), - dict(status=self.status), - ) - - -class InvalidRequestException(AWSError): - CODE = "InvalidRequestException" - - class BadSegmentException(Exception): def __init__(self, seg_id=None, code=None, message=None): self.id = seg_id diff --git a/moto/xray/models.py b/moto/xray/models.py index 33a271f9b..6352fa37c 100644 --- a/moto/xray/models.py +++ b/moto/xray/models.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals import bisect +from boto3 import Session import datetime from collections import defaultdict import json from moto.core import BaseBackend, BaseModel -from moto.ec2 import ec2_backends -from .exceptions import BadSegmentException, AWSError +from moto.core.exceptions import AWSError +from .exceptions import BadSegmentException class TelemetryRecords(BaseModel): @@ -287,5 +288,9 @@ class XRayBackend(BaseBackend): xray_backends = {} -for region, ec2_backend in ec2_backends.items(): +for region in Session().get_available_regions("xray"): + xray_backends[region] = XRayBackend() +for region in Session().get_available_regions("xray", partition_name="aws-us-gov"): + xray_backends[region] = XRayBackend() +for region in Session().get_available_regions("xray", partition_name="aws-cn"): xray_backends[region] = XRayBackend() diff --git a/moto/xray/responses.py b/moto/xray/responses.py index 118f2de2f..aaf56c80a 100644 --- a/moto/xray/responses.py +++ b/moto/xray/responses.py @@ -3,10 +3,11 @@ import json import datetime from moto.core.responses import BaseResponse +from moto.core.exceptions import AWSError from six.moves.urllib.parse import urlsplit from .models import xray_backends -from .exceptions import AWSError, BadSegmentException +from .exceptions import BadSegmentException class XRayResponse(BaseResponse): diff --git a/requirements-dev.txt b/requirements-dev.txt index c5f055a26..692a1cbf3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,20 +1,36 @@ -r requirements.txt -mock -nose -black; python_version >= '3.6' +-r requirements-tests.txt + +black==19.10b0; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black -sure==1.4.11 coverage==4.5.4 flake8==3.7.8 -freezegun flask +flask-cors boto>=2.45.0 boto3>=1.4.4 -botocore>=1.12.13 +botocore>=1.18.17 six>=1.9 -parameterized>=0.7.0 -prompt-toolkit==1.0.14 +prompt-toolkit==2.0.10 # 3.x is not available with python2 click==6.7 inflection==0.3.1 lxml==4.2.3 beautifulsoup4==4.6.0 + +# +# The below pins mirror the Python version-conditional pins in setup.py +# +Jinja2>=2.10.1; python_version >= '3.6' +mock; python_version >= '3.6' +more-itertools; python_version >= '3.6' +setuptools; python_version >= '3.6' +sshpubkeys>=3.1.0; python_version >= '3.6' +zipp; python_version >= '3.6' + +configparser<5.0; python_version == '2.7' +Jinja2<3.0.0,>=2.10.1; python_version == '2.7' +mock<=3.0.5; python_version == '2.7' +more-itertools==5.0.0; python_version == '2.7' +setuptools==44.0.0; python_version == '2.7' +sshpubkeys>=3.1.0,<4.0; python_version == '2.7' +zipp==0.6.0; python_version == '2.7' diff --git a/requirements-tests.txt b/requirements-tests.txt new file mode 100644 index 000000000..847ce539e --- /dev/null +++ b/requirements-tests.txt @@ -0,0 +1,4 @@ +pytest +pytest-cov +sure==1.4.11 +freezegun diff --git a/requirements.txt b/requirements.txt index 4de489f8c..f5a476248 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ # Please add requirements to setup.py --e . +-e .[all] diff --git a/scripts/get_amis.py b/scripts/get_amis.py index 687dab2d4..b694340bd 100644 --- a/scripts/get_amis.py +++ b/scripts/get_amis.py @@ -3,35 +3,64 @@ import json # Taken from free tier list when creating an instance instances = [ - 'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0', - 'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b', - 'ami-d1cb19a8', 'ami-61db0918', 'ami-56ec3e2f', 'ami-84ee3cfd', 'ami-86ee3cff', 'ami-f0e83a89', 'ami-1f12c066', - 'ami-afee3cd6', 'ami-1812c061', 'ami-77ed3f0e', 'ami-3bf32142', 'ami-6ef02217', 'ami-f4cf1d8d', 'ami-3df32144', - 'ami-c6f321bf', 'ami-24f3215d', 'ami-fa7cdd89', 'ami-1e749f67', 'ami-a9cc1ed0', 'ami-8104a4f8' + "ami-760aaa0f", + "ami-bb9a6bc2", + "ami-35e92e4c", + "ami-785db401", + "ami-b7e93bce", + "ami-dca37ea5", + "ami-999844e0", + "ami-9b32e8e2", + "ami-f8e54081", + "ami-bceb39c5", + "ami-03cf127a", + "ami-1ecc1e67", + "ami-c2ff2dbb", + "ami-12c6146b", + "ami-d1cb19a8", + "ami-61db0918", + "ami-56ec3e2f", + "ami-84ee3cfd", + "ami-86ee3cff", + "ami-f0e83a89", + "ami-1f12c066", + "ami-afee3cd6", + "ami-1812c061", + "ami-77ed3f0e", + "ami-3bf32142", + "ami-6ef02217", + "ami-f4cf1d8d", + "ami-3df32144", + "ami-c6f321bf", + "ami-24f3215d", + "ami-fa7cdd89", + "ami-1e749f67", + "ami-a9cc1ed0", + "ami-8104a4f8", ] -client = boto3.client('ec2', region_name='eu-west-1') +client = boto3.client("ec2", region_name="eu-west-1") test = client.describe_images(ImageIds=instances) result = [] -for image in test['Images']: +for image in test["Images"]: try: tmp = { - 'ami_id': image['ImageId'], - 'name': image['Name'], - 'description': image['Description'], - 'owner_id': image['OwnerId'], - 'public': image['Public'], - 'virtualization_type': image['VirtualizationType'], - 'architecture': image['Architecture'], - 'state': image['State'], - 'platform': image.get('Platform'), - 'image_type': image['ImageType'], - 'hypervisor': image['Hypervisor'], - 'root_device_name': image['RootDeviceName'], - 'root_device_type': image['RootDeviceType'], - 'sriov': image.get('SriovNetSupport', 'simple') + "ami_id": image["ImageId"], + "name": image["Name"], + "description": image["Description"], + "owner_id": image["OwnerId"], + "public": image["Public"], + "virtualization_type": image["VirtualizationType"], + "architecture": image["Architecture"], + "state": image["State"], + "platform": image.get("Platform"), + "image_type": image["ImageType"], + "hypervisor": image["Hypervisor"], + "root_device_name": image["RootDeviceName"], + "root_device_type": image["RootDeviceType"], + "sriov": image.get("SriovNetSupport", "simple"), } result.append(tmp) except Exception as err: diff --git a/scripts/get_instance_info.py b/scripts/get_instance_info.py index f883c0cae..7aea257f8 100755 --- a/scripts/get_instance_info.py +++ b/scripts/get_instance_info.py @@ -1,4 +1,5 @@ #!/usr/bin/env python + import json import os import subprocess @@ -11,128 +12,142 @@ class Instance(object): self.instance = instance def _get_td(self, td): - return self.instance.find('td', attrs={'class': td}) + return self.instance.find("td", attrs={"class": td}) def _get_sort(self, td): - return float(self.instance.find('td', attrs={'class': td}).find('span')['sort']) + return float(self.instance.find("td", attrs={"class": td}).find("span")["sort"]) @property def name(self): - 
return self._get_td('name').text.strip() + return self._get_td("name").text.strip() @property def apiname(self): - return self._get_td('apiname').text.strip() + return self._get_td("apiname").text.strip() @property def memory(self): - return self._get_sort('memory') + return self._get_sort("memory") @property def computeunits(self): - return self._get_sort('computeunits') + return self._get_sort("computeunits") @property def vcpus(self): - return self._get_sort('vcpus') + return self._get_sort("vcpus") @property def gpus(self): - return int(self._get_td('gpus').text.strip()) + return int(self._get_td("gpus").text.strip()) @property def fpga(self): - return int(self._get_td('fpga').text.strip()) + return int(self._get_td("fpga").text.strip()) @property def ecu_per_vcpu(self): - return self._get_sort('ecu-per-vcpu') + return self._get_sort("ecu-per-vcpu") @property def physical_processor(self): - return self._get_td('physical_processor').text.strip() + return self._get_td("physical_processor").text.strip() @property def clock_speed_ghz(self): - return self._get_td('clock_speed_ghz').text.strip() + return self._get_td("clock_speed_ghz").text.strip() @property def intel_avx(self): - return self._get_td('intel_avx').text.strip() + return self._get_td("intel_avx").text.strip() @property def intel_avx2(self): - return self._get_td('intel_avx2').text.strip() + return self._get_td("intel_avx2").text.strip() @property def intel_turbo(self): - return self._get_td('intel_turbo').text.strip() + return self._get_td("intel_turbo").text.strip() @property def storage(self): - return self._get_sort('storage') + return self._get_sort("storage") @property def architecture(self): - return self._get_td('architecture').text.strip() + return self._get_td("architecture").text.strip() @property def network_perf(self): # 2 == low - return self._get_sort('networkperf') + return self._get_sort("networkperf") @property def ebs_max_bandwidth(self): - return self._get_sort('ebs-max-bandwidth') + return self._get_sort("ebs-max-bandwidth") @property def ebs_throughput(self): - return self._get_sort('ebs-throughput') + return self._get_sort("ebs-throughput") @property def ebs_iops(self): - return self._get_sort('ebs-iops') + return self._get_sort("ebs-iops") @property def max_ips(self): - return int(self._get_td('maxips').text.strip()) + return int(self._get_td("maxips").text.strip()) @property def enhanced_networking(self): - return self._get_td('enhanced-networking').text.strip() != 'No' + return self._get_td("enhanced-networking").text.strip() != "No" @property def vpc_only(self): - return self._get_td('vpc-only').text.strip() != 'No' + return self._get_td("vpc-only").text.strip() != "No" @property def ipv6_support(self): - return self._get_td('ipv6-support').text.strip() != 'No' + return self._get_td("ipv6-support").text.strip() != "No" @property def placement_group_support(self): - return self._get_td('placement-group-support').text.strip() != 'No' + return self._get_td("placement-group-support").text.strip() != "No" @property def linux_virtualization(self): - return self._get_td('linux-virtualization').text.strip() + return self._get_td("linux-virtualization").text.strip() def to_dict(self): result = {} - for attr in [x for x in self.__class__.__dict__.keys() if not x.startswith('_') and x != 'to_dict']: - result[attr] = getattr(self, attr) + for attr in [ + x + for x in self.__class__.__dict__.keys() + if not x.startswith("_") and x != "to_dict" + ]: + try: + result[attr] = getattr(self, attr) + except ValueError as 
ex: + if "'N/A'" in str(ex): + print( + "Skipping attribute '{0}' for instance type '{1}' (not found)".format( + attr, self.name + ) + ) + else: + raise return self.apiname, result def main(): print("Getting HTML from http://www.ec2instances.info") - page_request = requests.get('http://www.ec2instances.info') - soup = BeautifulSoup(page_request.text, 'html.parser') - data_table = soup.find(id='data') + page_request = requests.get("http://www.ec2instances.info") + soup = BeautifulSoup(page_request.text, "html.parser") + data_table = soup.find(id="data") print("Finding data in table") - instances = data_table.find('tbody').find_all('tr') + instances = data_table.find("tbody").find_all("tr") print("Parsing data") result = {} @@ -140,11 +155,16 @@ def main(): instance_id, instance_data = Instance(instance).to_dict() result[instance_id] = instance_data - root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip() - dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json') + root_dir = ( + subprocess.check_output(["git", "rev-parse", "--show-toplevel"]) + .decode() + .strip() + ) + dest = os.path.join(root_dir, "moto/ec2/resources/instance_types.json") print("Writing data to {0}".format(dest)) - with open(dest, 'w') as open_file: - json.dump(result, open_file) + with open(dest, "w") as open_file: + json.dump(result, open_file, sort_keys=True) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py index 4552ec18e..23def7700 100755 --- a/scripts/implementation_coverage.py +++ b/scripts/implementation_coverage.py @@ -7,18 +7,24 @@ import boto3 script_dir = os.path.dirname(os.path.abspath(__file__)) -alternative_service_names = {'lambda': 'awslambda'} +alternative_service_names = {"lambda": "awslambda", "dynamodb": "dynamodb2"} def get_moto_implementation(service_name): - service_name = service_name.replace("-", "") if "-" in service_name else service_name - alt_service_name = alternative_service_names[service_name] if service_name in alternative_service_names else service_name - if not hasattr(moto, alt_service_name): - return None - module = getattr(moto, alt_service_name) - if module is None: - return None - mock = getattr(module, "mock_{}".format(service_name)) + service_name = ( + service_name.replace("-", "") if "-" in service_name else service_name + ) + alt_service_name = ( + alternative_service_names[service_name] + if service_name in alternative_service_names + else service_name + ) + if hasattr(moto, "mock_{}".format(alt_service_name)): + mock = getattr(moto, "mock_{}".format(alt_service_name)) + elif hasattr(moto, "mock_{}".format(service_name)): + mock = getattr(moto, "mock_{}".format(service_name)) + else: + mock = None if mock is None: return None backends = list(mock().backends.values()) @@ -31,11 +37,13 @@ def calculate_implementation_coverage(): coverage = {} for service_name in service_names: moto_client = get_moto_implementation(service_name) - real_client = boto3.client(service_name, region_name='us-east-1') + real_client = boto3.client(service_name, region_name="us-east-1") implemented = [] not_implemented = [] - operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + operation_names = [ + xform_name(op) for op in real_client.meta.service_model.operation_names + ] for op in operation_names: if moto_client and op in dir(moto_client): implemented.append(op) @@ -43,20 +51,22 @@ def 
calculate_implementation_coverage(): not_implemented.append(op) coverage[service_name] = { - 'implemented': implemented, - 'not_implemented': not_implemented, + "implemented": implemented, + "not_implemented": not_implemented, } return coverage def print_implementation_coverage(coverage): for service_name in sorted(coverage): - implemented = coverage.get(service_name)['implemented'] - not_implemented = coverage.get(service_name)['not_implemented'] + implemented = coverage.get(service_name)["implemented"] + not_implemented = coverage.get(service_name)["not_implemented"] operations = sorted(implemented + not_implemented) if implemented and not_implemented: - percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + percentage_implemented = int( + 100.0 * len(implemented) / (len(implemented) + len(not_implemented)) + ) elif implemented: percentage_implemented = 100 else: @@ -84,12 +94,14 @@ def write_implementation_coverage_to_file(coverage): print("Writing to {}".format(implementation_coverage_file)) with open(implementation_coverage_file, "w+") as file: for service_name in sorted(coverage): - implemented = coverage.get(service_name)['implemented'] - not_implemented = coverage.get(service_name)['not_implemented'] + implemented = coverage.get(service_name)["implemented"] + not_implemented = coverage.get(service_name)["not_implemented"] operations = sorted(implemented + not_implemented) if implemented and not_implemented: - percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + percentage_implemented = int( + 100.0 * len(implemented) / (len(implemented) + len(not_implemented)) + ) elif implemented: percentage_implemented = 100 else: @@ -97,15 +109,19 @@ def write_implementation_coverage_to_file(coverage): file.write("\n") file.write("## {}\n".format(service_name)) - file.write("{}% implemented\n".format(percentage_implemented)) + file.write("
\n") + file.write( + "{}% implemented\n\n".format(percentage_implemented) + ) for op in operations: if op in implemented: file.write("- [X] {}\n".format(op)) else: file.write("- [ ] {}\n".format(op)) + file.write("
\n") -if __name__ == '__main__': +if __name__ == "__main__": cov = calculate_implementation_coverage() write_implementation_coverage_to_file(cov) print_implementation_coverage(cov) diff --git a/scripts/int_test.sh b/scripts/int_test.sh new file mode 100755 index 000000000..f57bb157f --- /dev/null +++ b/scripts/int_test.sh @@ -0,0 +1,87 @@ +# +# Dependency Integration Test script +# + +# Runs a test to verify whether each service has the correct dependencies listed in setup.py +# +# ::Algorithm:: +# For each valid service: +# - Create a virtual environment +# - Install only the necessary dependencies +# - Run the tests for that service +# - If the tests fail: +# - This service is probably missing a dependency +# - A log file with the test results will be created (test_results_service.log) +# - Delete the virtual environment +# +# Note: +# Only tested on Linux +# Parallelized to test 4 services at the time. +# Could take some time to run - around 20 minutes on the author's machine + + +overwrite() { echo -e "\r\033[1A\033[0K$@"; } + +contains() { + [[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && return 0 || return 1 +} + +valid_service() { + # Verify whether this is a valid service + # We'll ignore metadata folders, and folders that test generic Moto behaviour + # We'll also ignore CloudFormation, as it will always depend on other services + local ignore_moto_folders="core instance_metadata __pycache__ templates cloudformation" + if echo $ignore_moto_folders | grep -q "$1"; then + return 1 + else + return 0 + fi +} + +test_service() { + service=$1 + path_to_test_file=$2 + venv_path="test_venv_${service}" + overwrite "Running tests for ${service}.." + virtualenv ${venv_path} -p `which python3` > /dev/null + source ${venv_path}/bin/activate > /dev/null + # Can't just install requirements-file, as it points to all dependencies + pip install -r requirements-tests.txt > /dev/null + pip install .[$service] > /dev/null 2>&1 + # Restart venv - ensure these deps are loaded + deactivate + source ${venv_path}/bin/activate > /dev/null + # Run tests for this service + test_result_filename="test_results_${service}.log" + touch $test_result_filename + nosetests -qxs --ignore-files="test_server\.py" --ignore-files="test_${service}_cloudformation\.py" --ignore-files="test_integration\.py" $path_to_test_file >$test_result_filename 2>&1 + RESULT=$? + if [[ $RESULT != 0 ]]; then + echo -e "Tests for ${service} have failed!\n" + else + rm $test_result_filename + fi + deactivate + rm -rf ${venv_path} +} + +echo "Running Dependency tests..." 
+ITER=0 +for file in moto/* +do + if [[ -d $file ]]; then + service=${file:5} + path_to_test_file="tests/test_${service}" + if valid_service $service && [[ -d $path_to_test_file ]]; then + test_service $service $path_to_test_file & + elif valid_service $service; then + echo -e "No tests for ${service} can be found in ${path_to_test_file}!\n" + fi + if (( $ITER % 4 == 0 )); then + # Ensure we're only processing 4 services at a time + wait + fi + fi + ITER=$(expr $ITER + 1) +done +wait diff --git a/scripts/scaffold.py b/scripts/scaffold.py index 43a648b48..9255ac008 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -17,9 +17,7 @@ from lxml import etree import click import jinja2 -from prompt_toolkit import ( - prompt -) +from prompt_toolkit import prompt from prompt_toolkit.completion import WordCompleter from prompt_toolkit.shortcuts import print_formatted_text @@ -29,35 +27,35 @@ import boto3 from moto.core.responses import BaseResponse from moto.core import BaseBackend -from implementation_coverage import ( - get_moto_implementation -) +from implementation_coverage import get_moto_implementation from inflection import singularize -TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), './template') +TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "./template") -INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize'] -OUTPUT_IGNORED_IN_BACKEND = ['NextMarker'] +INPUT_IGNORED_IN_BACKEND = ["Marker", "PageSize"] +OUTPUT_IGNORED_IN_BACKEND = ["NextMarker"] def print_progress(title, body, color): - click.secho(u'\t{}\t'.format(title), fg=color, nl=False) + click.secho(u"\t{}\t".format(title), fg=color, nl=False) click.echo(body) def select_service_and_operation(): service_names = Session().get_available_services() service_completer = WordCompleter(service_names) - service_name = prompt(u'Select service: ', completer=service_completer) + service_name = prompt(u"Select service: ", completer=service_completer) if service_name not in service_names: - click.secho(u'{} is not valid service'.format(service_name), fg='red') + click.secho(u"{} is not valid service".format(service_name), fg="red") raise click.Abort() moto_client = get_moto_implementation(service_name) - real_client = boto3.client(service_name, region_name='us-east-1') + real_client = boto3.client(service_name, region_name="us-east-1") implemented = [] not_implemented = [] - operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + operation_names = [ + xform_name(op) for op in real_client.meta.service_model.operation_names + ] for op in operation_names: if moto_client and op in dir(moto_client): implemented.append(op) @@ -65,169 +63,171 @@ def select_service_and_operation(): not_implemented.append(op) operation_completer = WordCompleter(operation_names) - click.echo('==Current Implementation Status==') + click.echo("==Current Implementation Status==") for operation_name in operation_names: - check = 'X' if operation_name in implemented else ' ' - click.secho('[{}] {}'.format(check, operation_name)) - click.echo('=================================') - operation_name = prompt(u'Select Operation: ', completer=operation_completer) + check = "X" if operation_name in implemented else " " + click.secho("[{}] {}".format(check, operation_name)) + click.echo("=================================") + operation_name = prompt(u"Select Operation: ", completer=operation_completer) if operation_name not in operation_names: - click.secho('{} is not valid operation'.format(operation_name), fg='red') +
click.secho("{} is not valid operation".format(operation_name), fg="red") raise click.Abort() if operation_name in implemented: - click.secho('{} is already implemented'.format(operation_name), fg='red') + click.secho("{} is already implemented".format(operation_name), fg="red") raise click.Abort() return service_name, operation_name + def get_escaped_service(service): - return service.replace('-', '') + return service.replace("-", "") + def get_lib_dir(service): - return os.path.join('moto', get_escaped_service(service)) + return os.path.join("moto", get_escaped_service(service)) + def get_test_dir(service): - return os.path.join('tests', 'test_{}'.format(get_escaped_service(service))) + return os.path.join("tests", "test_{}".format(get_escaped_service(service))) def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): - is_test = True if 'test' in tmpl_dir else False - rendered = jinja2.Environment( - loader=jinja2.FileSystemLoader(tmpl_dir) - ).get_template(tmpl_filename).render(context) + is_test = True if "test" in tmpl_dir else False + rendered = ( + jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir)) + .get_template(tmpl_filename) + .render(context) + ) dirname = get_test_dir(service) if is_test else get_lib_dir(service) filename = alt_filename or os.path.splitext(tmpl_filename)[0] filepath = os.path.join(dirname, filename) if os.path.exists(filepath): - print_progress('skip creating', filepath, 'yellow') + print_progress("skip creating", filepath, "yellow") else: - print_progress('creating', filepath, 'green') - with open(filepath, 'w') as f: + print_progress("creating", filepath, "green") + with open(filepath, "w") as f: f.write(rendered) def append_mock_to_init_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', '__init__.py') + path = os.path.join(os.path.dirname(__file__), "..", "moto", "__init__.py") with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] - if any(_ for _ in lines if re.match('^from.*mock_{}.*$'.format(service), _)): + if any(_ for _ in lines if re.match("^mock_{}.*lazy_load(.*)$".format(service), _)): return - filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] + filtered_lines = [_ for _ in lines if re.match("^mock_.*lazy_load(.*)$", _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from .{} import mock_{} # noqa'.format(get_escaped_service(service), get_escaped_service(service)) + new_line = 'mock_{} = lazy_load(".{}", "mock_{}")'.format( + get_escaped_service(service), + get_escaped_service(service), + get_escaped_service(service), + ) lines.insert(last_import_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) -def append_mock_import_to_backends_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') - with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] - - if any(_ for _ in lines if re.match('^from moto\.{}.*{}_backends.*$'.format(service, service), _)): - return - filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] - last_import_line_index = lines.index(filtered_lines[-1]) - - new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service)) - lines.insert(last_import_line_index + 1, new_line) - - body = '\n'.join(lines) + '\n' - with open(path, 
'w') as f: - f.write(body) - def append_mock_dict_to_backends_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') + path = os.path.join(os.path.dirname(__file__), "..", "moto", "backends.py") with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] - if any(_ for _ in lines if re.match(".*\"{}\": {}_backends.*".format(service, service), _)): + if any( + _ + for _ in lines + if re.match('.*"{}": {}_backends.*'.format(service, service), _) + ): return - filtered_lines = [_ for _ in lines if re.match(".*\".*\":.*_backends.*", _)] + filtered_lines = [_ for _ in lines if re.match('.*".*":.*_backends.*', _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " \"{}\": {}_backends,".format(service, get_escaped_service(service)) + new_line = ' "{}": ("{}", "{}_backends"),'.format( + service, get_escaped_service(service), get_escaped_service(service) + ) prev_line = lines[last_elem_line_index] - if not prev_line.endswith('{') and not prev_line.endswith(','): - lines[last_elem_line_index] += ',' + if not prev_line.endswith("{") and not prev_line.endswith(","): + lines[last_elem_line_index] += "," lines.insert(last_elem_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) + def initialize_service(service, operation, api_protocol): - """create lib and test dirs if not exist - """ + """create lib and test dirs if not exist""" lib_dir = get_lib_dir(service) test_dir = get_test_dir(service) - print_progress('Initializing service', service, 'green') + print_progress("Initializing service", service, "green") client = boto3.client(service) service_class = client.__class__.__name__ endpoint_prefix = client._service_model.endpoint_prefix tmpl_context = { - 'service': service, - 'service_class': service_class, - 'endpoint_prefix': endpoint_prefix, - 'api_protocol': api_protocol, - 'escaped_service': get_escaped_service(service) + "service": service, + "service_class": service_class, + "endpoint_prefix": endpoint_prefix, + "api_protocol": api_protocol, + "escaped_service": get_escaped_service(service), } # initialize service directory if os.path.exists(lib_dir): - print_progress('skip creating', lib_dir, 'yellow') + print_progress("skip creating", lib_dir, "yellow") else: - print_progress('creating', lib_dir, 'green') + print_progress("creating", lib_dir, "green") os.makedirs(lib_dir) - tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') + tmpl_dir = os.path.join(TEMPLATE_DIR, "lib") for tmpl_filename in os.listdir(tmpl_dir): - render_template( - tmpl_dir, tmpl_filename, tmpl_context, service - ) + render_template(tmpl_dir, tmpl_filename, tmpl_context, service) # initialize test directory if os.path.exists(test_dir): - print_progress('skip creating', test_dir, 'yellow') + print_progress("skip creating", test_dir, "yellow") else: - print_progress('creating', test_dir, 'green') + print_progress("creating", test_dir, "green") os.makedirs(test_dir) - tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') + tmpl_dir = os.path.join(TEMPLATE_DIR, "test") for tmpl_filename in os.listdir(tmpl_dir): - alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None - render_template( - tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename + alt_filename = ( + "test_{}.py".format(get_escaped_service(service)) + if tmpl_filename == 
"test_service.py.j2" + else None ) + render_template(tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename) # append mock to init files append_mock_to_init_py(service) - append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service) def to_upper_camel_case(s): - return ''.join([_.title() for _ in s.split('_')]) + return "".join([_.title() for _ in s.split("_")]) def to_lower_camel_case(s): - words = s.split('_') - return ''.join(words[:1] + [_.title() for _ in words[1:]]) + words = s.split("_") + return "".join(words[:1] + [_.title() for _ in words[1:]]) def to_snake_case(s): - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + + +def get_operation_name_in_keys(operation_name, operation_keys): + index = [_.lower() for _ in operation_keys].index(operation_name.lower()) + return operation_keys[index] def get_function_in_responses(service, operation, protocol): @@ -237,42 +237,58 @@ def get_function_in_responses(service, operation, protocol): """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description["operations"].keys()), + ) + op_model = client._service_model.operation_model(aws_operation_name) - if not hasattr(op_model.output_shape, 'members'): + if not hasattr(op_model.output_shape, "members"): outputs = {} else: outputs = op_model.output_shape.members inputs = op_model.input_shape.members - input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] - output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] - body = '\ndef {}(self):\n'.format(operation) + input_names = [ + to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND + ] + output_names = [ + to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND + ] + body = "\ndef {}(self):\n".format(operation) for input_name, input_type in inputs.items(): type_name = input_type.type_name - if type_name == 'integer': + if type_name == "integer": arg_line_tmpl = ' {} = self._get_int_param("{}")\n' - elif type_name == 'list': + elif type_name == "list": arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' else: arg_line_tmpl = ' {} = self._get_param("{}")\n' body += arg_line_tmpl.format(to_snake_case(input_name), input_name) if output_names: - body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation) - else: - body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation) - for input_name in input_names: - body += ' {}={},\n'.format(input_name, input_name) - - body += ' )\n' - if protocol == 'query': - body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) - body += ' return template.render({})\n'.format( - ', '.join(['{}={}'.format(_, _) for _ in output_names]) + body += " {} = self.{}_backend.{}(\n".format( + ", ".join(output_names), get_escaped_service(service), operation + ) + else: + body += " self.{}_backend.{}(\n".format( + get_escaped_service(service), operation + ) + for input_name in input_names: + body += " {}={},\n".format(input_name, input_name) + + body += " )\n" + if protocol == "query": + body += " template = self.response_template({}_TEMPLATE)\n".format( + 
operation.upper() + ) + body += " return template.render({})\n".format( + ", ".join(["{}={}".format(_, _) for _ in output_names]) + ) + elif protocol in ["json", "rest-json"]: + body += " # TODO: adjust response\n" + body += " return json.dumps(dict({}))\n".format( + ", ".join(["{}={}".format(to_lower_camel_case(_), _) for _ in output_names]) ) - elif protocol in ['json', 'rest-json']: - body += ' # TODO: adjust response\n' - body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names])) return body @@ -282,43 +298,57 @@ def get_function_in_models(service, operation): https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description["operations"].keys()), + ) op_model = client._service_model.operation_model(aws_operation_name) inputs = op_model.input_shape.members - if not hasattr(op_model.output_shape, 'members'): + if not hasattr(op_model.output_shape, "members"): outputs = {} else: outputs = op_model.output_shape.members - input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] - output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] + input_names = [ + to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND + ] + output_names = [ + to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND + ] if input_names: - body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names)) + body = "def {}(self, {}):\n".format(operation, ", ".join(input_names)) else: - body = 'def {}(self)\n' - body += ' # implement here\n' - body += ' return {}\n\n'.format(', '.join(output_names)) + body = "def {}(self):\n".format(operation) + body += " # implement here\n" + body += " return {}\n\n".format(", ".join(output_names)) return body def _get_subtree(name, shape, replace_list, name_prefix=[]): class_name = shape.__class__.__name__ - if class_name in ('StringShape', 'Shape'): + if class_name in ("StringShape", "Shape"): t = etree.Element(name) if name_prefix: - t.text = '{{ %s.%s }}' % (name_prefix[-1], to_snake_case(name)) + t.text = "{{ %s.%s }}" % (name_prefix[-1], to_snake_case(name)) else: - t.text = '{{ %s }}' % to_snake_case(name) + t.text = "{{ %s }}" % to_snake_case(name) return t - elif class_name in ('ListShape', ): + elif class_name in ("ListShape",): replace_list.append((name, name_prefix)) t = etree.Element(name) - t_member = etree.Element('member') + t_member = etree.Element("member") t.append(t_member) for nested_name, nested_shape in shape.member.members.items(): - t_member.append(_get_subtree(nested_name, nested_shape, replace_list, name_prefix + [singularize(name.lower())])) + t_member.append( + _get_subtree( + nested_name, + nested_shape, + replace_list, + name_prefix + [singularize(name.lower())], + ) + ) return t - raise ValueError('Not supported Shape') + raise ValueError("Not supported Shape") def get_response_query_template(service, operation): @@ -329,20 +359,24 @@ def get_response_query_template(service, operation): https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( +
to_upper_camel_case(operation), + list(client._service_model._service_description["operations"].keys()), + ) + op_model = client._service_model.operation_model(aws_operation_name) - result_wrapper = op_model.output_shape.serialization['resultWrapper'] - response_wrapper = result_wrapper.replace('Result', 'Response') + result_wrapper = op_model.output_shape.serialization["resultWrapper"] + response_wrapper = result_wrapper.replace("Result", "Response") metadata = op_model.metadata - xml_namespace = metadata['xmlNamespace'] + xml_namespace = metadata["xmlNamespace"] # build xml tree - t_root = etree.Element(response_wrapper, xmlns=xml_namespace) + t_root = etree.Element(response_wrapper, xmlns=xml_namespace) # build metadata - t_metadata = etree.Element('ResponseMetadata') - t_request_id = etree.Element('RequestId') - t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE' + t_metadata = etree.Element("ResponseMetadata") + t_request_id = etree.Element("RequestId") + t_request_id.text = "1549581b-12b7-11e3-895e-1334aEXAMPLE" t_metadata.append(t_request_id) t_root.append(t_metadata) @@ -353,66 +387,73 @@ def get_response_query_template(service, operation): for output_name, output_shape in outputs.items(): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) - xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8') + xml_body = etree.tostring(t_root, pretty_print=True).decode("utf-8") xml_body_lines = xml_body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] singular_name = singularize(name) - start_tag = '<%s>' % name - iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() - loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name) - end_tag = '</%s>' % name - loop_end = '{% endfor %}' + start_tag = "<%s>" % name + iter_name = "{}.{}".format(prefix[-1], name.lower()) if prefix else name.lower() + loop_start = "{%% for %s in %s %%}" % (singular_name.lower(), iter_name) + end_tag = "</%s>" % name + loop_end = "{% endfor %}" start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l] if len(start_tag_indexes) != 1: - raise Exception('tag %s not found in response body' % start_tag) + raise Exception("tag %s not found in response body" % start_tag) start_tag_index = start_tag_indexes[0] xml_body_lines.insert(start_tag_index + 1, loop_start) end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l] if len(end_tag_indexes) != 1: - raise Exception('tag %s not found in response body' % end_tag) + raise Exception("tag %s not found in response body" % end_tag) end_tag_index = end_tag_indexes[0] xml_body_lines.insert(end_tag_index, loop_end) - xml_body = '\n'.join(xml_body_lines) + xml_body = "\n".join(xml_body_lines) body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body) return body def insert_code_to_class(path, base_class, new_code): with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] - mod_path = os.path.splitext(path)[0].replace('/', '.') + lines = [_.replace("\n", "") for _ in f.readlines()] + mod_path = os.path.splitext(path)[0].replace("/", ".") mod = importlib.import_module(mod_path) clsmembers = inspect.getmembers(mod, inspect.isclass) - _response_cls = [_[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class] + _response_cls = [ + _[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class + ] if len(_response_cls) != 1: - raise Exception('unknown error,
number of clsmembers is not 1') + raise Exception("unknown error, number of clsmembers is not 1") response_cls = _response_cls[0] code_lines, line_no = inspect.getsourcelines(response_cls) end_line_no = line_no + len(code_lines) - func_lines = [' ' * 4 + _ for _ in new_code.splitlines()] + func_lines = [" " * 4 + _ for _ in new_code.splitlines()] lines = lines[:end_line_no] + func_lines + lines[end_line_no:] - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) def insert_url(service, operation, api_protocol): client = boto3.client(service) service_class = client.__class__.__name__ - aws_operation_name = to_upper_camel_case(operation) - uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description["operations"].keys()), + ) + uri = client._service_model.operation_model(aws_operation_name).http["requestUri"] - path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py') + path = os.path.join( + os.path.dirname(__file__), "..", "moto", get_escaped_service(service), "urls.py" + ) with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] if any(_ for _ in lines if re.match(uri, _)): return @@ -420,50 +461,49 @@ def insert_url(service, operation, api_protocol): url_paths_found = False last_elem_line_index = -1 for i, line in enumerate(lines): - if line.startswith('url_paths'): + if line.startswith("url_paths"): url_paths_found = True - if url_paths_found and line.startswith('}'): + if url_paths_found and line.startswith("}"): last_elem_line_index = i - 1 prev_line = lines[last_elem_line_index] - if not prev_line.endswith('{') and not prev_line.endswith(','): - lines[last_elem_line_index] += ',' + if not prev_line.endswith("{") and not prev_line.endswith(","): + lines[last_elem_line_index] += "," # generate url pattern - if api_protocol == 'rest-json': + if api_protocol == "rest-json": new_line = " '{0}/.*$': response.dispatch," else: - new_line = " '{0}%s$': %sResponse.dispatch," % ( - uri, service_class - ) + new_line = " '{0}%s$': %sResponse.dispatch," % (uri, service_class) if new_line in lines: return lines.insert(last_elem_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) + def insert_codes(service, operation, api_protocol): func_in_responses = get_function_in_responses(service, operation, api_protocol) func_in_models = get_function_in_models(service, operation) # edit responses.py - responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service)) - print_progress('inserting code', responses_path, 'green') + responses_path = "moto/{}/responses.py".format(get_escaped_service(service)) + print_progress("inserting code", responses_path, "green") insert_code_to_class(responses_path, BaseResponse, func_in_responses) # insert template - if api_protocol == 'query': + if api_protocol == "query": template = get_response_query_template(service, operation) with open(responses_path) as f: lines = [_[:-1] for _ in f.readlines()] lines += template.splitlines() - with open(responses_path, 'w') as f: - f.write('\n'.join(lines)) + with open(responses_path, "w") as f: + f.write("\n".join(lines)) # edit models.py - models_path = 
'moto/{}/models.py'.format(get_escaped_service(service)) - print_progress('inserting code', models_path, 'green') + models_path = "moto/{}/models.py".format(get_escaped_service(service)) + print_progress("inserting code", models_path, "green") insert_code_to_class(models_path, BaseBackend, func_in_models) # edit urls.py @@ -473,15 +513,20 @@ def insert_codes(service, operation, api_protocol): @click.command() def main(): service, operation = select_service_and_operation() - api_protocol = boto3.client(service)._service_model.metadata['protocol'] + api_protocol = boto3.client(service)._service_model.metadata["protocol"] initialize_service(service, operation, api_protocol) - if api_protocol in ['query', 'json', 'rest-json']: + if api_protocol in ["query", "json", "rest-json"]: insert_codes(service, operation, api_protocol) else: - print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') + print_progress( + "skip inserting code", + 'api protocol "{}" is not supported'.format(api_protocol), + "yellow", + ) click.echo('You will still need to add the mock into "__init__.py"'.format(service)) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/scripts/update_managed_policies.py b/scripts/update_managed_policies.py index de7058fd7..2e227b752 100755 --- a/scripts/update_managed_policies.py +++ b/scripts/update_managed_policies.py @@ -23,42 +23,53 @@ def json_serial(obj): raise TypeError("Type not serializable") -client = boto3.client('iam') +client = boto3.client("iam") policies = {} -paginator = client.get_paginator('list_policies') +paginator = client.get_paginator("list_policies") try: - response_iterator = paginator.paginate(Scope='AWS') + response_iterator = paginator.paginate(Scope="AWS") for response in response_iterator: - for policy in response['Policies']: - policies[policy['PolicyName']] = policy + for policy in response["Policies"]: + policies[policy["PolicyName"]] = policy except NoCredentialsError: print("USAGE:") print("Put your AWS credentials into ~/.aws/credentials and run:") print(__file__) print("") print("Or specify them on the command line:") - print("AWS_ACCESS_KEY_ID=your_personal_access_key AWS_SECRET_ACCESS_KEY=your_personal_secret {}".format(__file__)) + print( + "AWS_ACCESS_KEY_ID=your_personal_access_key AWS_SECRET_ACCESS_KEY=your_personal_secret {}".format( + __file__ + ) + ) print("") sys.exit(1) for policy_name in policies: response = client.get_policy_version( - PolicyArn=policies[policy_name]['Arn'], - VersionId=policies[policy_name]['DefaultVersionId']) - for key in response['PolicyVersion']: - if key != "CreateDate": # the policy's CreateDate should not be overwritten by its version's CreateDate - policies[policy_name][key] = response['PolicyVersion'][key] + PolicyArn=policies[policy_name]["Arn"], + VersionId=policies[policy_name]["DefaultVersionId"], + ) + for key in response["PolicyVersion"]: + if ( + key != "CreateDate" + ): # the policy's CreateDate should not be overwritten by its version's CreateDate + policies[policy_name][key] = response["PolicyVersion"][key] -with open(output_file, 'w') as f: - triple_quote = '\"\"\"' +with open(output_file, "w") as f: + triple_quote = '"""' f.write("# Imported via `make aws_managed_policies`\n") - f.write('aws_managed_policies_data = {}\n'.format(triple_quote)) - f.write(json.dumps(policies, - sort_keys=True, - indent=4, - separators=(',', ': '), - default=json_serial)) - f.write('{}\n'.format(triple_quote)) + f.write("aws_managed_policies_data 
= {}\n".format(triple_quote)) + f.write( + json.dumps( + policies, + sort_keys=True, + indent=4, + separators=(",", ": "), + default=json_serial, + ) + ) + f.write("{}\n".format(triple_quote)) diff --git a/setup.cfg b/setup.cfg index fb04c16a8..1c247ef3d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,6 @@ -[nosetests] -verbosity=1 -detailed-errors=1 -with-coverage=1 -cover-package=moto - [bdist_wheel] universal=1 + +[tool:pytest] +markers = + network: marks tests which require network connection diff --git a/setup.py b/setup.py index 1dde71ac7..913565eb4 100755 --- a/setup.py +++ b/setup.py @@ -1,35 +1,37 @@ #!/usr/bin/env python from __future__ import unicode_literals import codecs +from io import open import os import re import setuptools from setuptools import setup, find_packages import sys +PY2 = sys.version_info[0] == 2 # Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 here = os.path.abspath(os.path.dirname(__file__)) + def read(*parts): # intentionally *not* adding an encoding option to open, See: # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 - with codecs.open(os.path.join(here, *parts), 'r') as fp: + with open(os.path.join(here, *parts), "r") as fp: return fp.read() def get_version(): - version_file = read('moto', '__init__.py') - version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', - version_file, re.MULTILINE) + version_file = read("moto", "__init__.py") + version_match = re.search( + r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE + ) if version_match: return version_match.group(1) - raise RuntimeError('Unable to find version string.') + raise RuntimeError("Unable to find version string.") install_requires = [ - "Jinja2>=2.10.1", - "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", "cryptography>=2.3.0", @@ -37,23 +39,96 @@ install_requires = [ "xmltodict", "six>1.9", "werkzeug", - "PyYAML>=5.1", "pytz", "python-dateutil<3.0.0,>=2.1", - "python-jose<4.0.0", - "mock", - "docker>=2.5.1", - "jsondiff>=1.1.2", - "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", - "idna<2.9,>=2.5", - "cfn-lint>=0.4.0", - "sshpubkeys>=3.1.0,<4.0" + "MarkupSafe<2.0", # This is a Jinja2 dependency, 2.0.0a1 currently seems broken ] -extras_require = { - 'server': ['flask'], +# +# Avoid pins where they are not necessary. These pins were introduced by the +# following commit for Py2 compatibility. They are not required for non-Py2 +# users. +# +# https://github.com/mpenkov/moto/commit/00134d2df37bb4dcd5f447ef951d383bfec0903c +# +if PY2: + install_requires += [ + # + # This is an indirect dependency. Version 5.0.0 claims to be for + # Py2.6+, but it really isn't. 
+ # + # https://github.com/jaraco/configparser/issues/51 + # + "configparser<5.0", + "Jinja2<3.0.0,>=2.10.1", + "mock<=3.0.5", + "more-itertools==5.0.0", + "setuptools==44.0.0", + "zipp==0.6.0", + ] +else: + install_requires += [ + "Jinja2>=2.10.1", + "mock", + "more-itertools", + "setuptools", + "zipp", + ] + +_dep_PyYAML = "PyYAML>=5.1" +_dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" +_dep_python_jose_ecdsa_pin = ( + "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 +) +_dep_docker = "docker>=2.5.1" +_dep_jsondiff = "jsondiff>=1.1.2" +_dep_aws_xray_sdk = "aws-xray-sdk!=0.96,>=0.93" +_dep_idna = "idna<3,>=2.5" +_dep_cfn_lint = "cfn-lint>=0.4.0" +_dep_sshpubkeys_py2 = "sshpubkeys>=3.1.0,<4.0; python_version<'3'" +_dep_sshpubkeys_py3 = "sshpubkeys>=3.1.0; python_version>'3'" + +all_extra_deps = [ + _dep_PyYAML, + _dep_python_jose, + _dep_python_jose_ecdsa_pin, + _dep_docker, + _dep_jsondiff, + _dep_aws_xray_sdk, + _dep_idna, + _dep_cfn_lint, + _dep_sshpubkeys_py2, + _dep_sshpubkeys_py3, +] +all_server_deps = all_extra_deps + ["flask", "flask-cors"] + +# TODO: do we want to add ALL services here? +# i.e. even those without extra dependencies. +# Would be good for future-compatibility, I guess. +extras_per_service = { + "apigateway": [_dep_python_jose, _dep_python_jose_ecdsa_pin], + "awslambda": [_dep_docker], + "batch": [_dep_docker], + "cloudformation": [_dep_docker, _dep_PyYAML, _dep_cfn_lint], + "cognitoidp": [_dep_python_jose, _dep_python_jose_ecdsa_pin], + "dynamodb2": [_dep_docker], + "dynamodbstreams": [_dep_docker], + "ec2": [_dep_docker, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3], + "iotdata": [_dep_jsondiff], + "s3": [_dep_PyYAML], + "ses": [_dep_docker], + "sns": [_dep_docker], + "sqs": [_dep_docker], + "ssm": [_dep_docker, _dep_PyYAML, _dep_cfn_lint], + "xray": [_dep_aws_xray_sdk], } +extras_require = { + "all": all_extra_deps, + "server": all_server_deps, +} + +extras_require.update(extras_per_service) # https://hynek.me/articles/conditional-python-dependencies/ if int(setuptools.__version__.split(".", 1)[0]) < 18: @@ -64,18 +139,18 @@ else: setup( - name='moto', + name="moto", version=get_version(), - description='A library that allows your python tests to easily' - ' mock out the boto library', - long_description=read('README.md'), - long_description_content_type='text/markdown', - author='Steve Pulec', - author_email='spulec@gmail.com', - url='https://github.com/spulec/moto', + description="A library that allows your python tests to easily" + " mock out the boto library", + long_description=read("README.md"), + long_description_content_type="text/markdown", + author="Steve Pulec", + author_email="spulec@gmail.com", + url="https://github.com/spulec/moto", entry_points={ - 'console_scripts': [ - 'moto_server = moto.server:main', + "console_scripts": [ + "moto_server = moto.server:main", ], }, packages=find_packages(exclude=("tests", "tests.*")), @@ -91,6 +166,7 @@ setup( "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", ], diff --git a/tests/__init__.py b/tests/__init__.py index 05b1d476b..01fe5ab1f 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -6,4 +6,3 @@ import logging logging.getLogger("boto").setLevel(logging.CRITICAL) logging.getLogger("boto3").setLevel(logging.CRITICAL) 
logging.getLogger("botocore").setLevel(logging.CRITICAL) -logging.getLogger("nose").setLevel(logging.CRITICAL) diff --git a/tests/backport_assert_raises.py b/tests/backport_assert_raises.py deleted file mode 100644 index bfed51308..000000000 --- a/tests/backport_assert_raises.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import unicode_literals - -""" -Patch courtesy of: -https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/ -""" - -# code for monkey-patching -import nose.tools - -# let's fix nose.tools.assert_raises (which is really unittest.assertRaises) -# so that it always supports context management - -# in order for these changes to be available to other modules, you'll need -# to guarantee this module is imported by your fixture before either nose or -# unittest are imported - -try: - nose.tools.assert_raises(Exception) -except TypeError: - # this version of assert_raises doesn't support the 1-arg version - class AssertRaisesContext(object): - def __init__(self, expected): - self.expected = expected - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, tb): - self.exception = exc_val - if issubclass(exc_type, self.expected): - return True - nose.tools.assert_equal(exc_type, self.expected) - # if you get to this line, the last assertion must have passed - # suppress the propagation of this exception - return True - - def assert_raises_context(exc_type): - return AssertRaisesContext(exc_type) - - nose.tools.assert_raises = assert_raises_context diff --git a/tests/helpers.py b/tests/helpers.py index ffe27103d..9293bcad9 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import boto -from nose.plugins.skip import SkipTest +from unittest import SkipTest import six diff --git a/tests/test_acm/__init__.py b/tests/test_acm/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_acm/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_acm/resources/__init__.py b/tests/test_acm/resources/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_acm/resources/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index b38cd1843..b32fabeed 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -1,16 +1,16 @@ from __future__ import unicode_literals import os -import boto3 -from freezegun import freeze_time -import sure # noqa import uuid +import boto3 +import pytest +import sure # noqa from botocore.exceptions import ClientError - -from moto import mock_acm +from freezegun import freeze_time +from moto import mock_acm, settings from moto.core import ACCOUNT_ID - +from unittest import SkipTest RESOURCE_FOLDER = os.path.join(os.path.dirname(__file__), "resources") _GET_RESOURCE = lambda x: open(os.path.join(RESOURCE_FOLDER, x), "rb").read() @@ -46,6 +46,30 @@ def test_import_certificate(): resp.should.contain("CertificateChain") +@mock_acm +def test_import_certificate_with_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + resp = client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT, + Tags=[{"Key": "Environment", "Value": "QA"}, {"Key": "KeyOnly"},], + ) + arn = resp["CertificateArn"] + + resp = client.get_certificate(CertificateArn=arn) + resp["Certificate"].should.equal(SERVER_CRT.decode()) + resp.should.contain("CertificateChain") + + resp = client.list_tags_for_certificate(CertificateArn=arn) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.contain("Environment") + tags.should.contain("KeyOnly") + tags["Environment"].should.equal("QA") + tags["KeyOnly"].should.equal("__NONE__") + + @mock_acm def test_import_bad_certificate(): client = boto3.client("acm", region_name="eu-central-1") @@ -140,7 +164,7 @@ def test_describe_certificate(): @mock_acm -def test_describe_certificate(): +def test_describe_certificate_with_bad_arn(): client = boto3.client("acm", region_name="eu-central-1") try: @@ -313,6 +337,150 @@ def test_request_certificate(): resp["CertificateArn"].should.equal(arn) +@mock_acm +def test_request_certificate_with_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + token = str(uuid.uuid4()) + + resp = client.request_certificate( + DomainName="google.com", + IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[ + {"Key": "Environment", "Value": "QA"}, + {"Key": "WithEmptyStr", "Value": ""}, + ], + ) + resp.should.contain("CertificateArn") + arn_1 = resp["CertificateArn"] + + resp = client.list_tags_for_certificate(CertificateArn=arn_1) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.have.length_of(2) + tags["Environment"].should.equal("QA") + tags["WithEmptyStr"].should.equal("") + + # Request certificate for "google.com" with same IdempotencyToken but with different Tags + resp = client.request_certificate( + DomainName="google.com", + IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[{"Key": "Environment", "Value": "Prod"}, {"Key": "KeyOnly"},], + ) + arn_2 = resp["CertificateArn"] + + assert arn_1 != arn_2 # if tags are matched, ACM would have returned same arn + + resp = client.list_tags_for_certificate(CertificateArn=arn_2) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.have.length_of(2) + tags["Environment"].should.equal("Prod") + tags["KeyOnly"].should.equal("__NONE__") + + resp = client.request_certificate( + DomainName="google.com", + 
IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[ + {"Key": "Environment", "Value": "QA"}, + {"Key": "WithEmptyStr", "Value": ""}, + ], + ) + + +@mock_acm +def test_operations_with_invalid_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + # request certificate with invalid tags + with pytest.raises(ClientError) as ex: + client.request_certificate( + DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( + "Member must have length less than or equal to 128" + ) + + # import certificate with invalid tags + with pytest.raises(ClientError) as ex: + client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT, + Tags=[ + {"Key": "Valid", "Value": "X" * 300}, + {"Key": "aws:xx", "Value": "Valid"}, + ], + ) + + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( + "Member must have length less than or equal to 256" + ) + + arn = _import_cert(client) + + # add invalid tags to existing certificate + with pytest.raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "aws:xxx", "Value": "Valid"}, {"Key": "key2"}], + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( + "AWS internal tags cannot be changed with this API" + ) + + # try removing invalid tags from existing certificate + with pytest.raises(ClientError) as ex: + client.remove_tags_from_certificate( + CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}] + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( + "AWS internal tags cannot be changed with this API" + ) + + +@mock_acm +def test_add_too_many_tags(): + client = boto3.client("acm", region_name="eu-central-1") + arn = _import_cert(client) + + # Add 51 tags + with pytest.raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "a-%d" % i, "Value": "abcd"} for i in range(1, 52)], + ) + ex.value.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.value.response["Error"]["Message"].should.contain("contains too many Tags") + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.empty + + # Add 49 tags first, then try to add 2 more. 
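+ # The TooManyTags error message echoes the offending tag values, which is why + # the assertions below count 49 "pqrs" values and 2 "xyz" values: every + # existing and newly-attempted tag should be reported back to the caller.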
+ client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "p-%d" % i, "Value": "pqrs"} for i in range(1, 50)], + ) + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( + 49 + ) + with pytest.raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "x-1", "Value": "xyz"}, {"Key": "x-2", "Value": "xyz"}], + ) + ex.value.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.value.response["Error"]["Message"].should.contain("contains too many Tags") + ex.value.response["Error"]["Message"].count("pqrs").should.equal(49) + ex.value.response["Error"]["Message"].count("xyz").should.equal(2) + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( + 49 + ) + + @mock_acm def test_request_certificate_no_san(): client = boto3.client("acm", region_name="eu-central-1") @@ -324,70 +492,87 @@ def test_request_certificate_no_san(): resp2.should.contain("Certificate") -# # Also tests the SAN code -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['DomainName'].should.equal('google.com') -# resp['Certificate']['Issuer'].should.equal('Amazon') -# resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') -# resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') -# resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') -# len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) -# -# # Move time -# frozen_time.move_to('2012-01-01 12:02:00') -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['Status'].should.equal('ISSUED') -# -# -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# original_arn = resp['CertificateArn'] -# -# # Should be able to request a certificate multiple times in an hour -# # after that it makes a new one -# for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): -# frozen_time.move_to(time_intervals) -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should.equal(original_arn) -# -# # Move time -# frozen_time.move_to('2012-01-01 13:01:00') -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# 
DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should_not.equal(original_arn) +# Also tests the SAN code +@mock_acm +def test_request_certificate_issued_status(): + # After requesting a certificate, it should then auto-validate after 1 minute + # Some sneaky programming for that ;-) + client = boto3.client("acm", region_name="eu-central-1") + + with freeze_time("2012-01-01 12:00:00"): + resp = client.request_certificate( + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) + arn = resp["CertificateArn"] + + with freeze_time("2012-01-01 12:00:00"): + resp = client.describe_certificate(CertificateArn=arn) + resp["Certificate"]["CertificateArn"].should.equal(arn) + resp["Certificate"]["DomainName"].should.equal("google.com") + resp["Certificate"]["Issuer"].should.equal("Amazon") + resp["Certificate"]["KeyAlgorithm"].should.equal("RSA_2048") + resp["Certificate"]["Status"].should.equal("PENDING_VALIDATION") + resp["Certificate"]["Type"].should.equal("AMAZON_ISSUED") + len(resp["Certificate"]["SubjectAlternativeNames"]).should.equal(3) + + # validation will be pending for 1 minute. + with freeze_time("2012-01-01 12:00:00"): + resp = client.describe_certificate(CertificateArn=arn) + resp["Certificate"]["CertificateArn"].should.equal(arn) + resp["Certificate"]["Status"].should.equal("PENDING_VALIDATION") + + if not settings.TEST_SERVER_MODE: + # Move time to get it issued. + with freeze_time("2012-01-01 12:02:00"): + resp = client.describe_certificate(CertificateArn=arn) + resp["Certificate"]["CertificateArn"].should.equal(arn) + resp["Certificate"]["Status"].should.equal("ISSUED") + + +@mock_acm +def test_request_certificate_with_mutiple_times(): + if settings.TEST_SERVER_MODE: + raise SkipTest("Cant manipulate time in server mode") + + # After requesting a certificate, it should then auto-validate after 1 minute + # Some sneaky programming for that ;-) + client = boto3.client("acm", region_name="eu-central-1") + + with freeze_time("2012-01-01 12:00:00"): + resp = client.request_certificate( + IdempotencyToken="test_token", + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) + original_arn = resp["CertificateArn"] + + # Should be able to request a certificate multiple times in an hour + # after that it makes a new one + for time_intervals in ( + "2012-01-01 12:15:00", + "2012-01-01 12:30:00", + "2012-01-01 12:45:00", + ): + with freeze_time(time_intervals): + resp = client.request_certificate( + IdempotencyToken="test_token", + DomainName="google.com", + SubjectAlternativeNames=[ + "google.com", + "www.google.com", + "mail.google.com", + ], + ) + arn = resp["CertificateArn"] + arn.should.equal(original_arn) + + # Move time + with freeze_time("2012-01-01 13:01:00"): + resp = client.request_certificate( + IdempotencyToken="test_token", + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) + arn = resp["CertificateArn"] + arn.should_not.equal(original_arn) diff --git a/tests/test_apigateway/__init__.py b/tests/test_apigateway/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_apigateway/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 496098e8c..f85fd4a02 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import json import boto3 from freezegun import freeze_time @@ -8,9 +9,9 @@ import sure # noqa from botocore.exceptions import ClientError import responses -from moto import mock_apigateway, settings +from moto import mock_apigateway, mock_cognitoidp, settings from moto.core import ACCOUNT_ID -from nose.tools import assert_raises +import pytest @freeze_time("2015-01-01") @@ -69,17 +70,33 @@ def test_create_rest_api_with_tags(): response["tags"].should.equal({"MY_TAG1": "MY_VALUE1"}) +@mock_apigateway +def test_create_rest_api_with_policy(): + client = boto3.client("apigateway", region_name="us-west-2") + + policy = '{"Version": "2012-10-17","Statement": []}' + response = client.create_rest_api( + name="my_api", description="this is my api", policy=policy + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + + assert "policy" in response + response["policy"].should.equal(policy) + + @mock_apigateway def test_create_rest_api_invalid_apikeysource(): client = boto3.client("apigateway", region_name="us-west-2") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_rest_api( name="my_api", description="this is my api", apiKeySource="not a valid api key source", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_apigateway @@ -109,13 +126,13 @@ def test_create_rest_api_valid_apikeysources(): def test_create_rest_api_invalid_endpointconfiguration(): client = boto3.client("apigateway", region_name="us-west-2") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_rest_api( name="my_api", description="this is my api", endpointConfiguration={"types": ["INVALID"]}, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_apigateway @@ -177,10 +194,10 @@ def test_create_resource__validate_name(): valid_names = ["users", "{user_id}", "{proxy+}", "user_09", "good-dog"] # All invalid names should throw an exception for name in invalid_names: - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_resource(restApiId=api_id, parentId=root_id, pathPart=name) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace." 
)
     # All valid names should go through
@@ -204,12 +221,7 @@ def test_create_resource():
     root_resource["ResponseMetadata"].pop("HTTPHeaders", None)
     root_resource["ResponseMetadata"].pop("RetryAttempts", None)
     root_resource.should.equal(
-        {
-            "path": "/",
-            "id": root_id,
-            "ResponseMetadata": {"HTTPStatusCode": 200},
-            "resourceMethods": {"GET": {}},
-        }
+        {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},}
     )
 
     client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users")
@@ -257,7 +269,6 @@ def test_child_resource():
             "parentId": users_id,
             "id": tags_id,
             "ResponseMetadata": {"HTTPStatusCode": 200},
-            "resourceMethods": {"GET": {}},
         }
     )
@@ -286,6 +297,41 @@ def test_create_method():
         {
             "httpMethod": "GET",
             "authorizationType": "none",
+            "apiKeyRequired": False,
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+        }
+    )
+
+
+@mock_apigateway
+def test_create_method_apikeyrequired():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    response = client.create_rest_api(name="my_api", description="this is my api")
+    api_id = response["id"]
+
+    resources = client.get_resources(restApiId=api_id)
+    root_id = [resource for resource in resources["items"] if resource["path"] == "/"][
+        0
+    ]["id"]
+
+    client.put_method(
+        restApiId=api_id,
+        resourceId=root_id,
+        httpMethod="GET",
+        authorizationType="none",
+        apiKeyRequired=True,
+    )
+
+    response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET")
+
+    # this is hard to match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "httpMethod": "GET",
+            "authorizationType": "none",
+            "apiKeyRequired": True,
             "ResponseMetadata": {"HTTPStatusCode": 200},
         }
     )
@@ -498,6 +544,7 @@ def test_integration_response():
         selectionPattern="foobar",
         responseTemplates={},
     )
+
     # this is hard to match against, so remove it
     response["ResponseMetadata"].pop("HTTPHeaders", None)
     response["ResponseMetadata"].pop("RetryAttempts", None)
@@ -546,6 +593,311 @@ def test_integration_response():
     response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET")
     response["methodIntegration"]["integrationResponses"].should.equal({})
 
+    # adding a new method and performing put integration with contentHandling as CONVERT_TO_BINARY
+    client.put_method(
+        restApiId=api_id, resourceId=root_id, httpMethod="PUT", authorizationType="none"
+    )
+
+    client.put_method_response(
+        restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200"
+    )
+
+    client.put_integration(
+        restApiId=api_id,
+        resourceId=root_id,
+        httpMethod="PUT",
+        type="HTTP",
+        uri="http://httpbin.org/robots.txt",
+        integrationHttpMethod="POST",
+    )
+
+    response = client.put_integration_response(
+        restApiId=api_id,
+        resourceId=root_id,
+        httpMethod="PUT",
+        statusCode="200",
+        selectionPattern="foobar",
+        responseTemplates={},
+        contentHandling="CONVERT_TO_BINARY",
+    )
+
+    # this is hard to match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "statusCode": "200",
+            "selectionPattern": "foobar",
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+            "responseTemplates": {"application/json": None},
+            "contentHandling": "CONVERT_TO_BINARY",
+        }
+    )
+
+    response = client.get_integration_response(
+        restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200"
+    )
+    # this is hard to match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "statusCode": "200",
+            "selectionPattern": "foobar",
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+            "responseTemplates": {"application/json": None},
+            "contentHandling": "CONVERT_TO_BINARY",
+        }
+    )
+
+
+@mock_apigateway
+@mock_cognitoidp
+def test_update_authorizer_configuration():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    authorizer_name = "my_authorizer"
+    response = client.create_rest_api(name="my_api", description="this is my api")
+    api_id = response["id"]
+
+    cognito_client = boto3.client("cognito-idp", region_name="us-west-2")
+    user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[
+        "UserPool"
+    ]["Arn"]
+
+    response = client.create_authorizer(
+        restApiId=api_id,
+        name=authorizer_name,
+        type="COGNITO_USER_POOLS",
+        providerARNs=[user_pool_arn],
+        identitySource="method.request.header.Authorization",
+    )
+    authorizer_id = response["id"]
+
+    response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
+    # createdDate is hard to match against, remove it
+    response.pop("createdDate", None)
+    # this is hard to match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "id": authorizer_id,
+            "name": authorizer_name,
+            "type": "COGNITO_USER_POOLS",
+            "providerARNs": [user_pool_arn],
+            "identitySource": "method.request.header.Authorization",
+            "authorizerResultTtlInSeconds": 300,
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+        }
+    )
+
+    client.update_authorizer(
+        restApiId=api_id,
+        authorizerId=authorizer_id,
+        patchOperations=[{"op": "replace", "path": "/type", "value": "TOKEN"}],
+    )
+
+    authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
+
+    authorizer.should.have.key("type").which.should.equal("TOKEN")
+
+    client.update_authorizer(
+        restApiId=api_id,
+        authorizerId=authorizer_id,
+        patchOperations=[{"op": "replace", "path": "/type", "value": "REQUEST"}],
+    )
+
+    authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
+
+    authorizer.should.have.key("type").which.should.equal("REQUEST")
+
+    # TODO: implement multi-update tests
+
+    # patching an unknown setting should raise an error
+    with pytest.raises(Exception):
+        client.update_authorizer(
+            restApiId=api_id,
+            authorizerId=authorizer_id,
+            patchOperations=[
+                {"op": "add", "path": "/notasetting", "value": "eu-west-1"}
+            ],
+        )
+
+
+@mock_apigateway
+def test_non_existent_authorizer():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    response = client.create_rest_api(name="my_api", description="this is my api")
+    api_id = response["id"]
+
+    client.get_authorizer.when.called_with(
+        restApiId=api_id, authorizerId="xxx"
+    ).should.throw(ClientError)
+
+
+@mock_apigateway
+@mock_cognitoidp
+def test_create_authorizer():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    authorizer_name = "my_authorizer"
+    response = client.create_rest_api(name="my_api", description="this is my api")
+    api_id = response["id"]
+
+    cognito_client = boto3.client("cognito-idp", region_name="us-west-2")
+    user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[
+        "UserPool"
+    ]["Arn"]
+
+    response = client.create_authorizer(
+        restApiId=api_id,
+        name=authorizer_name,
+        type="COGNITO_USER_POOLS",
+
providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id = response["id"] + + response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + # createdDate is hard to match against, remove it + response.pop("createdDate", None) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "id": authorizer_id, + "name": authorizer_name, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + authorizer_name2 = "my_authorizer2" + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name2, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id2 = response["id"] + + response = client.get_authorizers(restApiId=api_id) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + + response["items"][0]["id"].should.match( + r"{0}|{1}".format(authorizer_id2, authorizer_id) + ) + response["items"][1]["id"].should.match( + r"{0}|{1}".format(authorizer_id2, authorizer_id) + ) + + new_authorizer_name_with_vars = "authorizer_with_vars" + response = client.create_authorizer( + restApiId=api_id, + name=new_authorizer_name_with_vars, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id3 = response["id"] + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + + response.should.equal( + { + "name": new_authorizer_name_with_vars, + "id": authorizer_id3, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + stage = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id3) + stage["name"].should.equal(new_authorizer_name_with_vars) + stage["id"].should.equal(authorizer_id3) + stage["type"].should.equal("COGNITO_USER_POOLS") + stage["providerARNs"].should.equal([user_pool_arn]) + stage["identitySource"].should.equal("method.request.header.Authorization") + stage["authorizerResultTtlInSeconds"].should.equal(300) + + +@mock_apigateway +@mock_cognitoidp +def test_delete_authorizer(): + client = boto3.client("apigateway", region_name="us-west-2") + authorizer_name = "my_authorizer" + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + cognito_client = boto3.client("cognito-idp", region_name="us-west-2") + user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[ + "UserPool" + ]["Arn"] + + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id = response["id"] + + response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + # createdDate is hard to match against, remove it + response.pop("createdDate", None) + # this is hard to 
match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "id": authorizer_id,
+            "name": authorizer_name,
+            "type": "COGNITO_USER_POOLS",
+            "providerARNs": [user_pool_arn],
+            "identitySource": "method.request.header.Authorization",
+            "authorizerResultTtlInSeconds": 300,
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+        }
+    )
+
+    authorizer_name2 = "my_authorizer2"
+    response = client.create_authorizer(
+        restApiId=api_id,
+        name=authorizer_name2,
+        type="COGNITO_USER_POOLS",
+        providerARNs=[user_pool_arn],
+        identitySource="method.request.header.Authorization",
+    )
+    authorizer_id2 = response["id"]
+
+    authorizers = client.get_authorizers(restApiId=api_id)["items"]
+    sorted([authorizer["name"] for authorizer in authorizers]).should.equal(
+        sorted([authorizer_name2, authorizer_name])
+    )
+    # delete the second authorizer
+    response = client.delete_authorizer(restApiId=api_id, authorizerId=authorizer_id2)
+    response["ResponseMetadata"]["HTTPStatusCode"].should.equal(202)
+    # verify the other authorizer still exists
+    authorizers = client.get_authorizers(restApiId=api_id)["items"]
+    sorted([authorizer["name"] for authorizer in authorizers]).should.equal(
+        sorted([authorizer_name])
+    )
+
 
 @mock_apigateway
 def test_update_stage_configuration():
@@ -842,10 +1194,10 @@ def test_create_deployment_requires_REST_methods():
     response = client.create_rest_api(name="my_api", description="this is my api")
     api_id = response["id"]
 
-    with assert_raises(ClientError) as ex:
+    with pytest.raises(ClientError) as ex:
         client.create_deployment(restApiId=api_id, stageName=stage_name)["id"]
-    ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("BadRequestException")
+    ex.value.response["Error"]["Message"].should.equal(
         "The REST API doesn't contain any methods"
     )
@@ -865,10 +1217,10 @@ def test_create_deployment_requires_REST_method_integrations():
         restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE"
     )
 
-    with assert_raises(ClientError) as ex:
+    with pytest.raises(ClientError) as ex:
         client.create_deployment(restApiId=api_id, stageName=stage_name)["id"]
-    ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("BadRequestException")
+    ex.value.response["Error"]["Message"].should.equal(
         "No integration defined for method"
     )
@@ -921,12 +1273,12 @@ def test_put_integration_response_requires_responseTemplate():
         integrationHttpMethod="POST",
     )
 
-    with assert_raises(ClientError) as ex:
+    with pytest.raises(ClientError) as ex:
         client.put_integration_response(
             restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
         )
-    ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
-    ex.exception.response["Error"]["Message"].should.equal("Invalid request input")
+    ex.value.response["Error"]["Code"].should.equal("BadRequestException")
+    ex.value.response["Error"]["Message"].should.equal("Invalid request input")
     # Works fine if responseTemplate is defined
     client.put_integration_response(
         restApiId=api_id,
@@ -937,6 +1289,65 @@
     )
+
+@mock_apigateway
+def test_put_integration_response_with_response_template():
+    client = boto3.client("apigateway", region_name="us-west-2")
+
response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE" + ) + client.put_method_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + client.put_integration( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + with pytest.raises(ClientError) as ex: + client.put_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal("Invalid request input") + + client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + statusCode="200", + selectionPattern="foobar", + responseTemplates={"application/json": json.dumps({"data": "test"})}, + ) + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": json.dumps({"data": "test"})}, + } + ) + + @mock_apigateway def test_put_integration_validation(): client = boto3.client("apigateway", region_name="us-west-2") @@ -961,7 +1372,7 @@ def test_put_integration_validation(): for type in types_requiring_integration_method: # Ensure that integrations of these types fail if no integrationHttpMethod is provided - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -969,8 +1380,8 @@ def test_put_integration_validation(): type=type, uri="http://httpbin.org/robots.txt", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Enumeration value for HttpMethod must be non-empty" ) for type in types_not_requiring_integration_method: @@ -1017,7 +1428,7 @@ def test_put_integration_validation(): ) for type in ["AWS_PROXY"]: # Ensure that aws_proxy does not support S3 - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1029,13 +1440,13 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Integrations of type 'AWS_PROXY' currently only supports Lambda function and Firehose stream invocations." 
) for type in aws_types: # Ensure that the Role ARN is for the current account - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1045,13 +1456,13 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("AccessDeniedException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDeniedException") + ex.value.response["Error"]["Message"].should.equal( "Cross-account pass role is not allowed." ) for type in ["AWS"]: # Ensure that the Role ARN is specified for aws integrations - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1060,13 +1471,13 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Role ARN must be specified for AWS integrations" ) for type in http_types: # Ensure that the URI is valid HTTP - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1075,13 +1486,13 @@ def test_put_integration_validation(): uri="non-valid-http", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Invalid HTTP endpoint specified for URI" ) for type in aws_types: # Ensure that the URI is an ARN - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1090,13 +1501,13 @@ def test_put_integration_validation(): uri="non-valid-arn", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Invalid ARN specified in the request" ) for type in aws_types: # Ensure that the URI is a valid ARN - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1105,8 +1516,8 @@ def test_put_integration_validation(): uri="arn:aws:iam::0000000000:role/service-role/asdf", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "AWS ARN for integration must contain path or action" ) @@ -1206,6 +1617,173 @@ def test_deployment(): stage["description"].should.equal("_new_description_") +@mock_apigateway +def test_create_domain_names(): + client = boto3.client("apigateway", region_name="us-west-2") + domain_name = "testDomain" + test_certificate_name = 
"test.certificate" + test_certificate_private_key = "testPrivateKey" + # success case with valid params + response = client.create_domain_name( + domainName=domain_name, + certificateName=test_certificate_name, + certificatePrivateKey=test_certificate_private_key, + ) + response["domainName"].should.equal(domain_name) + response["certificateName"].should.equal(test_certificate_name) + # without domain name it should throw BadRequestException + with pytest.raises(ClientError) as ex: + client.create_domain_name(domainName="") + + ex.value.response["Error"]["Message"].should.equal("No Domain Name specified") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + + +@mock_apigateway +def test_get_domain_names(): + client = boto3.client("apigateway", region_name="us-west-2") + # without any domain names already present + result = client.get_domain_names() + result["items"].should.equal([]) + domain_name = "testDomain" + test_certificate_name = "test.certificate" + response = client.create_domain_name( + domainName=domain_name, certificateName=test_certificate_name + ) + + response["domainName"].should.equal(domain_name) + response["certificateName"].should.equal(test_certificate_name) + response["domainNameStatus"].should.equal("AVAILABLE") + # after adding a new domain name + result = client.get_domain_names() + result["items"][0]["domainName"].should.equal(domain_name) + result["items"][0]["certificateName"].should.equal(test_certificate_name) + result["items"][0]["domainNameStatus"].should.equal("AVAILABLE") + + +@mock_apigateway +def test_get_domain_name(): + client = boto3.client("apigateway", region_name="us-west-2") + domain_name = "testDomain" + # quering an invalid domain name which is not present + with pytest.raises(ClientError) as ex: + client.get_domain_name(domainName=domain_name) + + ex.value.response["Error"]["Message"].should.equal("Invalid Domain Name specified") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + # adding a domain name + client.create_domain_name(domainName=domain_name) + # retrieving the data of added domain name. 
+ result = client.get_domain_name(domainName=domain_name) + result["domainName"].should.equal(domain_name) + result["domainNameStatus"].should.equal("AVAILABLE") + + +@mock_apigateway +def test_create_model(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + rest_api_id = response["id"] + dummy_rest_api_id = "a12b3c4d" + model_name = "testModel" + description = "test model" + content_type = "application/json" + # success case with valid params + response = client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type, + ) + response["name"].should.equal(model_name) + response["description"].should.equal(description) + + # with an invalid rest_api_id it should throw NotFoundException + with pytest.raises(ClientError) as ex: + client.create_model( + restApiId=dummy_rest_api_id, + name=model_name, + description=description, + contentType=content_type, + ) + ex.value.response["Error"]["Message"].should.equal("Invalid Rest API Id specified") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + + with pytest.raises(ClientError) as ex: + client.create_model( + restApiId=rest_api_id, + name="", + description=description, + contentType=content_type, + ) + + ex.value.response["Error"]["Message"].should.equal("No Model Name specified") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + + +@mock_apigateway +def test_get_api_models(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + rest_api_id = response["id"] + model_name = "testModel" + description = "test model" + content_type = "application/json" + # when no models are present + result = client.get_models(restApiId=rest_api_id) + result["items"].should.equal([]) + # add a model + client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type, + ) + # get models after adding + result = client.get_models(restApiId=rest_api_id) + result["items"][0]["name"] = model_name + result["items"][0]["description"] = description + + +@mock_apigateway +def test_get_model_by_name(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + rest_api_id = response["id"] + dummy_rest_api_id = "a12b3c4d" + model_name = "testModel" + description = "test model" + content_type = "application/json" + # add a model + client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type, + ) + # get models after adding + result = client.get_model(restApiId=rest_api_id, modelName=model_name) + result["name"] = model_name + result["description"] = description + + with pytest.raises(ClientError) as ex: + client.get_model(restApiId=dummy_rest_api_id, modelName=model_name) + ex.value.response["Error"]["Message"].should.equal("Invalid Rest API Id specified") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + + +@mock_apigateway +def test_get_model_with_invalid_name(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + rest_api_id = response["id"] + # test with an invalid model name + with pytest.raises(ClientError) as ex: + client.get_model(restApiId=rest_api_id, modelName="fake") + 
ex.value.response["Error"]["Message"].should.equal("Invalid Model Name specified") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + + @mock_apigateway def test_http_proxying_integration(): responses.add( @@ -1259,7 +1837,12 @@ def test_create_api_key(): apikey_name = "TESTKEY1" payload = {"value": apikey_value, "name": apikey_name} - client.create_api_key(**payload) + response = client.create_api_key(**payload) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(201) + response["name"].should.equal(apikey_name) + response["value"].should.equal(apikey_value) + response["enabled"].should.equal(False) + response["stageKeys"].should.equal([]) response = client.get_api_keys() len(response["items"]).should.equal(1) @@ -1267,6 +1850,23 @@ def test_create_api_key(): client.create_api_key.when.called_with(**payload).should.throw(ClientError) +@mock_apigateway +def test_create_api_headers(): + region_name = "us-west-2" + client = boto3.client("apigateway", region_name=region_name) + + apikey_value = "12345" + apikey_name = "TESTKEY1" + payload = {"value": apikey_value, "name": apikey_name} + + client.create_api_key(**payload) + with pytest.raises(ClientError) as ex: + client.create_api_key(**payload) + ex.value.response["Error"]["Code"].should.equal("ConflictException") + if not settings.TEST_SERVER_MODE: + ex.value.response["ResponseMetadata"]["HTTPHeaders"].should.equal({}) + + @mock_apigateway def test_api_keys(): region_name = "us-west-2" @@ -1316,7 +1916,8 @@ def test_api_keys(): response = client.get_api_keys() len(response["items"]).should.equal(2) - client.delete_api_key(apiKey=apikey_id) + response = client.delete_api_key(apiKey=apikey_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(202) response = client.get_api_keys() len(response["items"]).should.equal(1) @@ -1329,6 +1930,14 @@ def test_usage_plans(): response = client.get_usage_plans() len(response["items"]).should.equal(0) + # # Try to get info about a non existing usage + with pytest.raises(ClientError) as ex: + client.get_usage_plan(usagePlanId="not_existing") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( + "Invalid Usage Plan ID specified" + ) + usage_plan_name = "TEST-PLAN" payload = {"name": usage_plan_name} response = client.create_usage_plan(**payload) @@ -1389,6 +1998,7 @@ def test_usage_plan_keys(): key_type = "API_KEY" payload = {"usagePlanId": usage_plan_id, "keyId": key_id, "keyType": key_type} response = client.create_usage_plan_key(**payload) + response["ResponseMetadata"]["HTTPStatusCode"].should.equals(201) usage_plan_key_id = response["id"] # Get current plan keys (expect 1) @@ -1411,6 +2021,30 @@ def test_usage_plan_keys(): response = client.get_usage_plan_keys(usagePlanId=usage_plan_id) len(response["items"]).should.equal(0) + # Try to get info about a non existing api key + with pytest.raises(ClientError) as ex: + client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId="not_existing_key") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( + "Invalid API Key identifier specified" + ) + + # Try to get info about an existing api key that has not jet added to a valid usage plan + with pytest.raises(ClientError) as ex: + client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId=key_id) + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( + 
"Invalid Usage Plan ID specified" + ) + + # Try to get info about an existing api key that has not jet added to a valid usage plan + with pytest.raises(ClientError) as ex: + client.get_usage_plan_key(usagePlanId="not_existing_plan_id", keyId=key_id) + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( + "Invalid Usage Plan ID specified" + ) + @mock_apigateway def test_create_usage_plan_key_non_existent_api_key(): diff --git a/tests/test_apigateway/test_server.py b/tests/test_apigateway/test_server.py index 08b20cc61..9be948ef6 100644 --- a/tests/test_apigateway/test_server.py +++ b/tests/test_apigateway/test_server.py @@ -39,6 +39,10 @@ def test_usage_plans_apis(): fetched_plan = json.loads(res.data) fetched_plan.should.equal(created_plan) + # Not existing usage plan + res = test_client.get("/usageplans/{0}".format("not_existing")) + res.status_code.should.equal(404) + # Delete usage plan res = test_client.delete("/usageplans/{0}".format(created_plan["id"])) res.data.should.equal(b"{}") @@ -61,6 +65,24 @@ def test_usage_plans_keys(): res = test_client.get("/usageplans/{0}/keys".format(usage_plan_id)) json.loads(res.data)["item"].should.have.length_of(0) + # Invalid api key (does not exists at all) + res = test_client.get( + "/usageplans/{0}/keys/{1}".format(usage_plan_id, "not_existing") + ) + res.status_code.should.equal(404) + + # not existing usage plan with existing api key + res = test_client.get( + "/usageplans/{0}/keys/{1}".format("not_existing", created_api_key["id"]) + ) + res.status_code.should.equal(404) + + # not jet added api key + res = test_client.get( + "/usageplans/{0}/keys/{1}".format(usage_plan_id, created_api_key["id"]) + ) + res.status_code.should.equal(404) + # Create usage plan key res = test_client.post( "/usageplans/{0}/keys".format(usage_plan_id), diff --git a/tests/test_applicationautoscaling/__init__.py b/tests/test_applicationautoscaling/__init__.py new file mode 100644 index 000000000..baffc4882 --- /dev/null +++ b/tests/test_applicationautoscaling/__init__.py @@ -0,0 +1 @@ +from __future__ import unicode_literals diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py new file mode 100644 index 000000000..aed728ab6 --- /dev/null +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -0,0 +1,516 @@ +from __future__ import unicode_literals + +import boto3 +import botocore +import pytest +import sure # noqa +from moto import mock_applicationautoscaling, mock_ecs + +DEFAULT_REGION = "us-east-1" +DEFAULT_ECS_CLUSTER = "default" +DEFAULT_ECS_TASK = "test_ecs_task" +DEFAULT_ECS_SERVICE = "sample-webapp" +DEFAULT_SERVICE_NAMESPACE = "ecs" +DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) +DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" +DEFAULT_MIN_CAPACITY = 1 +DEFAULT_MAX_CAPACITY = 1 +DEFAULT_ROLE_ARN = "test:arn" +DEFAULT_SUSPENDED_STATE = { + "DynamicScalingInSuspended": True, + "DynamicScalingOutSuspended": True, + "ScheduledScalingSuspended": True, +} + + +def _create_ecs_defaults(ecs, create_service=True): + _ = ecs.create_cluster(clusterName=DEFAULT_ECS_CLUSTER) + _ = ecs.register_task_definition( + family=DEFAULT_ECS_TASK, + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": 
"SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + if create_service: + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName=DEFAULT_ECS_SERVICE, + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_one_basic_ecs_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + client.register_scalable_target( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, + ResourceId=DEFAULT_RESOURCE_ID, + ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("ServiceNamespace").which.should.equal(DEFAULT_SERVICE_NAMESPACE) + t.should.have.key("ResourceId").which.should.equal(DEFAULT_RESOURCE_ID) + t.should.have.key("ScalableDimension").which.should.equal( + DEFAULT_SCALABLE_DIMENSION + ) + t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_one_full_ecs_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target(client) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("ServiceNamespace").which.should.equal(DEFAULT_SERVICE_NAMESPACE) + t.should.have.key("ResourceId").which.should.equal(DEFAULT_RESOURCE_ID) + t.should.have.key("ScalableDimension").which.should.equal( + DEFAULT_SCALABLE_DIMENSION + ) + t.should.have.key("MinCapacity").which.should.equal(DEFAULT_MIN_CAPACITY) + t.should.have.key("MaxCapacity").which.should.equal(DEFAULT_MAX_CAPACITY) + t.should.have.key("RoleARN").which.should.equal(DEFAULT_ROLE_ARN) + t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + t.should.have.key("SuspendedState") + t["SuspendedState"]["DynamicScalingInSuspended"].should.equal( + DEFAULT_SUSPENDED_STATE["DynamicScalingInSuspended"] + ) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_only_return_ecs_targets(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs, create_service=False) + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName="test1", + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName="test2", + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/test1".format(DEFAULT_ECS_CLUSTER), + ) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/test2".format(DEFAULT_ECS_CLUSTER), + ) + register_scalable_target( + client, + ServiceNamespace="elasticmapreduce", + ResourceId="instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0", + 
ScalableDimension="elasticmapreduce:instancegroup:InstanceCount", + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(2) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_next_token_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs, create_service=False) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + for i in range(0, 100): + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName=str(i), + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/{}".format(DEFAULT_ECS_CLUSTER, i), + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(50) + response["ScalableTargets"][0]["ResourceId"].should.equal("service/default/0") + response.should.have.key("NextToken").which.should.equal("49") + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, NextToken=str(response["NextToken"]) + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(50) + response["ScalableTargets"][0]["ResourceId"].should.equal("service/default/50") + response.should_not.have.key("NextToken") + + +def register_scalable_target(client, **kwargs): + """ Build a default scalable target object for use in tests. """ + return client.register_scalable_target( + ServiceNamespace=kwargs.get("ServiceNamespace", DEFAULT_SERVICE_NAMESPACE), + ResourceId=kwargs.get("ResourceId", DEFAULT_RESOURCE_ID), + ScalableDimension=kwargs.get("ScalableDimension", DEFAULT_SCALABLE_DIMENSION), + MinCapacity=kwargs.get("MinCapacity", DEFAULT_MIN_CAPACITY), + MaxCapacity=kwargs.get("MaxCapacity", DEFAULT_MAX_CAPACITY), + RoleARN=kwargs.get("RoleARN", DEFAULT_ROLE_ARN), + SuspendedState=kwargs.get("SuspendedState", DEFAULT_SUSPENDED_STATE), + ) + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_resource_id_variations(): + + # Required to register an ECS target in moto + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + + # See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-applicationautoscaling-scalabletarget.html + resource_id_variations = [ + ( + DEFAULT_SERVICE_NAMESPACE, + DEFAULT_RESOURCE_ID, + DEFAULT_SCALABLE_DIMENSION, + ), # ECS + ( + "ec2", + "spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE", + "ec2:spot-fleet-request:TargetCapacity", + ), + ( + "elasticmapreduce", + "instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0", + "elasticmapreduce:instancegroup:InstanceCount", + ), + ("appstream", "fleet/sample-fleet", "appstream:fleet:DesiredCapacity"), + ("dynamodb", "table/my-table", "dynamodb:table:ReadCapacityUnits"), + ( + "dynamodb", + "table/my-table/index/my-table-index", + "dynamodb:index:ReadCapacityUnits", + ), + ("rds", "cluster:my-db-cluster", "rds:cluster:ReadReplicaCount"), + ( + "sagemaker", + "endpoint/MyEndPoint/variant/MyVariant", + "sagemaker:variant:DesiredInstanceCount", + ), + ( + "comprehend", + "arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE", + 
"comprehend:document-classifier-endpoint:DesiredInferenceUnits", + ), + ( + "lambda", + "function:my-function:prod", + "lambda:function:ProvisionedConcurrency", + ), + ( + "cassandra", + "keyspace/mykeyspace/table/mytable", + "cassandra:table:ReadCapacityUnits", + ), + ] + + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + for namespace, resource_id, scalable_dimension in resource_id_variations: + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + response = client.describe_scalable_targets(ServiceNamespace=namespace) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + num_targets = 2 if namespace == "dynamodb" and "index" in resource_id else 1 + len(response["ScalableTargets"]).should.equal(num_targets) + t = response["ScalableTargets"][-1] + t.should.have.key("ServiceNamespace").which.should.equal(namespace) + t.should.have.key("ResourceId").which.should.equal(resource_id) + t.should.have.key("ScalableDimension").which.should.equal(scalable_dimension) + t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_updates_existing_target(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target(client) + + updated_min_capacity = 3 + updated_max_capacity = 10 + updated_suspended_state = { + "DynamicScalingInSuspended": False, + "DynamicScalingOutSuspended": False, + "ScheduledScalingSuspended": False, + } + + client.register_scalable_target( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, + ResourceId=DEFAULT_RESOURCE_ID, + ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + MinCapacity=updated_min_capacity, + MaxCapacity=updated_max_capacity, + SuspendedState=updated_suspended_state, + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("MinCapacity").which.should.equal(updated_min_capacity) + t.should.have.key("MaxCapacity").which.should.equal(updated_max_capacity) + t.should.have.key("SuspendedState") + t["SuspendedState"]["DynamicScalingInSuspended"].should.equal( + updated_suspended_state["DynamicScalingInSuspended"] + ) + t["SuspendedState"]["DynamicScalingOutSuspended"].should.equal( + updated_suspended_state["DynamicScalingOutSuspended"] + ) + t["SuspendedState"]["ScheduledScalingSuspended"].should.equal( + updated_suspended_state["ScheduledScalingSuspended"] + ) + + +@mock_applicationautoscaling +def test_put_scaling_policy(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + with pytest.raises(client.exceptions.ValidationException) as e: + client.put_scaling_policy( + 
PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType="ABCDEFG", + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + e.value.response["Error"]["Message"].should.match( + r"Unknown policy type .* specified." + ) + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["PolicyARN"].should.match( + r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( + namespace, resource_id, policy_name + ) + ) + + +@mock_applicationautoscaling +def test_describe_scaling_policies(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_scaling_policies( + PolicyNames=[policy_name], + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + policy = response["ScalingPolicies"][0] + policy["PolicyName"].should.equal(policy_name) + policy["ServiceNamespace"].should.equal(namespace) + policy["ResourceId"].should.equal(resource_id) + policy["ScalableDimension"].should.equal(scalable_dimension) + policy["PolicyType"].should.equal(policy_type) + policy["TargetTrackingScalingPolicyConfiguration"].should.equal(policy_body) + policy["PolicyARN"].should.match( + r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( + namespace, resource_id, policy_name + ) + ) + policy.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_applicationautoscaling +def test_delete_scaling_policies(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + with pytest.raises(client.exceptions.ValidationException) as e: + client.delete_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + 
ScalableDimension=scalable_dimension, + ) + e.value.response["Error"]["Message"].should.match(r"No scaling policy found .*") + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.delete_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_scaling_policies( + PolicyNames=[policy_name], + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalingPolicies"]).should.equal(0) + + +@mock_applicationautoscaling +def test_deregister_scalable_target(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + response = client.describe_scalable_targets(ServiceNamespace=namespace) + len(response["ScalableTargets"]).should.equal(1) + + client.deregister_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + + response = client.describe_scalable_targets(ServiceNamespace=namespace) + len(response["ScalableTargets"]).should.equal(0) + + with pytest.raises(client.exceptions.ValidationException) as e: + client.deregister_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + e.value.response["Error"]["Message"].should.match(r"No scalable target found .*") diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py new file mode 100644 index 000000000..b074d3396 --- /dev/null +++ b/tests/test_applicationautoscaling/test_validation.py @@ -0,0 +1,123 @@ +from __future__ import unicode_literals +import boto3 +from moto import mock_applicationautoscaling, mock_ecs +from moto.applicationautoscaling import models +from moto.applicationautoscaling.exceptions import AWSValidationException +from botocore.exceptions import ParamValidationError +import pytest +import sure # noqa +from botocore.exceptions import ClientError +from .test_applicationautoscaling import register_scalable_target + +DEFAULT_REGION = "us-east-1" +DEFAULT_ECS_CLUSTER = "default" +DEFAULT_ECS_TASK = "test_ecs_task" +DEFAULT_ECS_SERVICE = "sample-webapp" +DEFAULT_SERVICE_NAMESPACE = "ecs" +DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) +DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" +DEFAULT_MIN_CAPACITY = 1 +DEFAULT_MAX_CAPACITY = 1 +DEFAULT_ROLE_ARN = "test:arn" + + +@mock_applicationautoscaling +def test_describe_scalable_targets_no_params_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with pytest.raises(ParamValidationError): + client.describe_scalable_targets() + + +@mock_applicationautoscaling +def 
test_register_scalable_target_no_params_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with pytest.raises(ParamValidationError): + client.register_scalable_target() + + +@mock_applicationautoscaling +def test_register_scalable_target_with_none_service_namespace_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with pytest.raises(ParamValidationError): + register_scalable_target(client, ServiceNamespace=None) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with pytest.raises(ClientError) as err: + client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, ScalableDimension="foo", + ) + err.value.response["Error"]["Code"].should.equal("ValidationException") + err.value.response["Error"]["Message"].split(":")[0].should.look_like( + "1 validation error detected" + ) + err.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_invalid_service_namespace_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with pytest.raises(ClientError) as err: + client.describe_scalable_targets( + ServiceNamespace="foo", ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + ) + err.value.response["Error"]["Code"].should.equal("ValidationException") + err.value.response["Error"]["Message"].split(":")[0].should.look_like( + "1 validation error detected" + ) + err.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_multiple_invalid_parameters_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with pytest.raises(ClientError) as err: + client.describe_scalable_targets( + ServiceNamespace="foo", ScalableDimension="bar", + ) + err.value.response["Error"]["Code"].should.equal("ValidationException") + err.value.response["Error"]["Message"].split(":")[0].should.look_like( + "2 validation errors detected" + ) + err.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_ecs_with_non_existent_service_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + resource_id = "service/{}/foo".format(DEFAULT_ECS_CLUSTER) + + with pytest.raises(ClientError) as err: + register_scalable_target(client, ServiceNamespace="ecs", ResourceId=resource_id) + err.value.response["Error"]["Code"].should.equal("ValidationException") + err.value.response["Error"]["Message"].should.equal( + "ECS service doesn't exist: {}".format(resource_id) + ) + err.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@pytest.mark.parametrize( + "namespace,r_id,dimension,expected", + [ + ("ecs", "service/default/test-svc", "ecs:service:DesiredCount", True), + ("ecs", "banana/default/test-svc", "ecs:service:DesiredCount", False), + ("rds", "service/default/test-svc", "ecs:service:DesiredCount", False), + ], +) +def test_target_params_are_valid(namespace, r_id, dimension, expected): + if expected is True: + models._target_params_are_valid(namespace, r_id, 
dimension).should.equal( + expected + ) + else: + with pytest.raises(AWSValidationException): + models._target_params_are_valid(namespace, r_id, dimension) + + +# TODO add a test for not-supplied MinCapacity or MaxCapacity (ValidationException) diff --git a/tests/test_athena/__init__.py b/tests/test_athena/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_athena/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index d36653910..f667f2316 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -import datetime - from botocore.exceptions import ClientError +import pytest import boto3 import sure # noqa @@ -57,3 +56,165 @@ def test_create_work_group(): work_group["Name"].should.equal("athena_workgroup") work_group["Description"].should.equal("Test work group") work_group["State"].should.equal("ENABLED") + + +@mock_athena +def test_create_and_get_workgroup(): + client = boto3.client("athena", region_name="us-east-1") + + create_basic_workgroup(client=client, name="athena_workgroup") + + work_group = client.get_work_group(WorkGroup="athena_workgroup")["WorkGroup"] + del work_group["CreationTime"] # We're not testing CreationTime at the moment + work_group.should.equal( + { + "Name": "athena_workgroup", + "State": "ENABLED", + "Configuration": { + "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/"} + }, + "Description": "Test work group", + } + ) + + +@mock_athena +def test_start_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + create_basic_workgroup(client=client, name="athena_workgroup") + response = client.start_query_execution( + QueryString="query1", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + WorkGroup="athena_workgroup", + ) + assert "QueryExecutionId" in response + + sec_response = client.start_query_execution( + QueryString="query2", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + ) + assert "QueryExecutionId" in sec_response + response["QueryExecutionId"].shouldnt.equal(sec_response["QueryExecutionId"]) + + +@mock_athena +def test_start_query_validate_workgroup(): + client = boto3.client("athena", region_name="us-east-1") + + with pytest.raises(ClientError) as err: + client.start_query_execution( + QueryString="query1", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + WorkGroup="unknown_workgroup", + ) + err.value.response["Error"]["Code"].should.equal("InvalidRequestException") + err.value.response["Error"]["Message"].should.equal("WorkGroup does not exist") + + +@mock_athena +def test_get_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + query = "SELECT stuff" + location = "s3://bucket-name/prefix/" + database = "database" + # Start Query + exec_id = client.start_query_execution( + QueryString=query, + QueryExecutionContext={"Database": database}, + ResultConfiguration={"OutputLocation": location}, + )["QueryExecutionId"] + # + details = client.get_query_execution(QueryExecutionId=exec_id)["QueryExecution"] + # + details["QueryExecutionId"].should.equal(exec_id) + details["Query"].should.equal(query) + details["StatementType"].should.equal("DDL") + details["ResultConfiguration"]["OutputLocation"].should.equal(location) + 
details["QueryExecutionContext"]["Database"].should.equal(database) + details["Status"]["State"].should.equal("QUEUED") + details["Statistics"].should.equal( + { + "EngineExecutionTimeInMillis": 0, + "DataScannedInBytes": 0, + "TotalExecutionTimeInMillis": 0, + "QueryQueueTimeInMillis": 0, + "QueryPlanningTimeInMillis": 0, + "ServiceProcessingTimeInMillis": 0, + } + ) + assert "WorkGroup" not in details + + +@mock_athena +def test_stop_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + query = "SELECT stuff" + location = "s3://bucket-name/prefix/" + database = "database" + # Start Query + exec_id = client.start_query_execution( + QueryString=query, + QueryExecutionContext={"Database": database}, + ResultConfiguration={"OutputLocation": location}, + )["QueryExecutionId"] + # Stop Query + client.stop_query_execution(QueryExecutionId=exec_id) + # Verify status + details = client.get_query_execution(QueryExecutionId=exec_id)["QueryExecution"] + # + details["QueryExecutionId"].should.equal(exec_id) + details["Status"]["State"].should.equal("CANCELLED") + + +@mock_athena +def test_create_named_query(): + client = boto3.client("athena", region_name="us-east-1") + + # create named query + res = client.create_named_query( + Name="query-name", Database="target_db", QueryString="SELECT * FROM table1", + ) + + assert "NamedQueryId" in res + + +@mock_athena +def test_get_named_query(): + client = boto3.client("athena", region_name="us-east-1") + query_name = "query-name" + database = "target_db" + query_string = "SELECT * FROM tbl1" + description = "description of this query" + + # create named query + res_create = client.create_named_query( + Name=query_name, + Database=database, + QueryString=query_string, + Description=description, + ) + query_id = res_create["NamedQueryId"] + + # get named query + res_get = client.get_named_query(NamedQueryId=query_id)["NamedQuery"] + res_get["Name"].should.equal(query_name) + res_get["Description"].should.equal(description) + res_get["Database"].should.equal(database) + res_get["QueryString"].should.equal(query_string) + res_get["NamedQueryId"].should.equal(query_id) + + +def create_basic_workgroup(client, name): + client.create_work_group( + Name=name, + Description="Test work group", + Configuration={ + "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/",} + }, + ) diff --git a/tests/test_autoscaling/__init__.py b/tests/test_autoscaling/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_autoscaling/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
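The Athena additions above cover the full start → describe → stop lifecycle. A minimal usage sketch of the mocked flow, for orientation only (not part of the patch; the region, database name, and output location are placeholder values):

```python
import boto3
from moto import mock_athena


@mock_athena
def demo_query_lifecycle():
    client = boto3.client("athena", region_name="us-east-1")

    # Any database/output strings are accepted by the mock
    exec_id = client.start_query_execution(
        QueryString="SELECT 1",
        QueryExecutionContext={"Database": "sample_db"},
        ResultConfiguration={"OutputLocation": "s3://bucket-name/prefix/"},
    )["QueryExecutionId"]

    # Per test_get_query_execution above, new executions start out QUEUED
    execution = client.get_query_execution(QueryExecutionId=exec_id)["QueryExecution"]
    assert execution["Status"]["State"] == "QUEUED"

    # Per test_stop_query_execution above, stopping flips the state to CANCELLED
    client.stop_query_execution(QueryExecutionId=exec_id)
    execution = client.get_query_execution(QueryExecutionId=exec_id)["QueryExecution"]
    assert execution["Status"]["State"] == "CANCELLED"
```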
diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 2e7255381..cbcd8eb20 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import ( mock_autoscaling, @@ -17,10 +17,11 @@ from moto import ( mock_elb, mock_autoscaling_deprecated, mock_ec2, + mock_cloudformation, ) from tests.helpers import requires_boto_gte -from utils import ( +from .utils import ( setup_networking, setup_networking_deprecated, setup_instance_with_networking, @@ -96,8 +97,8 @@ def test_create_autoscaling_group(): @mock_autoscaling_deprecated def test_create_autoscaling_groups_defaults(): - """ Test with the minimum inputs and check that all of the proper defaults - are assigned for the other attributes """ + """Test with the minimum inputs and check that all of the proper defaults + are assigned for the other attributes""" mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() @@ -164,7 +165,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling @mock_ec2 -def test_list_many_autoscaling_groups(): +def test_propagate_tags(): mocked_networking = setup_networking() conn = boto3.client("autoscaling", region_name="us-east-1") conn.create_launch_configuration(LaunchConfigurationName="TestLC") @@ -692,7 +693,7 @@ def test_detach_load_balancer(): def test_create_autoscaling_group_boto3(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) response = client.create_auto_scaling_group( @@ -780,7 +781,7 @@ def test_create_autoscaling_group_from_invalid_instance_id(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", InstanceId=invalid_instance_id, @@ -790,21 +791,179 @@ VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( + "Instance [{0}] is invalid.".format(invalid_instance_id) ) @mock_autoscaling -def test_describe_autoscaling_groups_boto3(): +@mock_ec2 +def test_create_autoscaling_group_from_template(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + response = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateId": 
template["LaunchTemplateId"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=1, + MaxSize=3, + DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_no_template_ref(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"Version": str(template["LatestVersionNumber"])}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_multiple_template_ref(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": template["LaunchTemplateName"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + +@mock_autoscaling +def test_create_autoscaling_group_boto3_no_launch_configuration(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + with pytest.raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." 
+ ) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_boto3_multiple_launch_configurations(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) - _ = client.create_auto_scaling_group( + + with pytest.raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + LaunchTemplate={ + "LaunchTemplateId": template["LaunchTemplateId"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." + ) + + +@mock_autoscaling +def test_describe_autoscaling_groups_boto3_launch_config(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, @@ -818,16 +977,133 @@ def test_describe_autoscaling_groups_boto3(): response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) group = response["AutoScalingGroups"][0] group["AutoScalingGroupName"].should.equal("test_asg") + group["LaunchConfigurationName"].should.equal("test_launch_configuration") + group.should_not.have.key("LaunchTemplate") group["AvailabilityZones"].should.equal(["us-east-1a"]) group["VPCZoneIdentifier"].should.equal(mocked_networking["subnet1"]) group["NewInstancesProtectedFromScaleIn"].should.equal(True) for instance in group["Instances"]: + instance["LaunchConfigurationName"].should.equal("test_launch_configuration") + instance.should_not.have.key("LaunchTemplate") instance["AvailabilityZone"].should.equal("us-east-1a") instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") @mock_autoscaling -def test_describe_autoscaling_instances_boto3(): +@mock_ec2 +def test_describe_autoscaling_groups_boto3_launch_template(): + mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + expected_launch_template = { + 
"LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template", + "Version": "1", + } + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + group = response["AutoScalingGroups"][0] + group["AutoScalingGroupName"].should.equal("test_asg") + group["LaunchTemplate"].should.equal(expected_launch_template) + group.should_not.have.key("LaunchConfigurationName") + group["AvailabilityZones"].should.equal(["us-east-1a"]) + group["VPCZoneIdentifier"].should.equal(mocked_networking["subnet1"]) + group["NewInstancesProtectedFromScaleIn"].should.equal(True) + for instance in group["Instances"]: + instance["LaunchTemplate"].should.equal(expected_launch_template) + instance.should_not.have.key("LaunchConfigurationName") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") + + +@mock_autoscaling +def test_describe_autoscaling_instances_boto3_launch_config(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_instances() + len(response["AutoScalingInstances"]).should.equal(5) + for instance in response["AutoScalingInstances"]: + instance["LaunchConfigurationName"].should.equal("test_launch_configuration") + instance.should_not.have.key("LaunchTemplate") + instance["AutoScalingGroupName"].should.equal("test_asg") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") + + +@mock_autoscaling +@mock_ec2 +def test_describe_autoscaling_instances_boto3_launch_template(): + mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + expected_launch_template = { + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template", + "Version": "1", + } + + response = client.describe_auto_scaling_instances() + len(response["AutoScalingInstances"]).should.equal(5) + for instance in response["AutoScalingInstances"]: + instance["LaunchTemplate"].should.equal(expected_launch_template) + instance.should_not.have.key("LaunchConfigurationName") + instance["AutoScalingGroupName"].should.equal("test_asg") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") + 
+ +@mock_autoscaling +def test_describe_autoscaling_instances_instanceid_filter(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( @@ -849,7 +1125,10 @@ def test_describe_autoscaling_instances_boto3(): for instance in response["AutoScalingGroups"][0]["Instances"] ] - response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) + response = client.describe_auto_scaling_instances( + InstanceIds=instance_ids[0:2] + ) # Filter by first 2 of 5 + len(response["AutoScalingInstances"]).should.equal(2) for instance in response["AutoScalingInstances"]: instance["AutoScalingGroupName"].should.equal("test_asg") instance["AvailabilityZone"].should.equal("us-east-1a") @@ -857,13 +1136,16 @@ def test_describe_autoscaling_instances_boto3(): @mock_autoscaling -def test_update_autoscaling_group_boto3(): +def test_update_autoscaling_group_boto3_launch_config(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) - _ = client.create_auto_scaling_group( + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration_new" + ) + client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, @@ -873,8 +1155,9 @@ def test_update_autoscaling_group_boto3(): NewInstancesProtectedFromScaleIn=True, ) - _ = client.update_auto_scaling_group( + client.update_auto_scaling_group( AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration_new", MinSize=1, VPCZoneIdentifier="{subnet1},{subnet2}".format( subnet1=mocked_networking["subnet1"], subnet2=mocked_networking["subnet2"] @@ -884,6 +1167,64 @@ def test_update_autoscaling_group_boto3(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) group = response["AutoScalingGroups"][0] + group["LaunchConfigurationName"].should.equal("test_launch_configuration_new") + group["MinSize"].should.equal(1) + set(group["AvailabilityZones"]).should.equal({"us-east-1a", "us-east-1b"}) + group["NewInstancesProtectedFromScaleIn"].should.equal(False) + + +@mock_autoscaling +@mock_ec2 +def test_update_autoscaling_group_boto3_launch_template(): + mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + ) + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template_new", + LaunchTemplateData={ + "ImageId": "ami-1ea5b10a3d8867db4", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + + client.update_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateName": "test_launch_template_new", + "Version": "1", + }, + MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking["subnet1"], 
subnet2=mocked_networking["subnet2"] + ), + NewInstancesProtectedFromScaleIn=False, + ) + + expected_launch_template = { + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template_new", + "Version": "1", + } + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["LaunchTemplate"].should.equal(expected_launch_template) group["MinSize"].should.equal(1) set(group["AvailabilityZones"]).should.equal({"us-east-1a", "us-east-1b"}) group["NewInstancesProtectedFromScaleIn"].should.equal(False) @@ -938,7 +1279,7 @@ def test_update_autoscaling_group_max_size_desired_capacity_change(): @mock_autoscaling -def test_autoscaling_taqs_update_boto3(): +def test_autoscaling_tags_update_boto3(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( @@ -1043,6 +1384,7 @@ def test_autoscaling_describe_policies_boto3(): response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): @@ -1068,14 +1410,25 @@ def test_detach_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] ec2_client = boto3.client("ec2", region_name="us-east-1") - response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) - response = client.detach_instances( AutoScalingGroupName="test_asg", InstanceIds=[instance_to_detach], @@ -1085,6 +1438,9 @@ def test_detach_one_instance_decrement(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]] + ) # test to ensure tag has been removed response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) @@ -1096,7 +1452,14 @@ def test_detach_one_instance_decrement(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance(): @@ -1122,14 +1485,25 @@ def test_detach_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", 
"us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] ec2_client = boto3.client("ec2", region_name="us-east-1") - response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) - response = client.detach_instances( AutoScalingGroupName="test_asg", InstanceIds=[instance_to_detach], @@ -1149,7 +1523,662 @@ def test_detach_one_instance(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_one_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=True, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(2) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + # test to ensure tag has been retained (standby instance is still part of the ASG) + response = ec2_client.describe_instances() + for reservation in response["Reservations"]: + for instance in reservation["Instances"]: + tags = instance["Tags"] + tags.should.have.length_of(2) + + response = 
elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_one_instance(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + # test to ensure tag has been retained (standby instance is still part of the ASG) + response = ec2_client.describe_instances() + for reservation in response["Reservations"]: + for instance in reservation["Instances"]: + tags = instance["Tags"] + tags.should.have.length_of(2) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_elb_update(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, 
+ } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_terminate_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_terminate], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_terminate] + ) 
+ response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.terminate_instance_in_auto_scaling_group( + InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=True + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # AWS still decrements the ASG's desired capacity if requested, even if the terminated instance is in standby + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal( + instance_to_standby_terminate + ) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal( + "terminated" + ) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_terminate_instance_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_terminate], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.terminate_instance_in_auto_scaling_group( + InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=False + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = 
client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(2) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal( + "terminated" + ) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_detach_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_detach] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.detach_instances( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=True, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # AWS still decrements the ASG's desired capacity if requested, even if the detached instance was in standby + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal( + instance_to_standby_detach + ) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = 
ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_detach_instance_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_detach] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.detach_instances( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(2) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb +@mock_autoscaling 
+@mock_ec2 +def test_standby_exit_standby(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_exit_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_exit_standby] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.exit_standby( + AutoScalingGroupName="test_asg", InstanceIds=[instance_to_standby_exit_standby], + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(3) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_exit_standby] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + + +@mock_elb @mock_autoscaling @mock_ec2 def test_attach_one_instance(): @@ -1177,6 +2206,18 @@ def test_attach_one_instance(): NewInstancesProtectedFromScaleIn=True, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + 
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + ec2 = boto3.resource("ec2", "us-east-1") instances_to_add = [ x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1) @@ -1193,6 +2234,9 @@ def test_attach_one_instance(): for instance in instances: instance["ProtectedFromScaleIn"].should.equal(True) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + @mock_autoscaling @mock_ec2 @@ -1383,7 +2427,7 @@ def test_set_desired_capacity_down_boto3(): @mock_autoscaling @mock_ec2 -def test_terminate_instance_in_autoscaling_group(): +def test_terminate_instance_via_ec2_in_autoscaling_group(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( @@ -1412,3 +2456,106 @@ def test_terminate_instance_in_autoscaling_group(): for instance in response["AutoScalingGroups"][0]["Instances"] ) replaced_instance_id.should_not.equal(original_instance_id) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_terminate_instance_in_auto_scaling_group_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + DesiredCapacity=1, + MaxSize=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + original_instance_id = next( + instance["InstanceId"] + for instance in response["AutoScalingGroups"][0]["Instances"] + ) + client.terminate_instance_in_auto_scaling_group( + InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=True + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.equal([]) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_terminate_instance_in_auto_scaling_group_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + DesiredCapacity=1, + MaxSize=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + 
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + original_instance_id = next( + instance["InstanceId"] + for instance in response["AutoScalingGroups"][0]["Instances"] + ) + client.terminate_instance_in_auto_scaling_group( + InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=False + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + replaced_instance_id = next( + instance["InstanceId"] + for instance in response["AutoScalingGroups"][0]["Instances"] + ) + replaced_instance_id.should_not.equal(original_instance_id) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + original_instance_id.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) diff --git a/tests/test_autoscaling/test_autoscaling_cloudformation.py b/tests/test_autoscaling/test_autoscaling_cloudformation.py new file mode 100644 index 000000000..24a5b5628 --- /dev/null +++ b/tests/test_autoscaling/test_autoscaling_cloudformation.py @@ -0,0 +1,276 @@ +import boto3 +import sure # noqa + +from moto import ( + mock_autoscaling, + mock_cloudformation, + mock_ec2, +) + +from .utils import setup_networking + + +@mock_autoscaling +@mock_cloudformation +def test_launch_configuration(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + stack_name = "test-launch-configuration" + + cf_template = """ +Resources: + LaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: ami-0cc293023f983ed53 + InstanceType: t2.micro + LaunchConfigurationName: test_launch_configuration +Outputs: + LaunchConfigurationName: + Value: !Ref LaunchConfiguration +""".strip() + + cf_client.create_stack( + StackName=stack_name, TemplateBody=cf_template, + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") + + lc = client.describe_launch_configurations()["LaunchConfigurations"][0] + lc["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + lc["ImageId"].should.be.equal("ami-0cc293023f983ed53") + lc["InstanceType"].should.be.equal("t2.micro") + + cf_template = """ +Resources: + LaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: ami-1ea5b10a3d8867db4 + InstanceType: m5.large + LaunchConfigurationName: test_launch_configuration +Outputs: + LaunchConfigurationName: + Value: !Ref LaunchConfiguration +""".strip() + + cf_client.update_stack( + StackName=stack_name, TemplateBody=cf_template, + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") + + lc = client.describe_launch_configurations()["LaunchConfigurations"][0] + lc["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + lc["ImageId"].should.be.equal("ami-1ea5b10a3d8867db4") + 
lc["InstanceType"].should.be.equal("m5.large") + + +@mock_autoscaling +@mock_cloudformation +def test_autoscaling_group_from_launch_config(): + subnet_id = setup_networking()["subnet1"] + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + stack_name = "test-auto-scaling-group" + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchConfigurationName: test_launch_configuration + MaxSize: "5" + MinSize: "1" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.create_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[{"ParameterKey": "SubnetId", "ParameterValue": subnet_id}], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(1) + asg["MaxSize"].should.be.equal(5) + asg["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration_new", + InstanceType="t2.micro", + ) + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchConfigurationName: test_launch_configuration_new + MaxSize: "6" + MinSize: "2" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.update_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[{"ParameterKey": "SubnetId", "ParameterValue": subnet_id}], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(2) + asg["MaxSize"].should.be.equal(6) + asg["LaunchConfigurationName"].should.be.equal("test_launch_configuration_new") + + +@mock_autoscaling +@mock_cloudformation +@mock_ec2 +def test_autoscaling_group_from_launch_template(): + subnet_id = setup_networking()["subnet1"] + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + ec2_client = boto3.client("ec2", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + template_response = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + ) + launch_template_id = template_response["LaunchTemplate"]["LaunchTemplateId"] + stack_name = "test-auto-scaling-group" + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id + LaunchTemplateId: + Type: String +Resources: + AutoScalingGroup: + Type: 
AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchTemplate: + LaunchTemplateId: !Ref LaunchTemplateId + Version: "1" + MaxSize: "5" + MinSize: "1" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.create_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[ + {"ParameterKey": "SubnetId", "ParameterValue": subnet_id}, + {"ParameterKey": "LaunchTemplateId", "ParameterValue": launch_template_id}, + ], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(1) + asg["MaxSize"].should.be.equal(5) + lt = asg["LaunchTemplate"] + lt["LaunchTemplateId"].should.be.equal(launch_template_id) + lt["LaunchTemplateName"].should.be.equal("test_launch_template") + lt["Version"].should.be.equal("1") + + template_response = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template_new", + LaunchTemplateData={ + "ImageId": "ami-1ea5b10a3d8867db4", + "InstanceType": "m5.large", + }, + ) + launch_template_id = template_response["LaunchTemplate"]["LaunchTemplateId"] + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id + LaunchTemplateId: + Type: String +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchTemplate: + LaunchTemplateId: !Ref LaunchTemplateId + Version: "1" + MaxSize: "6" + MinSize: "2" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.update_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[ + {"ParameterKey": "SubnetId", "ParameterValue": subnet_id}, + {"ParameterKey": "LaunchTemplateId", "ParameterValue": launch_template_id}, + ], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(2) + asg["MaxSize"].should.be.equal(6) + lt = asg["LaunchTemplate"] + lt["LaunchTemplateId"].should.be.equal(launch_template_id) + lt["LaunchTemplateName"].should.be.equal("test_launch_template_new") + lt["Version"].should.be.equal("1") diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index a3d3dba9f..d3b1cc5f8 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -4,7 +4,7 @@ import boto3 import sure # noqa from moto import mock_autoscaling, mock_ec2, mock_elbv2 -from utils import setup_networking +from .utils import setup_networking @mock_elbv2 diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index ab2743f54..3ed296f64 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -152,8 +152,8 @@ def test_create_launch_configuration_using_ip_association_should_default_to_fals 
@mock_autoscaling_deprecated def test_create_launch_configuration_defaults(): - """ Test with the minimum inputs and check that all of the proper defaults - are assigned for the other attributes """ + """Test with the minimum inputs and check that all of the proper defaults + are assigned for the other attributes""" conn = boto.connect_autoscale() config = LaunchConfiguration( name="tester", image_id="ami-abcd1234", instance_type="m1.small" diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index f44938eea..ca1009dbe 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,7 +7,7 @@ import sure # noqa from moto import mock_autoscaling_deprecated -from utils import setup_networking_deprecated +from .utils import setup_networking_deprecated def setup_autoscale_group(): @@ -170,7 +170,7 @@ def test_execute_policy_percent_change_in_capacity(): @mock_autoscaling_deprecated def test_execute_policy_small_percent_change_in_capacity(): - """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + """http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, Auto Scaling will round it off to 1.""" setup_autoscale_group() diff --git a/tests/test_awslambda/__init__.py b/tests/test_awslambda/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_awslambda/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_awslambda/test_awslambda_cloudformation.py b/tests/test_awslambda/test_awslambda_cloudformation.py new file mode 100644 index 000000000..6d998bfd4 --- /dev/null +++ b/tests/test_awslambda/test_awslambda_cloudformation.py @@ -0,0 +1,343 @@ +import boto3 +import io +import sure # noqa +import zipfile +from botocore.exceptions import ClientError +from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3, mock_sqs +import pytest +from string import Template +from uuid import uuid4 + + +def _process_lambda(func_str): + zip_output = io.BytesIO() + zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED) + zip_file.writestr("lambda_function.py", func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def get_zip_file(): + pfunc = """ +def lambda_handler1(event, context): + return event +def lambda_handler2(event, context): + return event +""" + return _process_lambda(pfunc) + + +template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "LF3ABOV": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Handler": "$handler", + "Role": "$role_arn", + "Runtime": "$runtime", + "Code": { + "S3Bucket": "$bucket_name", + "S3Key": "$key" + }, + } + } + } +}""" +) + +event_source_mapping_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "$resource_name": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties": { + "BatchSize": $batch_size, + "EventSourceArn": $event_source_arn, + "FunctionName": $function_name, + "Enabled": $enabled + } + } + } +}""" +) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +def test_lambda_can_be_updated_by_cloudformation(): + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + body2, stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, 
stack) + # Verify function has been created + created_fn = lmbda.get_function(FunctionName=created_fn_name) + created_fn["Configuration"]["Handler"].should.equal( + "lambda_function.lambda_handler1" + ) + created_fn["Configuration"]["Runtime"].should.equal("python3.7") + created_fn["Code"]["Location"].should.match("/test1.zip") + # Update CF stack + cf.update_stack(StackName="teststack", TemplateBody=body2) + updated_fn_name = get_created_function_name(cf, stack) + # Verify function has been updated + updated_fn = lmbda.get_function(FunctionName=updated_fn_name) + updated_fn["Configuration"]["FunctionArn"].should.equal( + created_fn["Configuration"]["FunctionArn"] + ) + updated_fn["Configuration"]["Handler"].should.equal( + "lambda_function.lambda_handler2" + ) + updated_fn["Configuration"]["Runtime"].should.equal("python3.8") + updated_fn["Code"]["Location"].should.match("/test2.zip") + + +@mock_cloudformation +@mock_lambda +@mock_s3 +def test_lambda_can_be_deleted_by_cloudformation(): + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + _, stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, stack) + # Delete Stack + cf.delete_stack(StackName=stack["StackId"]) + # Verify function was deleted + with pytest.raises(ClientError) as e: + lmbda.get_function(FunctionName=created_fn_name) + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_create_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + event_source = event_sources["EventSourceMappings"][0] + event_source["EventSourceArn"].should.be.equal(queue.attributes["QueueArn"]) + event_source["FunctionArn"].should.be.equal(created_fn_arn) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_stack(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + + template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + esm_stack = 
cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + + cf.delete_stack(StackName=esm_stack["StackId"]) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(0) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_update_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + updated_esm = event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update with deletion of old resources + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Bar", # changed name + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + 
"function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + updated_esm = event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + updated_esm["UUID"].shouldnt.equal(original_esm["UUID"]) + + +def create_stack(cf, s3): + bucket_name = str(uuid4()) + s3.create_bucket(Bucket=bucket_name) + s3.put_object(Bucket=bucket_name, Key="test1.zip", Body=get_zip_file()) + s3.put_object(Bucket=bucket_name, Key="test2.zip", Body=get_zip_file()) + body1 = get_template(bucket_name, "1", "python3.7") + body2 = get_template(bucket_name, "2", "python3.8") + stack = cf.create_stack(StackName="teststack", TemplateBody=body1) + return body2, stack + + +def get_created_function_name(cf, stack): + res = cf.list_stack_resources(StackName=stack["StackId"]) + return res["StackResourceSummaries"][0]["PhysicalResourceId"] + + +def get_template(bucket_name, version, runtime): + key = "test" + version + ".zip" + handler = "lambda_function.lambda_handler" + version + return template.substitute( + bucket_name=bucket_name, + key=key, + handler=handler, + role_arn=get_role_arn(), + runtime=runtime, + ) + + +def get_role_arn(): + with mock_iam(): + iam = boto3.client("iam", region_name="us-west-2") + try: + return iam.get_role(RoleName="my-role")["Role"]["Arn"] + except ClientError: + return iam.create_role( + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", + )["Role"]["Arn"] diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 4db13d220..8308195fb 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -24,7 +24,7 @@ from moto import ( mock_sqs, ) from moto.sts.models import ACCOUNT_ID -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError _lambda_region = "us-west-2" @@ -43,6 +43,7 @@ def _process_lambda(func_str): def get_test_zip_file1(): pfunc = """ def lambda_handler(event, context): + print("custom log event") return event """ return _process_lambda(pfunc) @@ -70,6 +71,7 @@ def lambda_handler(event, context): def get_test_zip_file3(): pfunc = """ def lambda_handler(event, context): + print("Nr_of_records("+str(len(event['Records']))+")") print("get_test_zip_file3 success") return event """ @@ -78,7 +80,7 @@ def lambda_handler(event, context): def get_test_zip_file4(): pfunc = """ -def lambda_handler(event, context): +def lambda_handler(event, context): raise Exception('I failed!') """ return _process_lambda(pfunc) @@ -86,14 +88,15 @@ def lambda_handler(event, context): @mock_lambda def test_list_functions(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.list_functions() result["Functions"].should.have.length_of(0) +@pytest.mark.network @mock_lambda def test_invoke_requestresponse_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -111,19 +114,31 @@ def test_invoke_requestresponse_function(): FunctionName="testFunction", InvocationType="RequestResponse", Payload=json.dumps(in_data), + LogType="Tail", ) - success_result["StatusCode"].should.equal(202) - result_obj = json.loads( - 
base64.b64decode(success_result["LogResult"]).decode("utf-8") - ) + success_result["StatusCode"].should.equal(200) + logs = base64.b64decode(success_result["LogResult"]).decode("utf-8") - result_obj.should.equal(in_data) + logs.should.contain("START RequestId:") + logs.should.contain("custom log event") + logs.should.contain("END RequestId:") payload = success_result["Payload"].read().decode("utf-8") json.loads(payload).should.equal(in_data) + # Logs should not be returned by default, only when the LogType-param is supplied + success_result = conn.invoke( + FunctionName="testFunction", + InvocationType="RequestResponse", + Payload=json.dumps(in_data), + ) + success_result["StatusCode"].should.equal(200) + assert "LogResult" not in success_result + + +@pytest.mark.network @mock_lambda def test_invoke_requestresponse_function_with_arn(): from moto.awslambda.models import ACCOUNT_ID @@ -150,20 +165,16 @@ def test_invoke_requestresponse_function_with_arn(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) - result_obj = json.loads( - base64.b64decode(success_result["LogResult"]).decode("utf-8") - ) - - result_obj.should.equal(in_data) + success_result["StatusCode"].should.equal(200) payload = success_result["Payload"].read().decode("utf-8") json.loads(payload).should.equal(in_data) +@pytest.mark.network @mock_lambda def test_invoke_event_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -188,16 +199,45 @@ def test_invoke_event_function(): json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(in_data) +@pytest.mark.network +@mock_lambda +def test_invoke_dryrun_function(): + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1(),}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.invoke.when.called_with( + FunctionName="notAFunction", InvocationType="Event", Payload="{}" + ).should.throw(botocore.client.ClientError) + + in_data = {"msg": "So long and thanks for all the fish"} + success_result = conn.invoke( + FunctionName="testFunction", + InvocationType="DryRun", + Payload=json.dumps(in_data), + ) + success_result["StatusCode"].should.equal(204) + + if settings.TEST_SERVER_MODE: @mock_ec2 @mock_lambda def test_invoke_function_get_ec2_volume(): - conn = boto3.resource("ec2", "us-west-2") - vol = conn.create_volume(Size=99, AvailabilityZone="us-west-2") + conn = boto3.resource("ec2", _lambda_region) + vol = conn.create_volume(Size=99, AvailabilityZone=_lambda_region) vol = conn.Volume(vol.id) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python3.7", @@ -216,25 +256,26 @@ if settings.TEST_SERVER_MODE: InvocationType="RequestResponse", Payload=json.dumps(in_data), ) - result["StatusCode"].should.equal(202) + result["StatusCode"].should.equal(200) actual_payload = json.loads(result["Payload"].read().decode("utf-8")) expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size} actual_payload.should.equal(expected_payload) +@pytest.mark.network @mock_logs @mock_sns @mock_ec2 @mock_lambda def test_invoke_function_from_sns(): - logs_conn = boto3.client("logs", region_name="us-west-2") - 
sns_conn = boto3.client("sns", region_name="us-west-2") + logs_conn = boto3.client("logs", region_name=_lambda_region) + sns_conn = boto3.client("sns", region_name=_lambda_region) sns_conn.create_topic(Name="some-topic") topics_json = sns_conn.list_topics() topics = topics_json["Topics"] topic_arn = topics[0]["TopicArn"] - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -277,7 +318,7 @@ def test_invoke_function_from_sns(): @mock_lambda def test_create_based_on_s3_with_missing_bucket(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function.when.called_with( FunctionName="testFunction", @@ -297,12 +338,15 @@ def test_create_based_on_s3_with_missing_bucket(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_create_function_from_aws_bucket(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", @@ -350,7 +394,7 @@ def test_create_function_from_aws_bucket(): @mock_lambda @freeze_time("2015-01-01 00:00:00") def test_create_function_from_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -395,12 +439,15 @@ def test_create_function_from_zipfile(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_get_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -447,7 +494,7 @@ def test_get_function(): {"test_variable": "test_value"} ) - # Test get function with + # Test get function with qualifier result = conn.get_function(FunctionName="testFunction", Qualifier="$LATEST") result["Configuration"]["Version"].should.equal("$LATEST") result["Configuration"]["FunctionArn"].should.equal( @@ -455,7 +502,7 @@ def test_get_function(): ) # Test get function when can't find function name - with assert_raises(ClientError): + with pytest.raises(conn.exceptions.ResourceNotFoundException): conn.get_function(FunctionName="junk", Qualifier="$LATEST") @@ -464,7 +511,10 @@ def test_get_function(): def test_get_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -489,12 +539,15 @@ def test_get_function_by_arn(): @mock_lambda @mock_s3 def 
test_delete_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -525,7 +578,10 @@ def test_delete_function(): def test_delete_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -550,7 +606,7 @@ def test_delete_function_by_arn(): @mock_lambda def test_delete_unknown_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.delete_function.when.called_with( FunctionName="testFunctionThatDoesntExist" ).should.throw(botocore.client.ClientError) @@ -559,12 +615,15 @@ def test_delete_unknown_function(): @mock_lambda @mock_s3 def test_publish(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -609,12 +668,15 @@ def test_list_create_list_get_delete_list(): test `list -> create -> list -> get -> delete -> list` integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_functions()["Functions"].should.have.length_of(0) @@ -672,6 +734,7 @@ def test_list_create_list_get_delete_list(): conn.list_functions()["Functions"].should.have.length_of(0) +@pytest.mark.network @mock_lambda def test_invoke_lambda_error(): lambda_fx = """ @@ -711,12 +774,15 @@ def test_tags(): """ test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) function = conn.create_function( FunctionName="testFunction", @@ -768,7 +834,7 @@ def test_tags_not_found(): """ Test list_tags and tag_resource when the lambda with 
the given arn does not exist """ - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_tags.when.called_with( Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID) ).should.throw(botocore.client.ClientError) @@ -784,9 +850,10 @@ def test_tags_not_found(): ).should.throw(botocore.client.ClientError) +@pytest.mark.network @mock_lambda def test_invoke_async_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -809,7 +876,7 @@ def test_invoke_async_function(): @mock_lambda @freeze_time("2015-01-01 00:00:00") def test_get_function_created_with_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -855,7 +922,7 @@ def test_get_function_created_with_zipfile(): @mock_lambda def test_add_function_permission(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -886,7 +953,7 @@ def test_add_function_permission(): @mock_lambda def test_get_function_policy(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -921,12 +988,15 @@ def test_get_function_policy(): @mock_lambda @mock_s3 def test_list_versions_by_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -977,12 +1047,15 @@ def test_list_versions_by_function(): @mock_lambda @mock_s3 def test_create_function_with_already_exists(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -1014,7 +1087,7 @@ def test_create_function_with_already_exists(): @mock_lambda @mock_s3 def test_list_versions_by_function_for_nonexistent_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) versions = conn.list_versions_by_function(FunctionName="testFunction") assert len(versions["Versions"]) == 0 @@ -1049,11 +1122,11 @@ def test_create_event_source_mapping(): assert response["State"] == "Enabled" +@pytest.mark.network @mock_logs @mock_lambda @mock_sqs def test_invoke_function_from_sqs(): - logs_conn = boto3.client("logs", region_name="us-east-1") sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-sqs-queue1") @@ -1079,32 +1152,23 @@ def 
test_invoke_function_from_sqs(): sqs_client = boto3.client("sqs", region_name="us-east-1") sqs_client.send_message(QueueUrl=queue.url, MessageBody="test") - start = time.time() - while (time.time() - start) < 30: - result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") - log_streams = result.get("logStreams") - if not log_streams: - time.sleep(1) - continue - assert len(log_streams) == 1 - result = logs_conn.get_log_events( - logGroupName="/aws/lambda/testFunction", - logStreamName=log_streams[0]["logStreamName"], - ) - for event in result.get("events"): - if event["message"] == "get_test_zip_file3 success": - return - time.sleep(1) + expected_msg = "get_test_zip_file3 success" + log_group = "/aws/lambda/testFunction" + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) - assert False, "Test Failed" + assert msg_showed_up, ( + expected_msg + + " was not found after sending an SQS message. All logs: " + + all_logs + ) +@pytest.mark.network @mock_logs @mock_lambda @mock_dynamodb2 -def test_invoke_function_from_dynamodb(): - logs_conn = boto3.client("logs", region_name="us-east-1") +def test_invoke_function_from_dynamodb_put(): dynamodb = boto3.client("dynamodb", region_name="us-east-1") table_name = "table_with_stream" table = dynamodb.create_table( @@ -1139,27 +1203,100 @@ def test_invoke_function_from_dynamodb(): assert response["State"] == "Enabled" dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) + + expected_msg = "get_test_zip_file3 success" + log_group = "/aws/lambda/testFunction" + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) + + assert msg_showed_up, ( + expected_msg + " was not found after a DDB insert. All logs: " + all_logs + ) + + +@pytest.mark.network +@mock_logs +@mock_lambda +@mock_dynamodb2 +def test_invoke_function_from_dynamodb_update(): + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + table_name = "table_with_stream" + table = dynamodb.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + StreamSpecification={ + "StreamEnabled": True, + "StreamViewType": "NEW_AND_OLD_IMAGES", + }, + ) + + conn = boto3.client("lambda", region_name="us-east-1") + func = conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file3()}, + Description="test lambda function executed after a DynamoDB table is updated", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.create_event_source_mapping( + EventSourceArn=table["TableDescription"]["LatestStreamArn"], + FunctionName=func["FunctionArn"], + ) + + dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) + log_group = "/aws/lambda/testFunction" + expected_msg = "get_test_zip_file3 success" + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) + assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted" + + dynamodb.update_item( + TableName=table_name, + Key={"id": {"S": "item 1"}}, + UpdateExpression="set #attr = :val", + ExpressionAttributeNames={"#attr": "new_attr"}, + ExpressionAttributeValues={":val": {"S": "new_val"}}, + ) + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) + + assert msg_showed_up, ( + expected_msg + " was not found after updating DDB. 
All logs: " + str(all_logs) + ) + assert "Nr_of_records(1)" in all_logs, "Only one item should be updated" + assert ( + "Nr_of_records(2)" not in all_logs + ), "The inserted item should not show up again" + + +def wait_for_log_msg(expected_msg, log_group): + logs_conn = boto3.client("logs", region_name="us-east-1") + received_messages = [] start = time.time() - while (time.time() - start) < 30: - result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") + while (time.time() - start) < 10: + result = logs_conn.describe_log_streams(logGroupName=log_group) log_streams = result.get("logStreams") if not log_streams: time.sleep(1) continue - assert len(log_streams) == 1 - result = logs_conn.get_log_events( - logGroupName="/aws/lambda/testFunction", - logStreamName=log_streams[0]["logStreamName"], - ) - for event in result.get("events"): - if event["message"] == "get_test_zip_file3 success": - return + for log_stream in log_streams: + result = logs_conn.get_log_events( + logGroupName=log_group, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + if expected_msg in received_messages: + return True, received_messages time.sleep(1) - - assert False, "Test Failed" + return False, received_messages +@pytest.mark.network @mock_logs @mock_lambda @mock_sqs @@ -1320,11 +1457,12 @@ def test_update_event_source_mapping(): assert response["State"] == "Enabled" mapping = conn.update_event_source_mapping( - UUID=response["UUID"], Enabled=False, BatchSize=15, FunctionName="testFunction2" + UUID=response["UUID"], Enabled=False, BatchSize=2, FunctionName="testFunction2" ) assert mapping["UUID"] == response["UUID"] assert mapping["FunctionArn"] == func2["FunctionArn"] assert mapping["State"] == "Disabled" + assert mapping["BatchSize"] == 2 @mock_lambda @@ -1363,12 +1501,15 @@ def test_delete_event_source_mapping(): @mock_lambda @mock_s3 def test_update_configuration(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunction", @@ -1395,6 +1536,7 @@ def test_update_configuration(): Handler="lambda_function.new_lambda_handler", Runtime="python3.6", Timeout=7, + VpcConfig={"SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"]}, Environment={"Variables": {"test_environment": "test_value"}}, ) @@ -1407,11 +1549,16 @@ def test_update_configuration(): assert updated_config["Environment"]["Variables"] == { "test_environment": "test_value" } + assert updated_config["VpcConfig"] == { + "SecurityGroupIds": ["sg-123abc"], + "SubnetIds": ["subnet-123abc"], + "VpcId": "vpc-123abc", + } @mock_lambda def test_update_function_zip(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content_one = get_test_zip_file1() @@ -1466,13 +1613,16 @@ def test_update_function_zip(): @mock_lambda @mock_s3 def test_update_function_s3(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + 
CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunctionS3", @@ -1529,15 +1679,15 @@ def test_update_function_s3(): @mock_lambda def test_create_function_with_invalid_arn(): err = create_invalid_lambda("test-iam-role") - err.exception.response["Error"]["Message"].should.equal( - "1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+" + err.value.response["Error"]["Message"].should.equal( + r"1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+" ) @mock_lambda def test_create_function_with_arn_from_different_account(): err = create_invalid_lambda("arn:aws:iam::000000000000:role/example_role") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Message"].should.equal( "Cross-account pass role is not allowed." ) @@ -1547,15 +1697,127 @@ def test_create_function_with_unknown_arn(): err = create_invalid_lambda( "arn:aws:iam::" + str(ACCOUNT_ID) + ":role/service-role/unknown_role" ) - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Message"].should.equal( "The role defined for the function cannot be assumed by Lambda." ) -def create_invalid_lambda(role): - conn = boto3.client("lambda", "us-west-2") +@mock_lambda +def test_remove_function_permission(): + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() - with assert_raises(ClientError) as err: + conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": zip_content}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.add_permission( + FunctionName="testFunction", + StatementId="1", + Action="lambda:InvokeFunction", + Principal="432143214321", + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount="123412341234", + EventSourceToken="blah", + Qualifier="2", + ) + + remove = conn.remove_permission( + FunctionName="testFunction", StatementId="1", Qualifier="2", + ) + remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) + policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"] + policy = json.loads(policy) + policy["Statement"].should.equal([]) + + +@mock_lambda +def test_put_function_concurrency(): + expected_concurrency = 15 + function_name = "test" + + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + result = conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=expected_concurrency + ) + + result["ReservedConcurrentExecutions"].should.equal(expected_concurrency) + + +@mock_lambda +def test_delete_function_concurrency(): + function_name = "test" + + conn = 
boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=15 + ) + + conn.delete_function_concurrency(FunctionName=function_name) + result = conn.get_function(FunctionName=function_name) + + result.doesnt.have.key("Concurrency") + + +@mock_lambda +def test_get_function_concurrency(): + expected_concurrency = 15 + function_name = "test" + + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=expected_concurrency + ) + + result = conn.get_function_concurrency(FunctionName=function_name) + + result["ReservedConcurrentExecutions"].should.equal(expected_concurrency) + + +def create_invalid_lambda(role): + conn = boto3.client("lambda", _lambda_region) + zip_content = get_test_zip_file1() + with pytest.raises(ClientError) as err: conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -1572,7 +1834,7 @@ def create_invalid_lambda(role): def get_role_name(): with mock_iam(): - iam = boto3.client("iam", region_name="us-west-2") + iam = boto3.client("iam", region_name=_lambda_region) try: return iam.get_role(RoleName="my-role")["Role"]["Arn"] except ClientError: diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_lambda_cloudformation.py deleted file mode 100644 index a5d4d23fd..000000000 --- a/tests/test_awslambda/test_lambda_cloudformation.py +++ /dev/null @@ -1,138 +0,0 @@ -import boto3 -import io -import sure # noqa -import zipfile -from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3 -from nose.tools import assert_raises -from string import Template -from uuid import uuid4 - - -def _process_lambda(func_str): - zip_output = io.BytesIO() - zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED) - zip_file.writestr("lambda_function.py", func_str) - zip_file.close() - zip_output.seek(0) - return zip_output.read() - - -def get_zip_file(): - pfunc = """ -def lambda_handler1(event, context): - return event -def lambda_handler2(event, context): - return event -""" - return _process_lambda(pfunc) - - -template = Template( - """{ - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "LF3ABOV": { - "Type": "AWS::Lambda::Function", - "Properties": { - "Handler": "$handler", - "Role": "$role_arn", - "Runtime": "$runtime", - "Code": { - "S3Bucket": "$bucket_name", - "S3Key": "$key" - }, - } - } - } -}""" -) - - -@mock_cloudformation -@mock_lambda -@mock_s3 -def test_lambda_can_be_updated_by_cloudformation(): - s3 = boto3.client("s3", "us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - lmbda = boto3.client("lambda", region_name="us-east-1") - body2, stack = create_stack(cf, s3) - created_fn_name = get_created_function_name(cf, stack) - # Verify function has been created - created_fn = lmbda.get_function(FunctionName=created_fn_name) - 
created_fn["Configuration"]["Handler"].should.equal( - "lambda_function.lambda_handler1" - ) - created_fn["Configuration"]["Runtime"].should.equal("python3.7") - created_fn["Code"]["Location"].should.match("/test1.zip") - # Update CF stack - cf.update_stack(StackName="teststack", TemplateBody=body2) - updated_fn_name = get_created_function_name(cf, stack) - # Verify function has been updated - updated_fn = lmbda.get_function(FunctionName=updated_fn_name) - updated_fn["Configuration"]["FunctionArn"].should.equal( - created_fn["Configuration"]["FunctionArn"] - ) - updated_fn["Configuration"]["Handler"].should.equal( - "lambda_function.lambda_handler2" - ) - updated_fn["Configuration"]["Runtime"].should.equal("python3.8") - updated_fn["Code"]["Location"].should.match("/test2.zip") - - -@mock_cloudformation -@mock_lambda -@mock_s3 -def test_lambda_can_be_deleted_by_cloudformation(): - s3 = boto3.client("s3", "us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - lmbda = boto3.client("lambda", region_name="us-east-1") - _, stack = create_stack(cf, s3) - created_fn_name = get_created_function_name(cf, stack) - # Delete Stack - cf.delete_stack(StackName=stack["StackId"]) - # Verify function was deleted - with assert_raises(ClientError) as e: - lmbda.get_function(FunctionName=created_fn_name) - e.exception.response["Error"]["Code"].should.equal("404") - - -def create_stack(cf, s3): - bucket_name = str(uuid4()) - s3.create_bucket(Bucket=bucket_name) - s3.put_object(Bucket=bucket_name, Key="test1.zip", Body=get_zip_file()) - s3.put_object(Bucket=bucket_name, Key="test2.zip", Body=get_zip_file()) - body1 = get_template(bucket_name, "1", "python3.7") - body2 = get_template(bucket_name, "2", "python3.8") - stack = cf.create_stack(StackName="teststack", TemplateBody=body1) - return body2, stack - - -def get_created_function_name(cf, stack): - res = cf.list_stack_resources(StackName=stack["StackId"]) - return res["StackResourceSummaries"][0]["PhysicalResourceId"] - - -def get_template(bucket_name, version, runtime): - key = "test" + version + ".zip" - handler = "lambda_function.lambda_handler" + version - return template.substitute( - bucket_name=bucket_name, - key=key, - handler=handler, - role_arn=get_role_arn(), - runtime=runtime, - ) - - -def get_role_arn(): - with mock_iam(): - iam = boto3.client("iam", region_name="us-west-2") - try: - return iam.get_role(RoleName="my-role")["Role"]["Arn"] - except ClientError: - return iam.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", - )["Role"]["Arn"] diff --git a/tests/test_batch/__init__.py b/tests/test_batch/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_batch/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 141d6b343..67f24bebc 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -6,20 +6,7 @@ import boto3 from botocore.exceptions import ClientError import sure # noqa from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs -import functools -import nose - - -def expected_failure(test): - @functools.wraps(test) - def inner(*args, **kwargs): - try: - test(*args, **kwargs) - except Exception as err: - raise nose.SkipTest - - return inner - +import pytest DEFAULT_REGION = "eu-central-1" @@ -692,12 +679,14 @@ def test_submit_job_by_name(): # SLOW TESTS -@expected_failure + + @mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch +@pytest.mark.network def test_submit_job(): ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) @@ -720,13 +709,13 @@ def test_submit_job(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="sayhellotomylittlefriend", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["echo", "hello"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -736,42 +725,26 @@ def test_submit_job(): ) job_id = resp["jobId"] - future = datetime.datetime.now() + datetime.timedelta(seconds=30) + _wait_for_job_status(batch_client, job_id, "SUCCEEDED") - while datetime.datetime.now() < future: - resp = batch_client.describe_jobs(jobs=[job_id]) - print( - "{0}:{1} {2}".format( - resp["jobs"][0]["jobName"], - resp["jobs"][0]["jobId"], - resp["jobs"][0]["status"], - ) - ) - - if resp["jobs"][0]["status"] == "FAILED": - raise RuntimeError("Batch job failed") - if resp["jobs"][0]["status"] == "SUCCEEDED": - break - time.sleep(0.5) - else: - raise RuntimeError("Batch job timed out") - - resp = logs_client.describe_log_streams(logGroupName="/aws/batch/job") + resp = logs_client.describe_log_streams( + logGroupName="/aws/batch/job", logStreamNamePrefix="sayhellotomylittlefriend" + ) len(resp["logStreams"]).should.equal(1) ls_name = resp["logStreams"][0]["logStreamName"] resp = logs_client.get_log_events( logGroupName="/aws/batch/job", logStreamName=ls_name ) - len(resp["events"]).should.be.greater_than(5) + [event["message"] for event in resp["events"]].should.equal(["hello"]) -@expected_failure @mock_logs @mock_ec2 @mock_ecs @mock_iam @mock_batch +@pytest.mark.network def test_list_jobs(): ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) @@ -794,13 +767,13 @@ def test_list_jobs(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="sleep5", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["sleep", "5"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -814,26 +787,13 @@ def test_list_jobs(): ) job_id2 = resp["jobId"] - future = datetime.datetime.now() + datetime.timedelta(seconds=30) - resp_finished_jobs = batch_client.list_jobs( jobQueue=queue_arn, jobStatus="SUCCEEDED" ) # Wait only as long as it takes to run the jobs - while datetime.datetime.now() < future: - resp = batch_client.describe_jobs(jobs=[job_id1, job_id2]) - - 
any_failed_jobs = any([job["status"] == "FAILED" for job in resp["jobs"]]) - succeeded_jobs = all([job["status"] == "SUCCEEDED" for job in resp["jobs"]]) - - if any_failed_jobs: - raise RuntimeError("A Batch job failed") - if succeeded_jobs: - break - time.sleep(0.5) - else: - raise RuntimeError("Batch jobs timed out") + for job_id in [job_id1, job_id2]: + _wait_for_job_status(batch_client, job_id, "SUCCEEDED") resp_finished_jobs2 = batch_client.list_jobs( jobQueue=queue_arn, jobStatus="SUCCEEDED" @@ -843,7 +803,6 @@ def test_list_jobs(): len(resp_finished_jobs2["jobSummaryList"]).should.equal(2) -@expected_failure @mock_logs @mock_ec2 @mock_ecs @@ -871,13 +830,13 @@ def test_terminate_job(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="echo-sleep-echo", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["sh", "-c", "echo start && sleep 30 && echo stop"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -887,13 +846,43 @@ def test_terminate_job(): ) job_id = resp["jobId"] - time.sleep(2) + _wait_for_job_status(batch_client, job_id, "RUNNING") batch_client.terminate_job(jobId=job_id, reason="test_terminate") - time.sleep(1) + _wait_for_job_status(batch_client, job_id, "FAILED") resp = batch_client.describe_jobs(jobs=[job_id]) resp["jobs"][0]["jobName"].should.equal("test1") resp["jobs"][0]["status"].should.equal("FAILED") resp["jobs"][0]["statusReason"].should.equal("test_terminate") + + resp = logs_client.describe_log_streams( + logGroupName="/aws/batch/job", logStreamNamePrefix="echo-sleep-echo" + ) + len(resp["logStreams"]).should.equal(1) + ls_name = resp["logStreams"][0]["logStreamName"] + + resp = logs_client.get_log_events( + logGroupName="/aws/batch/job", logStreamName=ls_name + ) + # Events should only contain 'start' because we interrupted + # the job before 'stop' was written to the logs. 
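+    # The container command was "echo start && sleep 30 && echo stop", and each
+    # echo produces a single CloudWatch log event.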
+ resp["events"].should.have.length_of(1) + resp["events"][0]["message"].should.equal("start") + + +def _wait_for_job_status(client, job_id, status, seconds_to_wait=30): + wait_time = datetime.datetime.now() + datetime.timedelta(seconds=seconds_to_wait) + last_job_status = None + while datetime.datetime.now() < wait_time: + resp = client.describe_jobs(jobs=[job_id]) + last_job_status = resp["jobs"][0]["status"] + if last_job_status == status: + break + else: + raise RuntimeError( + "Time out waiting for job status {status}!\n Last status: {last_status}".format( + status=status, last_status=last_job_status + ) + ) diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_batch_cloudformation.py similarity index 92% rename from tests/test_batch/test_cloudformation.py rename to tests/test_batch/test_batch_cloudformation.py index a6baedb38..7935f3fe9 100644 --- a/tests/test_batch/test_cloudformation.py +++ b/tests/test_batch/test_batch_cloudformation.py @@ -14,7 +14,6 @@ from moto import ( mock_cloudformation, ) import functools -import nose import json DEFAULT_REGION = "eu-central-1" @@ -234,6 +233,7 @@ def test_create_job_def_cf(): "Vcpus": 2, "Memory": 2000, "Command": ["echo", "Hello world"], + "LinuxParameters": {"Devices": [{"HostPath": "test-path"}]}, }, "RetryStrategy": {"Attempts": 1}, }, @@ -262,3 +262,17 @@ def test_create_job_def_cf(): job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:") job_def_resource["PhysicalResourceId"].should.contain("test_stack-JobDef") job_def_resource["PhysicalResourceId"].should.contain("job-definition/") + + # Test the linux parameter device host path + # This ensures that batch is parsing the parameter dictionaries + # correctly by recursively converting the first character of all + # dict keys to lowercase. + batch_conn = boto3.client("batch", DEFAULT_REGION) + response = batch_conn.describe_job_definitions( + jobDefinitions=[job_def_resource["PhysicalResourceId"]] + ) + job_def_linux_device_host_path = response.get("jobDefinitions")[0][ + "containerProperties" + ]["linuxParameters"]["devices"][0]["hostPath"] + + job_def_linux_device_host_path.should.equal("test-path") diff --git a/tests/test_cloudformation/fixtures/vpc_eip.py b/tests/test_cloudformation/fixtures/vpc_eip.py index b5bd48c01..154d4c2d4 100644 --- a/tests/test_cloudformation/fixtures/vpc_eip.py +++ b/tests/test_cloudformation/fixtures/vpc_eip.py @@ -1,5 +1,5 @@ -from __future__ import unicode_literals - -template = { - "Resources": {"VPCEIP": {"Type": "AWS::EC2::EIP", "Properties": {"Domain": "vpc"}}} -} +from __future__ import unicode_literals + +template = { + "Resources": {"VPCEIP": {"Type": "AWS::EC2::EIP", "Properties": {"Domain": "vpc"}}} +} diff --git a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py index ff7b75518..546f68cb4 100644 --- a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py +++ b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py @@ -1,276 +1,276 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. 
diff --git a/tests/test_cloudformation/fixtures/vpc_eip.py b/tests/test_cloudformation/fixtures/vpc_eip.py
index b5bd48c01..154d4c2d4 100644
--- a/tests/test_cloudformation/fixtures/vpc_eip.py
+++ b/tests/test_cloudformation/fixtures/vpc_eip.py
@@ -1,5 +1,5 @@
-from __future__ import unicode_literals
-
-template = {
-    "Resources": {"VPCEIP": {"Type": "AWS::EC2::EIP", "Properties": {"Domain": "vpc"}}}
-}
+from __future__ import unicode_literals
+
+template = {
+    "Resources": {"VPCEIP": {"Type": "AWS::EC2::EIP", "Properties": {"Domain": "vpc"}}}
+}
diff --git a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py
index ff7b75518..546f68cb4 100644
--- a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py
+++ b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py
@@ -1,276 +1,276 @@
-from __future__ import unicode_literals
-
-template = {
-    "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.",
-    "Parameters": {
-        "SSHLocation": {
-            "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.",
-            "Description": " The IP address range that can be used to SSH to the EC2 instances",
-            "Default": "0.0.0.0/0",
-            "MinLength": "9",
-            "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
-            "MaxLength": "18",
-            "Type": "String",
-        },
-        "KeyName": {
-            "Type": "String",
-            "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance",
-            "MinLength": "1",
-            "AllowedPattern": "[\\x20-\\x7E]*",
-            "MaxLength": "255",
-            "ConstraintDescription": "can contain only ASCII characters.",
-        },
-        "InstanceType": {
-            "Default": "m1.small",
-            "ConstraintDescription": "must be a valid EC2 instance type.",
-            "Type": "String",
-            "Description": "WebServer EC2 instance type",
-            "AllowedValues": [
-                "t1.micro",
-                "m1.small",
-                "m1.medium",
-                "m1.large",
-                "m1.xlarge",
-                "m2.xlarge",
-                "m2.2xlarge",
-                "m2.4xlarge",
-                "m3.xlarge",
-                "m3.2xlarge",
-                "c1.medium",
-                "c1.xlarge",
-                "cc1.4xlarge",
-                "cc2.8xlarge",
-                "cg1.4xlarge",
-            ],
-        },
-    },
-    "AWSTemplateFormatVersion": "2010-09-09",
-    "Outputs": {
-        "URL": {
-            "Description": "Newly created application URL",
-            "Value": {
-                "Fn::Join": [
-                    "",
-                    ["http://", {"Fn::GetAtt": ["WebServerInstance", "PublicIp"]}],
-                ]
-            },
-        }
-    },
-    "Resources": {
-        "Subnet": {
-            "Type": "AWS::EC2::Subnet",
-            "Properties": {
-                "VpcId": {"Ref": "VPC"},
-                "CidrBlock": "10.0.0.0/24",
-                "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}],
-            },
-        },
-        "WebServerWaitHandle": {"Type": "AWS::CloudFormation::WaitConditionHandle"},
-        "Route": {
-            "Type": "AWS::EC2::Route",
-            "Properties": {
-                "GatewayId": {"Ref": "InternetGateway"},
-                "DestinationCidrBlock": "0.0.0.0/0",
-                "RouteTableId": {"Ref": "RouteTable"},
-            },
-            "DependsOn": "AttachGateway",
-        },
-        "SubnetRouteTableAssociation": {
-            "Type": "AWS::EC2::SubnetRouteTableAssociation",
-            "Properties": {
-                "SubnetId": {"Ref": "Subnet"},
-                "RouteTableId": {"Ref": "RouteTable"},
-            },
-        },
-        "InternetGateway": {
-            "Type": "AWS::EC2::InternetGateway",
-            "Properties": {
-                "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}]
-            },
-        },
-        "RouteTable": {
-            "Type": "AWS::EC2::RouteTable",
-            "Properties": {
-                "VpcId": {"Ref": "VPC"},
-                "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}],
-            },
-        },
-        "WebServerWaitCondition": {
-            "Type": "AWS::CloudFormation::WaitCondition",
-            "Properties": {"Handle": {"Ref": "WebServerWaitHandle"}, "Timeout": "300"},
-            "DependsOn": "WebServerInstance",
-        },
-        "VPC": {
-            "Type": "AWS::EC2::VPC",
-            "Properties": {
-                "CidrBlock": "10.0.0.0/16",
-                "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}],
-            },
-        },
-        "InstanceSecurityGroup": {
-            "Type": "AWS::EC2::SecurityGroup",
-            "Properties": {
-                "SecurityGroupIngress": [
-                    {
-                        "ToPort": "22",
-                        "IpProtocol": "tcp",
-                        "CidrIp": {"Ref": "SSHLocation"},
-                        "FromPort": "22",
-                    },
-                    {
-                        "ToPort": "80",
-                        "IpProtocol": "tcp",
-                        "CidrIp": "0.0.0.0/0",
-                        "FromPort": "80",
-                    },
-                ],
-                "VpcId": {"Ref": "VPC"},
-                "GroupDescription": "Enable SSH access via port 22",
-            },
-        },
{"Ref": "WebServerWaitHandle"}, - "'\n", - " exit 1\n", - "}\n", - "# Install the simple web page\n", - "/opt/aws/bin/cfn-init -s ", - {"Ref": "AWS::StackId"}, - " -r WebServerInstance ", - " --region ", - {"Ref": "AWS::Region"}, - " || error_exit 'Failed to run cfn-init'\n", - "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", - "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", - "# All done so signal success\n", - '/opt/aws/bin/cfn-signal -e 0 -r "WebServer setup complete" \'', - {"Ref": "WebServerWaitHandle"}, - "'\n", - ], - ] - } - }, - "Tags": [ - {"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}, - {"Value": "Bar", "Key": "Foo"}, - ], - "SecurityGroupIds": [{"Ref": "InstanceSecurityGroup"}], - "KeyName": {"Ref": "KeyName"}, - "SubnetId": {"Ref": "Subnet"}, - "ImageId": { - "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "AMI"] - }, - "InstanceType": {"Ref": "InstanceType"}, - }, - "Metadata": { - "Comment": "Install a simple PHP application", - "AWS::CloudFormation::Init": { - "config": { - "files": { - "/etc/cfn/cfn-hup.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[main]\n", - "stack=", - {"Ref": "AWS::StackId"}, - "\n", - "region=", - {"Ref": "AWS::Region"}, - "\n", - ], - ] - }, - "owner": "root", - "group": "root", - "mode": "000400", - }, - "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[cfn-auto-reloader-hook]\n", - "triggers=post.update\n", - "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", - "action=/opt/aws/bin/cfn-init -s ", - {"Ref": "AWS::StackId"}, - " -r WebServerInstance ", - " --region ", - {"Ref": "AWS::Region"}, - "\n", - "runas=root\n", - ], - ] - } - }, - "/var/www/html/index.php": { - "content": { - "Fn::Join": [ - "", - [ - "AWS CloudFormation sample PHP application';\n", - "?>\n", - ], - ] - }, - "owner": "apache", - "group": "apache", - "mode": "000644", - }, - }, - "services": { - "sysvinit": { - "httpd": {"ensureRunning": "true", "enabled": "true"}, - "sendmail": { - "ensureRunning": "false", - "enabled": "false", - }, - } - }, - "packages": {"yum": {"httpd": [], "php": []}}, - } - }, - }, - }, - "IPAddress": { - "Type": "AWS::EC2::EIP", - "Properties": {"InstanceId": {"Ref": "WebServerInstance"}, "Domain": "vpc"}, - "DependsOn": "AttachGateway", - }, - "AttachGateway": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": {"Ref": "VPC"}, - "InternetGatewayId": {"Ref": "InternetGateway"}, - }, - }, - }, - "Mappings": { - "RegionMap": { - "ap-southeast-1": {"AMI": "ami-74dda626"}, - "ap-southeast-2": {"AMI": "ami-b3990e89"}, - "us-west-2": {"AMI": "ami-16fd7026"}, - "us-east-1": {"AMI": "ami-7f418316"}, - "ap-northeast-1": {"AMI": "ami-dcfa4edd"}, - "us-west-1": {"AMI": "ami-951945d0"}, - "eu-west-1": {"AMI": "ami-24506250"}, - "sa-east-1": {"AMI": "ami-3e3be423"}, - } - }, -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. 
You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": " The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String", + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters.", + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge", + ], + }, + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "URL": { + "Description": "Newly created application URL", + "Value": { + "Fn::Join": [ + "", + ["http://", {"Fn::GetAtt": ["WebServerInstance", "PublicIp"]}], + ] + }, + } + }, + "Resources": { + "Subnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": {"Ref": "VPC"}, + "CidrBlock": "10.0.0.0/24", + "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}], + }, + }, + "WebServerWaitHandle": {"Type": "AWS::CloudFormation::WaitConditionHandle"}, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "GatewayId": {"Ref": "InternetGateway"}, + "DestinationCidrBlock": "0.0.0.0/0", + "RouteTableId": {"Ref": "RouteTable"}, + }, + "DependsOn": "AttachGateway", + }, + "SubnetRouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": {"Ref": "Subnet"}, + "RouteTableId": {"Ref": "RouteTable"}, + }, + }, + "InternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}] + }, + }, + "RouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": {"Ref": "VPC"}, + "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}], + }, + }, + "WebServerWaitCondition": { + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": {"Handle": {"Ref": "WebServerWaitHandle"}, "Timeout": "300"}, + "DependsOn": "WebServerInstance", + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "Tags": [{"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}], + }, + }, + "InstanceSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": {"Ref": "SSHLocation"}, + "FromPort": "22", + }, + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80", + }, + ], + "VpcId": {"Ref": "VPC"}, + "GroupDescription": "Enable SSH access via port 22", + }, + }, + "WebServerInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\n", + "yum update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + ' /opt/aws/bin/cfn-signal -e 1 -r "$1" \'', + 
{"Ref": "WebServerWaitHandle"}, + "'\n", + " exit 1\n", + "}\n", + "# Install the simple web page\n", + "/opt/aws/bin/cfn-init -s ", + {"Ref": "AWS::StackId"}, + " -r WebServerInstance ", + " --region ", + {"Ref": "AWS::Region"}, + " || error_exit 'Failed to run cfn-init'\n", + "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", + "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", + "# All done so signal success\n", + '/opt/aws/bin/cfn-signal -e 0 -r "WebServer setup complete" \'', + {"Ref": "WebServerWaitHandle"}, + "'\n", + ], + ] + } + }, + "Tags": [ + {"Value": {"Ref": "AWS::StackId"}, "Key": "Application"}, + {"Value": "Bar", "Key": "Foo"}, + ], + "SecurityGroupIds": [{"Ref": "InstanceSecurityGroup"}], + "KeyName": {"Ref": "KeyName"}, + "SubnetId": {"Ref": "Subnet"}, + "ImageId": { + "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "AMI"] + }, + "InstanceType": {"Ref": "InstanceType"}, + }, + "Metadata": { + "Comment": "Install a simple PHP application", + "AWS::CloudFormation::Init": { + "config": { + "files": { + "/etc/cfn/cfn-hup.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[main]\n", + "stack=", + {"Ref": "AWS::StackId"}, + "\n", + "region=", + {"Ref": "AWS::Region"}, + "\n", + ], + ] + }, + "owner": "root", + "group": "root", + "mode": "000400", + }, + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[cfn-auto-reloader-hook]\n", + "triggers=post.update\n", + "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", + "action=/opt/aws/bin/cfn-init -s ", + {"Ref": "AWS::StackId"}, + " -r WebServerInstance ", + " --region ", + {"Ref": "AWS::Region"}, + "\n", + "runas=root\n", + ], + ] + } + }, + "/var/www/html/index.php": { + "content": { + "Fn::Join": [ + "", + [ + "AWS CloudFormation sample PHP application';\n", + "?>\n", + ], + ] + }, + "owner": "apache", + "group": "apache", + "mode": "000644", + }, + }, + "services": { + "sysvinit": { + "httpd": {"ensureRunning": "true", "enabled": "true"}, + "sendmail": { + "ensureRunning": "false", + "enabled": "false", + }, + } + }, + "packages": {"yum": {"httpd": [], "php": []}}, + } + }, + }, + }, + "IPAddress": { + "Type": "AWS::EC2::EIP", + "Properties": {"InstanceId": {"Ref": "WebServerInstance"}, "Domain": "vpc"}, + "DependsOn": "AttachGateway", + }, + "AttachGateway": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": {"Ref": "VPC"}, + "InternetGatewayId": {"Ref": "InternetGateway"}, + }, + }, + }, + "Mappings": { + "RegionMap": { + "ap-southeast-1": {"AMI": "ami-74dda626"}, + "ap-southeast-2": {"AMI": "ami-b3990e89"}, + "us-west-2": {"AMI": "ami-16fd7026"}, + "us-east-1": {"AMI": "ami-7f418316"}, + "ap-northeast-1": {"AMI": "ami-dcfa4edd"}, + "us-west-1": {"AMI": "ami-951945d0"}, + "eu-west-1": {"AMI": "ami-24506250"}, + "sa-east-1": {"AMI": "ami-3e3be423"}, + } + }, +} diff --git a/tests/test_cloudformation/test_cloudformation_depends_on.py b/tests/test_cloudformation/test_cloudformation_depends_on.py new file mode 100644 index 000000000..1b47b4064 --- /dev/null +++ b/tests/test_cloudformation/test_cloudformation_depends_on.py @@ -0,0 +1,143 @@ +import boto3 +from moto import mock_cloudformation, mock_ecs, mock_autoscaling, mock_s3 +import json + +depends_on_template_list = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "ECSCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "test-cluster"}, + }, + "AutoScalingGroup": { + "Type": 
"AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + }, + "DependsOn": ["ECSCluster", "LaunchConfig"], + }, + "LaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config",}, + }, + }, +} + +depends_on_template_string = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "AutoScalingGroup": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + }, + "DependsOn": "LaunchConfig", + }, + "LaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config",}, + }, + }, +} + + +def make_chained_depends_on_template(): + depends_on_template_linked_dependencies = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Bucket1": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "test-bucket-0-us-east-1"}, + }, + }, + } + + for i in range(1, 10): + depends_on_template_linked_dependencies["Resources"]["Bucket" + str(i)] = { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "test-bucket-" + str(i) + "-us-east-1"}, + "DependsOn": ["Bucket" + str(i - 1)], + } + + return json.dumps(depends_on_template_linked_dependencies) + + +depends_on_template_list_json = json.dumps(depends_on_template_list) +depends_on_template_string_json = json.dumps(depends_on_template_string) + + +@mock_cloudformation +@mock_autoscaling +@mock_ecs +def test_create_stack_with_depends_on(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="depends_on_test", TemplateBody=depends_on_template_list_json + ) + + autoscaling = boto3.client("autoscaling", region_name="us-east-1") + autoscaling_group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][ + 0 + ] + assert autoscaling_group["AutoScalingGroupName"] == "test-scaling-group" + assert autoscaling_group["DesiredCapacity"] == 1 + assert autoscaling_group["MinSize"] == 1 + assert autoscaling_group["MaxSize"] == 50 + assert autoscaling_group["AvailabilityZones"] == ["us-east-1a"] + + launch_configuration = autoscaling.describe_launch_configurations()[ + "LaunchConfigurations" + ][0] + assert launch_configuration["LaunchConfigurationName"] == "test-launch-config" + + ecs = boto3.client("ecs", region_name="us-east-1") + cluster_arn = ecs.list_clusters()["clusterArns"][0] + assert cluster_arn == "arn:aws:ecs:us-east-1:012345678910:cluster/test-cluster" + + +@mock_cloudformation +@mock_autoscaling +def test_create_stack_with_depends_on_string(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="depends_on_string_test", TemplateBody=depends_on_template_string_json + ) + + autoscaling = boto3.client("autoscaling", region_name="us-east-1") + autoscaling_group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][ + 0 + ] + assert autoscaling_group["AutoScalingGroupName"] == "test-scaling-group" + assert autoscaling_group["DesiredCapacity"] == 1 + assert autoscaling_group["MinSize"] == 1 + assert autoscaling_group["MaxSize"] == 50 + assert autoscaling_group["AvailabilityZones"] == ["us-east-1a"] + + 
launch_configuration = autoscaling.describe_launch_configurations()[ + "LaunchConfigurations" + ][0] + assert launch_configuration["LaunchConfigurationName"] == "test-launch-config" + + +@mock_cloudformation +@mock_s3 +def test_create_chained_depends_on_stack(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="linked_depends_on_test", + TemplateBody=make_chained_depends_on_template(), + ) + + s3 = boto3.client("s3", region_name="us-east-1") + bucket_response = s3.list_buckets()["Buckets"] + + assert sorted([bucket["Name"] for bucket in bucket_response]) == [ + "test-bucket-" + str(i) + "-us-east-1" for i in range(1, 10) + ] diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 75f705ea7..40004f805 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -4,6 +4,7 @@ import os import json import boto +import boto3 import boto.iam import boto.s3 import boto.s3.key @@ -11,9 +12,7 @@ import boto.cloudformation from boto.exception import BotoServerError import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest from moto.core import ACCOUNT_ID from moto import ( @@ -21,6 +20,8 @@ from moto import ( mock_s3_deprecated, mock_route53_deprecated, mock_iam_deprecated, + mock_dynamodb2, + mock_cloudformation, ) from moto.cloudformation import cloudformation_backends @@ -45,6 +46,30 @@ dummy_template3 = { }, } +dummy_template4 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + {"AttributeName": "Name", "AttributeType": "S"}, + {"AttributeName": "Age", "AttributeType": "S"}, + ], + "KeySchema": [ + {"AttributeName": "Name", "KeyType": "HASH"}, + {"AttributeName": "Age", "KeyType": "RANGE"}, + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5, + }, + "TableName": "Person", + }, + } + }, +} + dummy_template_json = json.dumps(dummy_template) dummy_template_json2 = json.dumps(dummy_template2) dummy_template_json3 = json.dumps(dummy_template3) @@ -98,12 +123,12 @@ def test_create_stack_hosted_zone_by_id(): }, } conn.create_stack( - "test_stack", template_body=json.dumps(dummy_template), parameters={}.items() + "test_stack1", template_body=json.dumps(dummy_template), parameters={}.items() ) r53_conn = boto.connect_route53() zone_id = r53_conn.get_zones()[0].id conn.create_stack( - "test_stack", + "test_stack2", template_body=json.dumps(dummy_template2), parameters={"ZoneId": zone_id}.items(), ) @@ -143,7 +168,7 @@ def test_create_stack_with_notification_arn(): @mock_s3_deprecated def test_create_stack_from_s3_url(): s3_conn = boto.s3.connect_to_region("us-west-1") - bucket = s3_conn.create_bucket("foobar") + bucket = s3_conn.create_bucket("foobar", location="us-west-1") key = boto.s3.key.Key(bucket) key.key = "template-key" key.set_contents_from_string(dummy_template_json) @@ -188,6 +213,34 @@ def test_describe_stack_by_stack_id(): stack_by_id.stack_name.should.equal("test_stack") +@mock_dynamodb2 +@mock_cloudformation_deprecated +def test_delete_stack_dynamo_template(): + conn = boto.connect_cloudformation() + dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") + conn.create_stack("test_stack", 
template_body=dummy_template4)
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(1)
+    conn.delete_stack("test_stack")
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(0)
+    conn.create_stack("test_stack", template_body=dummy_template4)
+
+
+@mock_dynamodb2
+@mock_cloudformation
+def test_delete_stack_dynamo_template_boto3():
+    conn = boto3.client("cloudformation", region_name="us-east-1")
+    dynamodb_client = boto3.client("dynamodb", region_name="us-east-1")
+    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template4))
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(1)
+    conn.delete_stack(StackName="test_stack")
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(0)
+    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template4))
+
+
 @mock_cloudformation_deprecated
 def test_describe_deleted_stack():
     conn = boto.connect_cloudformation()
@@ -233,6 +286,19 @@ def test_list_stacks():
     stacks[0].template_description.should.equal("Stack 1")
 
 
+@mock_cloudformation_deprecated
+def test_list_stacks_with_filter():
+    conn = boto.connect_cloudformation()
+    conn.create_stack("test_stack", template_body=dummy_template_json)
+    conn.create_stack("test_stack2", template_body=dummy_template_json)
+    conn.update_stack("test_stack", template_body=dummy_template_json2)
+    stacks = conn.list_stacks("CREATE_COMPLETE")
+    stacks.should.have.length_of(1)
+    stacks[0].template_description.should.equal("Stack 1")
+    stacks = conn.list_stacks("UPDATE_COMPLETE")
+    stacks.should.have.length_of(1)
+
+
 @mock_cloudformation_deprecated
 def test_delete_stack_by_name():
     conn = boto.connect_cloudformation()
@@ -251,7 +317,7 @@ def test_delete_stack_by_id():
     conn.describe_stacks().should.have.length_of(1)
     conn.delete_stack(stack_id)
     conn.describe_stacks().should.have.length_of(0)
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.describe_stacks("test_stack")
     conn.describe_stacks(stack_id).should.have.length_of(1)
 
@@ -270,7 +336,7 @@ def test_delete_stack_with_resource_missing_delete_attr():
 @mock_cloudformation_deprecated
 def test_bad_describe_stack():
     conn = boto.connect_cloudformation()
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.describe_stacks("bad_stack")
 
 
@@ -451,10 +517,10 @@ def test_update_stack_when_rolled_back():
         stack_id
     ].status = "ROLLBACK_COMPLETE"
 
-    with assert_raises(BotoServerError) as err:
+    with pytest.raises(BotoServerError) as err:
         conn.update_stack("test_stack", dummy_template_json)
 
-    ex = err.exception
+    ex = err.value
     ex.body.should.match(r"is in ROLLBACK_COMPLETE state and can not be updated")
     ex.error_code.should.equal("ValidationError")
     ex.reason.should.equal("Bad Request")
@@ -541,13 +607,14 @@ def test_create_stack_lambda_and_dynamodb():
                     "ReadCapacityUnits": 10,
                     "WriteCapacityUnits": 10,
                 },
+                "StreamSpecification": {"StreamViewType": "KEYS_ONLY"},
             },
         },
         "func1mapping": {
            "Type": "AWS::Lambda::EventSourceMapping",
            "Properties": {
                "FunctionName": {"Ref": "func1"},
-                "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000",
+                "EventSourceArn": {"Fn::GetAtt": ["tab1", "StreamArn"]},
                "StartingPosition": "0",
                "BatchSize": 100,
                "Enabled": True,
diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py
index
b7e86a1d5..86b6f1a94 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -2,16 +2,18 @@ from __future__ import unicode_literals import json from collections import OrderedDict +from datetime import datetime, timedelta +import pytz import boto3 from botocore.exceptions import ClientError import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises +import pytest from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 from moto.core import ACCOUNT_ID +from .test_cloudformation_stack_crud import dummy_template_json2 dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -32,6 +34,14 @@ dummy_template = { }, } +dummy_template3 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 3", + "Resources": { + "VPC": {"Properties": {"CidrBlock": "192.168.0.0/16"}, "Type": "AWS::EC2::VPC"} + }, +} + dummy_template_yaml = """--- AWSTemplateFormatVersion: 2010-09-09 Description: Stack1 with yaml template @@ -216,6 +226,18 @@ def test_boto3_list_stacksets_length(): stacksets.should.have.length_of(2) +@mock_cloudformation +def test_boto3_filter_stacks(): + conn = boto3.client("cloudformation", region_name="us-east-1") + conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json) + conn.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json) + conn.update_stack(StackName="test_stack", TemplateBody=dummy_template_json2) + stacks = conn.list_stacks(StackStatusFilter=["CREATE_COMPLETE"]) + stacks.get("StackSummaries").should.have.length_of(1) + stacks = conn.list_stacks(StackStatusFilter=["UPDATE_COMPLETE"]) + stacks.get("StackSummaries").should.have.length_of(1) + + @mock_cloudformation def test_boto3_list_stacksets_contents(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -525,7 +547,7 @@ def test_boto3_list_stack_set_operations(): @mock_cloudformation def test_boto3_bad_list_stack_resources(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): cf_conn.list_stack_resources(StackName="test_stack_set") @@ -569,11 +591,11 @@ def test_boto3_create_stack_set_with_yaml(): @mock_cloudformation @mock_s3 def test_create_stack_set_from_s3_url(): - s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") - bucket = s3_conn.create_bucket(Bucket="foobar") + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") + s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -631,6 +653,31 @@ def test_boto3_create_stack(): ) +@mock_cloudformation +def test_boto3_create_stack_s3_long_name(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyLongStackName01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012" + + template = '{"Resources":{"HelloBucket":{"Type":"AWS::S3::Bucket"}}}' + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + + cf_conn.get_template(StackName=stack_name)["TemplateBody"].should.equal( + json.loads(template, object_pairs_hook=OrderedDict) + ) + 
provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_bucket_name = provisioned_resource["PhysicalResourceId"] + len(provisioned_bucket_name).should.be.lower_than(64) + logical_name_lower_case = provisioned_resource["LogicalResourceId"].lower() + bucket_name_stack_name_prefix = provisioned_bucket_name[ + : provisioned_bucket_name.index("-" + logical_name_lower_case) + ] + stack_name.lower().should.contain(bucket_name_stack_name_prefix) + + @mock_cloudformation def test_boto3_create_stack_with_yaml(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -653,6 +700,48 @@ def test_boto3_create_stack_with_short_form_func_yaml(): ) +@mock_s3 +@mock_cloudformation +def test_get_template_summary(): + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") + + conn = boto3.client("cloudformation", region_name="us-east-1") + result = conn.get_template_summary(TemplateBody=json.dumps(dummy_template3)) + + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template3)) + + result = conn.get_template_summary(StackName="test_stack") + + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + s3_conn.create_bucket(Bucket="foobar") + s3_conn.Object("foobar", "template-key").put(Body=json.dumps(dummy_template3)) + + key_url = s3.generate_presigned_url( + ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} + ) + + conn.create_stack(StackName="stack_from_url", TemplateURL=key_url) + result = conn.get_template_summary(TemplateURL=key_url) + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + conn = boto3.client("cloudformation", region_name="us-east-1") + result = conn.get_template_summary(TemplateBody=dummy_template_yaml) + + result["ResourceTypes"].should.equal(["AWS::EC2::Instance"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack1 with yaml template") + + @mock_cloudformation def test_boto3_create_stack_with_ref_yaml(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -712,11 +801,11 @@ def test_create_stack_with_role_arn(): @mock_cloudformation @mock_s3 def test_create_stack_from_s3_url(): - s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") - bucket = s3_conn.create_bucket(Bucket="foobar") + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") + s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -767,8 +856,8 @@ def test_update_stack_with_previous_value(): @mock_s3 @mock_ec2 def test_update_stack_from_s3_url(): - s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") cf_conn = boto3.client("cloudformation", region_name="us-east-1") cf_conn.create_stack( @@ -796,11 +885,11 @@ def 
test_update_stack_from_s3_url(): @mock_cloudformation @mock_s3 def test_create_change_set_from_s3_url(): - s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") - bucket = s3_conn.create_bucket(Bucket="foobar") + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") + s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -817,7 +906,7 @@ def test_create_change_set_from_s3_url(): in response["Id"] ) assert ( - "arn:aws:cloudformation:us-east-1:123456789:stack/NewStack" + "arn:aws:cloudformation:us-west-1:123456789:stack/NewStack" in response["StackId"] ) @@ -833,8 +922,34 @@ def test_describe_change_set(): ) stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack["ChangeSetName"].should.equal("NewChangeSet") stack["StackName"].should.equal("NewStack") + stack["Status"].should.equal("CREATE_COMPLETE") + stack["ExecutionStatus"].should.equal("AVAILABLE") + two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2) + assert ( + two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC) + ), "Change set should have been created recently" + stack["Changes"].should.have.length_of(1) + stack["Changes"][0].should.equal( + dict( + { + "Type": "Resource", + "ResourceChange": { + "Action": "Add", + "LogicalResourceId": "EC2Instance1", + "ResourceType": "AWS::EC2::Instance", + }, + } + ) + ) + + # Execute change set + cf_conn.execute_change_set(ChangeSetName="NewChangeSet") + # Verify that the changes have been applied + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack["Changes"].should.have.length_of(1) cf_conn.create_change_set( StackName="NewStack", @@ -849,21 +964,36 @@ def test_describe_change_set(): @mock_cloudformation +@mock_ec2 def test_execute_change_set_w_arn(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") + ec2 = boto3.client("ec2", region_name="us-east-1") + # Verify no instances exist at the moment + ec2.describe_instances()["Reservations"].should.have.length_of(0) + # Create a Change set, and verify no resources have been created yet change_set = cf_conn.create_change_set( StackName="NewStack", TemplateBody=dummy_template_json, ChangeSetName="NewChangeSet", ChangeSetType="CREATE", ) + ec2.describe_instances()["Reservations"].should.have.length_of(0) + cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal( + "CREATE_COMPLETE" + ) + # Execute change set cf_conn.execute_change_set(ChangeSetName=change_set["Id"]) + # Verify that the status has changed, and the appropriate resources have been created + cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal( + "CREATE_COMPLETE" + ) + ec2.describe_instances()["Reservations"].should.have.length_of(1) @mock_cloudformation def test_execute_change_set_w_name(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - change_set = cf_conn.create_change_set( + cf_conn.create_change_set( StackName="NewStack", TemplateBody=dummy_template_json, ChangeSetName="NewChangeSet", @@ -876,7 +1006,9 @@ def test_execute_change_set_w_name(): def test_describe_stack_pagination(): conn = boto3.client("cloudformation", region_name="us-east-1") for i in range(100): - conn.create_stack(StackName="test_stack", 
TemplateBody=dummy_template_json) + conn.create_stack( + StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json + ) resp = conn.describe_stacks() stacks = resp["Stacks"] @@ -911,6 +1043,10 @@ def test_describe_stack_by_name(): stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0] stack["StackName"].should.equal("test_stack") + two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2) + assert ( + two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC) + ), "Stack should have been created recently" @mock_cloudformation @@ -1043,7 +1179,7 @@ def test_describe_updated_stack(): @mock_cloudformation def test_bad_describe_stack(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): cf_conn.describe_stacks(StackName="non_existent_stack") @@ -1164,7 +1300,8 @@ def test_list_exports_with_token(): # Add index to ensure name is unique dummy_output_template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i) cf.create_stack( - StackName="test_stack", TemplateBody=json.dumps(dummy_output_template) + StackName="test_stack_{}".format(i), + TemplateBody=json.dumps(dummy_output_template), ) exports = cf.list_exports() exports["Exports"].should.have.length_of(100) @@ -1193,10 +1330,8 @@ def test_delete_stack_with_export(): @mock_cloudformation def test_export_names_must_be_unique(): cf = boto3.resource("cloudformation", region_name="us-east-1") - first_stack = cf.create_stack( - StackName="test_stack", TemplateBody=dummy_output_template_json - ) - with assert_raises(ClientError): + cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) + with pytest.raises(ClientError): cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) @@ -1209,9 +1344,7 @@ def test_stack_with_imports(): output_stack = cf.create_stack( StackName="test_stack1", TemplateBody=dummy_output_template_json ) - import_stack = cf.create_stack( - StackName="test_stack2", TemplateBody=dummy_import_template_json - ) + cf.create_stack(StackName="test_stack2", TemplateBody=dummy_import_template_json) output_stack.outputs.should.have.length_of(1) output = output_stack.outputs[0]["OutputValue"] @@ -1230,3 +1363,16 @@ def test_non_json_redrive_policy(): stack.Resource("MainQueue").resource_status.should.equal("CREATE_COMPLETE") stack.Resource("DeadLetterQueue").resource_status.should.equal("CREATE_COMPLETE") + + +@mock_cloudformation +def test_boto3_create_duplicate_stack(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=dummy_template_json, + ) + + with pytest.raises(ClientError): + cf_conn.create_stack( + StackName="test_stack", TemplateBody=dummy_template_json, + ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index e296ef2ed..9949bb4a5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import json -import base64 from decimal import Decimal import boto @@ -18,35 +17,35 @@ import boto.sqs import boto.vpc import boto3 import sure # noqa +from string import Template from moto import ( mock_autoscaling_deprecated, + mock_autoscaling, mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, mock_dynamodb2, 
mock_ec2, mock_ec2_deprecated, - mock_elb, mock_elb_deprecated, + mock_events, mock_iam_deprecated, mock_kms, mock_lambda, + mock_logs, mock_rds_deprecated, mock_rds2, - mock_rds2_deprecated, - mock_redshift, mock_redshift_deprecated, mock_route53_deprecated, + mock_s3, mock_sns_deprecated, - mock_sqs, mock_sqs_deprecated, mock_elbv2, ) from moto.core import ACCOUNT_ID -from moto.dynamodb2.models import Table -from .fixtures import ( +from tests.test_cloudformation.fixtures import ( ec2_classic_eip, fn_join, rds_mysql_with_db_parameter_group, @@ -492,7 +491,7 @@ def test_autoscaling_group_with_elb(): "my-as-group": { "Type": "AWS::AutoScaling::AutoScalingGroup", "Properties": { - "AvailabilityZones": ["us-east1"], + "AvailabilityZones": ["us-east-1a"], "LaunchConfigurationName": {"Ref": "my-launch-config"}, "MinSize": "2", "MaxSize": "2", @@ -519,7 +518,7 @@ def test_autoscaling_group_with_elb(): "my-elb": { "Type": "AWS::ElasticLoadBalancing::LoadBalancer", "Properties": { - "AvailabilityZones": ["us-east1"], + "AvailabilityZones": ["us-east-1a"], "Listeners": [ { "LoadBalancerPort": "80", @@ -542,10 +541,10 @@ def test_autoscaling_group_with_elb(): web_setup_template_json = json.dumps(web_setup_template) - conn = boto.cloudformation.connect_to_region("us-west-1") + conn = boto.cloudformation.connect_to_region("us-east-1") conn.create_stack("web_stack", template_body=web_setup_template_json) - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-east-1") autoscale_group = autoscale_conn.get_all_groups()[0] autoscale_group.launch_config_name.should.contain("my-launch-config") autoscale_group.load_balancers[0].should.equal("my-elb") @@ -554,7 +553,7 @@ def test_autoscaling_group_with_elb(): autoscale_conn.get_all_launch_configurations().should.have.length_of(1) # Confirm the ELB was actually created - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + elb_conn = boto.ec2.elb.connect_to_region("us-east-1") elb_conn.get_all_load_balancers().should.have.length_of(1) stack = conn.describe_stacks()[0] @@ -581,7 +580,7 @@ def test_autoscaling_group_with_elb(): elb_resource.physical_resource_id.should.contain("my-elb") # confirm the instances were created with the right tags - ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn = boto.ec2.connect_to_region("us-east-1") reservations = ec2_conn.get_all_reservations() len(reservations).should.equal(1) reservation = reservations[0] @@ -601,7 +600,7 @@ def test_autoscaling_group_update(): "my-as-group": { "Type": "AWS::AutoScaling::AutoScalingGroup", "Properties": { - "AvailabilityZones": ["us-west-1"], + "AvailabilityZones": ["us-west-1a"], "LaunchConfigurationName": {"Ref": "my-launch-config"}, "MinSize": "2", "MaxSize": "2", @@ -871,7 +870,7 @@ def test_iam_roles(): } ] }, - "Path": "my-path", + "Path": "/my-path/", "Policies": [ { "PolicyDocument": { @@ -909,6 +908,7 @@ def test_iam_roles(): }, "my-role-no-path": { "Properties": { + "RoleName": "my-role-no-path-name", "AssumeRolePolicyDocument": { "Statement": [ { @@ -917,7 +917,7 @@ def test_iam_roles(): "Principal": {"Service": ["ec2.amazonaws.com"]}, } ] - } + }, }, "Type": "AWS::IAM::Role", }, @@ -936,13 +936,13 @@ def test_iam_roles(): role_name_to_id = {} for role_result in role_results: role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") + # Role name is not specified, so randomly generated - can't check exact name if "with-path" in role.role_name: 
role_name_to_id["with-path"] = role.role_id - role.path.should.equal("my-path") + role.path.should.equal("/my-path/") else: role_name_to_id["no-path"] = role.role_id - role.role_name.should.contain("no-path") + role.role_name.should.equal("my-role-no-path-name") role.path.should.equal("/") instance_profile_responses = iam_conn.list_instance_profiles()[ @@ -1777,6 +1777,7 @@ def lambda_handler(event, context): "Role": {"Fn::GetAtt": ["MyRole", "Arn"]}, "Runtime": "python2.7", "Environment": {"Variables": {"TEST_ENV_KEY": "test-env-val"}}, + "ReservedConcurrentExecutions": 10, }, }, "MyRole": { @@ -1811,6 +1812,11 @@ def lambda_handler(event, context): {"Variables": {"TEST_ENV_KEY": "test-env-val"}} ) + function_name = result["Functions"][0]["FunctionName"] + result = conn.get_function(FunctionName=function_name) + + result["Concurrency"]["ReservedConcurrentExecutions"].should.equal(10) + @mock_cloudformation @mock_ec2 @@ -2303,6 +2309,7 @@ def test_stack_dynamodb_resources_integration(): }, } ], + "StreamSpecification": {"StreamViewType": "KEYS_ONLY"}, }, } }, @@ -2315,6 +2322,12 @@ def test_stack_dynamodb_resources_integration(): StackName="dynamodb_stack", TemplateBody=dynamodb_template_json ) + dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") + table_desc = dynamodb_client.describe_table(TableName="myTableName")["Table"] + table_desc["StreamSpecification"].should.equal( + {"StreamEnabled": True, "StreamViewType": "KEYS_ONLY",} + ) + dynamodb_conn = boto3.resource("dynamodb", region_name="us-east-1") table = dynamodb_conn.Table("myTableName") table.name.should.equal("myTableName") @@ -2329,3 +2342,470 @@ def test_stack_dynamodb_resources_integration(): response["Item"]["Sales"].should.equal(Decimal("10")) response["Item"]["NumberOfSongs"].should.equal(Decimal("5")) response["Item"]["Album"].should.equal("myAlbum") + + +@mock_cloudformation +@mock_logs +@mock_s3 +def test_create_log_group_using_fntransform(): + s3_resource = boto3.resource("s3") + s3_resource.create_bucket( + Bucket="owi-common-cf", + CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, + ) + s3_resource.Object("owi-common-cf", "snippets/test.json").put( + Body=json.dumps({"lgname": {"name": "some-log-group"}}) + ) + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Mappings": { + "EnvironmentMapping": { + "Fn::Transform": { + "Name": "AWS::Include", + "Parameters": {"Location": "s3://owi-common-cf/snippets/test.json"}, + } + } + }, + "Resources": { + "LogGroup": { + "Properties": { + "LogGroupName": { + "Fn::FindInMap": ["EnvironmentMapping", "lgname", "name"] + }, + "RetentionInDays": 90, + }, + "Type": "AWS::Logs::LogGroup", + } + }, + } + + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(template)) + + logs_conn = boto3.client("logs", region_name="us-west-2") + log_group = logs_conn.describe_log_groups()["logGroups"][0] + log_group["logGroupName"].should.equal("some-log-group") + log_group["retentionInDays"].should.be.equal(90) + + +@mock_cloudformation +@mock_events +def test_stack_events_create_rule_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "quick-fox", + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", 
TemplateBody=json.dumps(events_template) + ) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("quick-fox") + rules["Rules"][0]["State"].should.equal("ENABLED") + rules["Rules"][0]["ScheduleExpression"].should.equal("rate(5 minutes)") + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_rule_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "quick-fox", + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template) + ) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + + cf_conn.delete_stack(StackName="test_stack") + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(0) + + +@mock_cloudformation +@mock_events +def test_stack_events_create_rule_without_name_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template) + ) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"][0]["Name"].should.contain("test_stack-Event-") + + +@mock_cloudformation +@mock_events +@mock_logs +def test_stack_events_create_rule_as_target(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "SecurityGroup": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "LogGroupName": {"Fn::GetAtt": ["Event", "Arn"]}, + "RetentionInDays": 3, + }, + }, + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + }, + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template) + ) + + rules = boto3.client("events", "us-west-2").list_rules() + log_groups = boto3.client("logs", "us-west-2").describe_log_groups() + + rules["Rules"][0]["Name"].should.contain("test_stack-Event-") + + log_groups["logGroups"][0]["logGroupName"].should.equal(rules["Rules"][0]["Arn"]) + log_groups["logGroups"][0]["retentionInDays"].should.equal(3) + + +@mock_cloudformation +@mock_events +def test_stack_events_update_rule_integration(): + events_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "$Name", + "State": "$State", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } """ + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = events_template.substitute(Name="Foo", State="ENABLED") + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("Foo") + rules["Rules"][0]["State"].should.equal("ENABLED") + + update_template = events_template.substitute(Name="Bar", State="DISABLED") + 
cf_conn.update_stack(StackName="test_stack", TemplateBody=update_template) + + rules = boto3.client("events", "us-west-2").list_rules() + + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("Bar") + rules["Rules"][0]["State"].should.equal("DISABLED") + + +@mock_cloudformation +@mock_autoscaling +def test_autoscaling_propagate_tags(): + autoscaling_group_with_tags = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "AutoScalingGroup": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + "Tags": [ + { + "Key": "test-key-propagate", + "Value": "test", + "PropagateAtLaunch": True, + }, + { + "Key": "test-key-no-propagate", + "Value": "test", + "PropagateAtLaunch": False, + }, + ], + }, + "DependsOn": "LaunchConfig", + }, + "LaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config"}, + }, + }, + } + boto3.client("cloudformation", "us-east-1").create_stack( + StackName="propagate_tags_test", + TemplateBody=json.dumps(autoscaling_group_with_tags), + ) + + autoscaling = boto3.client("autoscaling", "us-east-1") + + autoscaling_group_tags = autoscaling.describe_auto_scaling_groups()[ + "AutoScalingGroups" + ][0]["Tags"] + propagation_dict = { + tag["Key"]: tag["PropagateAtLaunch"] for tag in autoscaling_group_tags + } + + assert propagation_dict["test-key-propagate"] + assert not propagation_dict["test-key-no-propagate"] + + +@mock_cloudformation +@mock_events +def test_stack_eventbus_create_from_cfn_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyCustomEventBus" + }, + } + }, + }""" + + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + + event_buses["EventBuses"].should.have.length_of(1) + event_buses["EventBuses"][0]["Name"].should.equal("MyCustomEventBus") + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_eventbus_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyCustomEventBus" + }, + } + }, + }""" + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + event_buses["EventBuses"].should.have.length_of(1) + + cf_conn.delete_stack(StackName="test_stack") + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + event_buses["EventBuses"].should.have.length_of(0) + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_from_cfn_integration(): + eventbus_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "$resource_name": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "$name" + }, + } + }, + }""" + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = eventbus_template.substitute( + 
{"resource_name": "original", "name": "MyCustomEventBus"} + ) + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + original_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + original_event_buses["EventBuses"].should.have.length_of(1) + + original_eventbus = original_event_buses["EventBuses"][0] + + updated_template = eventbus_template.substitute( + {"resource_name": "updated", "name": "AnotherEventBus"} + ) + cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template) + + update_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="AnotherEventBus" + ) + update_event_buses["EventBuses"].should.have.length_of(1) + update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"]) + + +@mock_cloudformation +@mock_events +def test_stack_events_update_from_cfn_integration(): + eventbus_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "$name" + }, + } + }, + }""" + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = eventbus_template.substitute({"name": "MyCustomEventBus"}) + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + original_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + original_event_buses["EventBuses"].should.have.length_of(1) + + original_eventbus = original_event_buses["EventBuses"][0] + + updated_template = eventbus_template.substitute({"name": "NewEventBus"}) + cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template) + + update_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="NewEventBus" + ) + update_event_buses["EventBuses"].should.have.length_of(1) + update_event_buses["EventBuses"][0]["Name"].should.equal("NewEventBus") + update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"]) + + +@mock_cloudformation +@mock_events +def test_stack_events_get_attribute_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyEventBus" + }, + } + }, + "Outputs": { + "bus_arn": {"Value": {"Fn::GetAtt": ["EventBus", "Arn"]}}, + "bus_name": {"Value": {"Fn::GetAtt": ["EventBus", "Name"]}}, + } + }""" + + cf = boto3.client("cloudformation", "us-west-2") + events = boto3.client("events", "us-west-2") + + cf.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + stack = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + outputs = stack["Outputs"] + + output_arn = list(filter(lambda item: item["OutputKey"] == "bus_arn", outputs))[0] + output_name = list(filter(lambda item: item["OutputKey"] == "bus_name", outputs))[0] + + event_bus = events.list_event_buses(NamePrefix="MyEventBus")["EventBuses"][0] + + output_arn["OutputValue"].should.equal(event_bus["Arn"]) + output_name["OutputValue"].should.equal(event_bus["Name"]) + + +@mock_cloudformation +@mock_dynamodb2 +def test_dynamodb_table_creation(): + CFN_TEMPLATE = { + "Outputs": {"MyTableName": {"Value": {"Ref": "MyTable"}},}, + "Resources": { + "MyTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"} + ], + 
"BillingMode": "PAY_PER_REQUEST", + }, + }, + }, + } + stack_name = "foobar" + cfn = boto3.client("cloudformation", "us-west-2") + cfn.create_stack(StackName=stack_name, TemplateBody=json.dumps(CFN_TEMPLATE)) + # Wait until moto creates the stack + waiter = cfn.get_waiter("stack_create_complete") + waiter.wait(StackName=stack_name) + # Verify the TableName is part of the outputs + stack = cfn.describe_stacks(StackName=stack_name)["Stacks"][0] + outputs = stack["Outputs"] + outputs.should.have.length_of(1) + outputs[0]["OutputKey"].should.equal("MyTableName") + outputs[0]["OutputValue"].should.contain("foobar") + # Assert the table is created + ddb = boto3.client("dynamodb", "us-west-2") + table_names = ddb.list_tables()["TableNames"] + table_names.should.equal([outputs[0]["OutputValue"]]) diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 85df76592..9692e36cb 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -15,7 +15,7 @@ from moto.cloudformation.parsing import ( from moto.sqs.models import Queue from moto.s3.models import FakeBucket from moto.cloudformation.utils import yaml_tag_constructor -from boto.cloudformation.stack import Output +from moto.packages.boto.cloudformation.stack import Output dummy_template = { @@ -38,6 +38,16 @@ name_type_template = { }, } +name_type_template_with_tabs_json = """ +\t{ +\t\t"AWSTemplateFormatVersion": "2010-09-09", +\t\t"Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. 
You will be billed for the AWS resources used if you create a stack from this template.", +\t\t"Resources": { +\t\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}} +\t\t} +\t} +""" + output_dict = { "Outputs": { "Output1": {"Value": {"Ref": "Queue"}, "Description": "This is a description."} @@ -57,6 +67,8 @@ get_availability_zones_output = {"Outputs": {"Output1": {"Value": {"Fn::GetAZs": parameters = { "Parameters": { "Param": {"Type": "String"}, + "NumberParam": {"Type": "Number"}, + "NumberListParam": {"Type": "List"}, "NoEchoParam": {"Type": "String", "NoEcho": True}, } } @@ -186,6 +198,21 @@ def test_parse_stack_with_name_type_resource(): queue.should.be.a(Queue) +def test_parse_stack_with_tabbed_json_template(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=name_type_template_with_tabs_json, + parameters={}, + region_name="us-west-1", + ) + + stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal("Queue") + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + def test_parse_stack_with_yaml_template(): stack = FakeStack( stack_id="test_id", @@ -278,12 +305,23 @@ def test_parse_stack_with_parameters(): stack_id="test_id", name="test_stack", template=parameters_template_json, - parameters={"Param": "visible value", "NoEchoParam": "hidden value"}, + parameters={ + "Param": "visible value", + "NumberParam": "42", + "NumberListParam": "42,3.14159", + "NoEchoParam": "hidden value", + }, region_name="us-west-1", ) stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam") stack.resource_map.no_echo_parameter_keys.should_not.have("Param") + stack.resource_map.no_echo_parameter_keys.should_not.have("NumberParam") + stack.resource_map.no_echo_parameter_keys.should_not.have("NumberListParam") + stack.resource_map.resolved_parameters["NumberParam"].should.equal(42) + stack.resource_map.resolved_parameters["NumberListParam"].should.equal( + [42, 3.14159] + ) def test_parse_equals_condition(): diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index 4dd4d7e08..a4c65a4c7 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -3,8 +3,8 @@ import json import yaml import os import boto3 -from nose.tools import raises import botocore +import sure # noqa from moto.cloudformation.exceptions import ValidationError @@ -39,6 +39,16 @@ json_template = { }, } +json_valid_template_with_tabs = """ +{ +\t"AWSTemplateFormatVersion": "2010-09-09", +\t"Description": "Stack 2", +\t"Resources": { +\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}} +\t} +} +""" + # One resource is required json_bad_template = {"AWSTemplateFormatVersion": "2010-09-09", "Description": "Stack 1"} @@ -55,6 +65,15 @@ def test_boto3_json_validate_successful(): assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 +@mock_cloudformation +def test_boto3_json_with_tabs_validate_successful(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + response = cf_conn.validate_template(TemplateBody=json_valid_template_with_tabs) + assert response["Description"] == "Stack 2" + assert response["Parameters"] == [] + assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 + + @mock_cloudformation def test_boto3_json_invalid_missing_resource(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -62,10 +81,9 @@ def 
test_boto3_json_invalid_missing_resource(): cf_conn.validate_template(TemplateBody=dummy_bad_template_json) assert False except botocore.exceptions.ClientError as e: - assert ( - str(e) - == "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" - " with id Missing top level item Resources to file module does not exist" + str(e).should.contain( + "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" + " with id Missing top level" ) assert True @@ -96,6 +114,25 @@ def test_boto3_yaml_validate_successful(): assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 +@mock_cloudformation +@mock_s3 +def test_boto3_yaml_validate_template_url_successful(): + s3 = boto3.client("s3", region_name="us-east-1") + s3_conn = boto3.resource("s3", region_name="us-east-1") + s3_conn.create_bucket(Bucket="foobar") + + s3_conn.Object("foobar", "template-key").put(Body=yaml_template) + key_url = s3.generate_presigned_url( + ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} + ) + + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + response = cf_conn.validate_template(TemplateURL=key_url) + assert response["Description"] == "Simple CloudFormation Test Template" + assert response["Parameters"] == [] + assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 + + @mock_cloudformation def test_boto3_yaml_invalid_missing_resource(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -103,9 +140,8 @@ def test_boto3_yaml_invalid_missing_resource(): cf_conn.validate_template(TemplateBody=yaml_bad_template) assert False except botocore.exceptions.ClientError as e: - assert ( - str(e) - == "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" - " with id Missing top level item Resources to file module does not exist" + str(e).should.contain( + "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" + " with id Missing top level" ) assert True diff --git a/tests/test_cloudwatch/__init__.py b/tests/test_cloudwatch/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_cloudwatch/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
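The tab-indentation tests above (and the matching ones in test_stack_parsing.py) all exercise the same corner case: json.loads treats tabs between tokens as ordinary whitespace, while YAML forbids tabs as indentation, so a template loader must not let a YAML-first parse reject valid tab-indented JSON. A minimal sketch of a tolerant loader, assuming a JSON-first strategy; illustrative only, not moto's actual implementation:

import json
import yaml

def load_template(body):
    # Try JSON first: json.loads skips tabs and newlines between tokens,
    # so tab-indented JSON parses cleanly; fall back to YAML otherwise.
    try:
        return json.loads(body)
    except ValueError:
        return yaml.safe_load(body)

tabbed = '\t{\n\t\t"AWSTemplateFormatVersion": "2010-09-09",\n\t\t"Resources": {}\n\t}'
assert load_template(tabbed)["AWSTemplateFormatVersion"] == "2010-09-09"
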
diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index f1a2e3fd6..92e1cd498 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,8 +1,12 @@ import boto from boto.ec2.cloudwatch.alarm import MetricAlarm +from boto.s3.key import Key +from datetime import datetime import sure # noqa +from moto.cloudwatch.utils import make_arn_for_alarm +from moto.core import ACCOUNT_ID -from moto import mock_cloudwatch_deprecated +from moto import mock_cloudwatch_deprecated, mock_s3_deprecated def alarm_fixture(name="tester", action=None): @@ -49,6 +53,7 @@ def test_create_alarm(): list(alarm.ok_actions).should.equal(["arn:ok"]) list(alarm.insufficient_data_actions).should.equal(["arn:insufficient"]) alarm.unit.should.equal("Seconds") + assert "tester" in alarm.alarm_arn @mock_cloudwatch_deprecated @@ -82,7 +87,8 @@ def test_put_metric_data(): ) metrics = conn.list_metrics() - metrics.should.have.length_of(1) + metric_names = [m for m in metrics if m.name == "metric"] + metric_names.should.have(1) metric = metrics[0] metric.namespace.should.equal("tester") metric.name.should.equal("metric") @@ -101,17 +107,103 @@ def test_describe_alarms(): conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + enabled = alarm_fixture(name="enabled1", action=["abarfoo"]) + enabled.add_alarm_action("arn:alarm") + conn.create_alarm(enabled) + alarms = conn.describe_alarms() - alarms.should.have.length_of(4) + alarms.should.have.length_of(5) alarms = conn.describe_alarms(alarm_name_prefix="nfoo") alarms.should.have.length_of(2) alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) alarms.should.have.length_of(3) alarms = conn.describe_alarms(action_prefix="afoo") alarms.should.have.length_of(2) + alarms = conn.describe_alarms(alarm_name_prefix="enabled") + alarms.should.have.length_of(1) + alarms[0].actions_enabled.should.equal("true") for alarm in conn.describe_alarms(): alarm.delete() alarms = conn.describe_alarms() alarms.should.have.length_of(0) + + +@mock_cloudwatch_deprecated +def test_describe_alarms_for_metric(): + conn = boto.connect_cloudwatch() + + conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) + conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) + conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) + conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + + alarms = conn.describe_alarms_for_metric("nbarfoo_metric", "nbarfoo_namespace") + alarms.should.have.length_of(1) + + alarms = conn.describe_alarms_for_metric("nbazfoo_metric", "nbazfoo_namespace") + alarms.should.have.length_of(1) + + +@mock_cloudwatch_deprecated +def test_get_metric_statistics(): + conn = boto.connect_cloudwatch() + + metric_timestamp = datetime(2018, 4, 9, 13, 0, 0, 0) + + conn.put_metric_data( + namespace="tester", + name="metric", + value=1.5, + dimensions={"InstanceId": ["i-0123456,i-0123457"]}, + timestamp=metric_timestamp, + ) + + metric_kwargs = dict( + namespace="tester", + metric_name="metric", + start_time=metric_timestamp, + end_time=datetime.now(), + period=3600, + statistics=["Minimum"], + ) + + datapoints = conn.get_metric_statistics(**metric_kwargs) + datapoints.should.have.length_of(1) + datapoint = datapoints[0] + datapoint.should.have.key("Minimum").which.should.equal(1.5) + datapoint.should.have.key("Timestamp").which.should.equal(metric_timestamp) + + +# TODO: 
THIS IS CURRENTLY BROKEN! +# @mock_s3_deprecated +# @mock_cloudwatch_deprecated +# def test_cloudwatch_return_s3_metrics(): +# +# region = "us-east-1" +# +# cw = boto.ec2.cloudwatch.connect_to_region(region) +# s3 = boto.s3.connect_to_region(region) +# +# bucket_name_1 = "test-bucket-1" +# bucket_name_2 = "test-bucket-2" +# +# bucket1 = s3.create_bucket(bucket_name=bucket_name_1) +# key = Key(bucket1) +# key.key = "the-key" +# key.set_contents_from_string("foobar" * 4) +# s3.create_bucket(bucket_name=bucket_name_2) +# +# metrics_s3_bucket_1 = cw.list_metrics(dimensions={"BucketName": bucket_name_1}) +# # Verify that the OOTB S3 metrics are available for the created buckets +# len(metrics_s3_bucket_1).should.be(2) +# metric_names = [m.name for m in metrics_s3_bucket_1] +# sorted(metric_names).should.equal( +# ["Metric:BucketSizeBytes", "Metric:NumberOfObjects"] +# ) +# +# # Explicit clean up - the metrics for these buckets are messing with subsequent tests +# key.delete() +# s3.delete_bucket(bucket_name_1) +# s3.delete_bucket(bucket_name_2) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 5bd9ed13d..55f0878d4 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -3,12 +3,15 @@ import boto3 from botocore.exceptions import ClientError from datetime import datetime, timedelta -from nose.tools import assert_raises +from freezegun import freeze_time +import pytest from uuid import uuid4 import pytz import sure # noqa from moto import mock_cloudwatch +from moto.cloudwatch.utils import make_arn_for_alarm +from moto.core import ACCOUNT_ID @mock_cloudwatch @@ -91,6 +94,141 @@ def test_get_dashboard_fail(): raise RuntimeError("Should of raised error") +@mock_cloudwatch +def test_delete_invalid_alarm(): + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + + cloudwatch.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + + # trying to delete an alarm which is not created along with valid alarm. + with pytest.raises(ClientError) as e: + cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"]) + e.value.response["Error"]["Code"].should.equal("ResourceNotFound") + + resp = cloudwatch.describe_alarms(AlarmNames=["testalarm1"]) + # making sure other alarms are not deleted in case of an error. + len(resp["MetricAlarms"]).should.equal(1) + + # test to check if the error raises if only one invalid alarm is tried to delete. 
+ with pytest.raises(ClientError) as e: + cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName"]) + e.value.response["Error"]["Code"].should.equal("ResourceNotFound") + + +@mock_cloudwatch +def test_describe_alarms_for_metric(): + conn = boto3.client("cloudwatch", region_name="eu-central-1") + conn.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + alarms = conn.describe_alarms_for_metric(MetricName="cpu", Namespace="blah") + alarms.get("MetricAlarms").should.have.length_of(1) + + assert "testalarm1" in alarms.get("MetricAlarms")[0].get("AlarmArn") + + +@mock_cloudwatch +def test_describe_alarms(): + conn = boto3.client("cloudwatch", region_name="eu-central-1") + conn.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + metric_data_queries = [ + { + "Id": "metricA", + "Expression": "metricB + metricC", + "Label": "metricA", + "ReturnData": True, + }, + { + "Id": "metricB", + "MetricStat": { + "Metric": { + "Namespace": "ns", + "MetricName": "metricB", + "Dimensions": [{"Name": "Name", "Value": "B"}], + }, + "Period": 60, + "Stat": "Sum", + }, + "ReturnData": False, + }, + { + "Id": "metricC", + "MetricStat": { + "Metric": { + "Namespace": "AWS/Lambda", + "MetricName": "metricC", + "Dimensions": [{"Name": "Name", "Value": "C"}], + }, + "Period": 60, + "Stat": "Sum", + "Unit": "Seconds", + }, + "ReturnData": False, + }, + ] + conn.put_metric_alarm( + AlarmName="testalarm2", + EvaluationPeriods=1, + DatapointsToAlarm=1, + Metrics=metric_data_queries, + ComparisonOperator="GreaterThanThreshold", + Threshold=1.0, + ) + alarms = conn.describe_alarms() + metric_alarms = alarms.get("MetricAlarms") + metric_alarms.should.have.length_of(2) + single_metric_alarm = [ + alarm for alarm in metric_alarms if alarm["AlarmName"] == "testalarm1" + ][0] + multiple_metric_alarm = [ + alarm for alarm in metric_alarms if alarm["AlarmName"] == "testalarm2" + ][0] + + single_metric_alarm["MetricName"].should.equal("cpu") + single_metric_alarm.shouldnt.have.property("Metrics") + single_metric_alarm["Namespace"].should.equal("blah") + single_metric_alarm["Period"].should.equal(10) + single_metric_alarm["EvaluationPeriods"].should.equal(5) + single_metric_alarm["Statistic"].should.equal("Average") + single_metric_alarm["ComparisonOperator"].should.equal("GreaterThanThreshold") + single_metric_alarm["Threshold"].should.equal(2) + + multiple_metric_alarm.shouldnt.have.property("MetricName") + multiple_metric_alarm["EvaluationPeriods"].should.equal(1) + multiple_metric_alarm["DatapointsToAlarm"].should.equal(1) + multiple_metric_alarm["Metrics"].should.equal(metric_data_queries) + multiple_metric_alarm["ComparisonOperator"].should.equal("GreaterThanThreshold") + multiple_metric_alarm["Threshold"].should.equal(1.0) + + @mock_cloudwatch def test_alarm_state(): client = boto3.client("cloudwatch", region_name="eu-central-1") @@ -104,6 +242,7 @@ def test_alarm_state(): Statistic="Average", Threshold=2, ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, ) client.put_metric_alarm( AlarmName="testalarm2", @@ -128,11 +267,13 @@ def test_alarm_state(): len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm1") 
resp["MetricAlarms"][0]["StateValue"].should.equal("ALARM") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(True) resp = client.describe_alarms(StateValue="OK") len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm2") resp["MetricAlarms"][0]["StateValue"].should.equal("OK") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(False) # Just for sanity resp = client.describe_alarms() @@ -208,6 +349,152 @@ def test_get_metric_statistics(): datapoint["Sum"].should.equal(1.5) +@mock_cloudwatch +def test_duplicate_put_metric_data(): + conn = boto3.client("cloudwatch", region_name="us-east-1") + utc_now = datetime.now(tz=pytz.utc) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + len(result).should.equal(1) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + len(result).should.equal(1) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [{"Name": "Name", "Value": "B"}], + } + ] + ) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [{"Name": "Name", "Value": "B"}], + }, + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + }, + ] + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "C"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + } + ] + ) + + +@mock_cloudwatch +@freeze_time("2020-02-10 18:44:05") +def test_custom_timestamp(): + utc_now = datetime.now(tz=pytz.utc) + time = "2020-02-10T18:44:09Z" + cw = boto3.client("cloudwatch", "eu-west-1") + + cw.put_metric_data( + Namespace="tester", + MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)], + ) + + cw.put_metric_data( + Namespace="tester", + MetricData=[ + dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10)) + ], + ) + + stats = cw.get_metric_statistics( + Namespace="tester", + MetricName="metric", + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + Period=60, + Statistics=["SampleCount", "Sum"], + ) + + @mock_cloudwatch def test_list_metrics(): cloudwatch = boto3.client("cloudwatch", "eu-west-1") @@ -230,8 +517,16 @@ def 
test_list_metrics(): # Verify format res.should.equal( [ - {u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"}, - {u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"}, + { + u"Namespace": "list_test_1/", + u"Dimensions": [], + u"MetricName": "metric1", + }, + { + u"Namespace": "list_test_1/", + u"Dimensions": [], + u"MetricName": "metric1", + }, ] ) # Verify unknown namespace still has no results @@ -245,9 +540,9 @@ def test_list_metrics_paginated(): # Verify that only a single page of metrics is returned cloudwatch.list_metrics()["Metrics"].should.be.empty # Verify we can't pass a random NextToken - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=str(uuid4())) - e.exception.response["Error"]["Message"].should.equal( + e.value.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" ) # Add a boatload of metrics @@ -274,9 +569,9 @@ def test_list_metrics_paginated(): len(third_page["Metrics"]).should.equal(100) third_page.shouldnt.contain("NextToken") # Verify that we can't reuse an existing token - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=first_page["NextToken"]) - e.exception.response["Error"]["Message"].should.equal( + e.value.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" ) @@ -289,3 +584,232 @@ def create_metrics(cloudwatch, namespace, metrics=5, data_points=5): Namespace=namespace, MetricData=[{"MetricName": metric_name, "Value": j, "Unit": "Seconds"}], ) + + +@mock_cloudwatch +def test_get_metric_data_within_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + values = [0, 2, 4, 3.5, 7, 100] + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + {"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values + ], + ) + # get_metric_data + stats = ["Average", "Sum", "Minimum", "Maximum"] + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result_" + stat, + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": stat, + }, + } + for stat in stats + ], + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Average/Min/Max/Sum is returned as expected + avg = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Average" + ][0] + avg["Label"].should.equal("metric1 Average") + avg["StatusCode"].should.equal("Complete") + [int(val) for val in avg["Values"]].should.equal([19]) + + sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][ + 0 + ] + sum_["Label"].should.equal("metric1 Sum") + sum_["StatusCode"].should.equal("Complete") + [val for val in sum_["Values"]].should.equal([sum(values)]) + + min_ = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum" + ][0] + min_["Label"].should.equal("metric1 Minimum") + min_["StatusCode"].should.equal("Complete") + [int(val) for val in min_["Values"]].should.equal([0]) + + max_ = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum" + ][0] + max_["Label"].should.equal("metric1 Maximum") + max_["StatusCode"].should.equal("Complete") + [int(val) for val in max_["Values"]].should.equal([100]) + + +@mock_cloudwatch +def 
test_get_metric_data_partially_within_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + yesterday = utc_now - timedelta(days=1) + last_week = utc_now - timedelta(days=7) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + values = [0, 2, 4, 3.5, 7, 100] + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 10, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 20, + "Unit": "Seconds", + "Timestamp": yesterday, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": last_week, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result", + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + } + ], + StartTime=yesterday - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Last week's data is not returned + len(response["MetricDataResults"]).should.equal(1) + sum_ = response["MetricDataResults"][0] + sum_["Label"].should.equal("metric1 Sum") + sum_["StatusCode"].should.equal("Complete") + sum_["Values"].should.equal([30.0]) + + +@mock_cloudwatch +def test_get_metric_data_outside_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + last_week = utc_now - timedelta(days=7) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": last_week, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result", + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + } + ], + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Last week's data is not returned + len(response["MetricDataResults"]).should.equal(1) + response["MetricDataResults"][0]["Id"].should.equal("result") + response["MetricDataResults"][0]["StatusCode"].should.equal("Complete") + response["MetricDataResults"][0]["Values"].should.equal([]) + + +@mock_cloudwatch +def test_get_metric_data_for_multiple_metrics(): + utc_now = datetime.now(tz=pytz.utc) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace = "my_namespace/" + # put metric data + cloudwatch.put_metric_data( + Namespace=namespace, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace, + MetricData=[ + { + "MetricName": "metric2", + "Value": 25, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result1", + "MetricStat": { + "Metric": {"Namespace": namespace, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + }, + { + "Id": "result2", + "MetricStat": { + "Metric": {"Namespace": namespace, "MetricName": "metric2"}, + "Period": 60, + "Stat": "Sum", + }, + }, + ], + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + 
len(response["MetricDataResults"]).should.equal(2) + + res1 = [res for res in response["MetricDataResults"] if res["Id"] == "result1"][0] + res1["Values"].should.equal([50.0]) + + res2 = [res for res in response["MetricDataResults"] if res["Id"] == "result2"][0] + res2["Values"].should.equal([25.0]) diff --git a/tests/test_codecommit/test_codecommit.py b/tests/test_codecommit/test_codecommit.py index 6e916f20a..4c38252ff 100644 --- a/tests/test_codecommit/test_codecommit.py +++ b/tests/test_codecommit/test_codecommit.py @@ -2,9 +2,9 @@ import boto3 import sure # noqa from moto import mock_codecommit -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest @mock_codecommit @@ -81,12 +81,12 @@ def test_create_repository_repository_name_exists(): client.create_repository(repositoryName="repository_two") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_repository( repositoryName="repository_two", repositoryDescription="description repo two", ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryNameExistsException") @@ -99,9 +99,9 @@ def test_create_repository_repository_name_exists(): def test_create_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="eu-central-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_repository(repositoryName="in_123_valid_@#$_characters") - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") @@ -156,9 +156,9 @@ def test_get_repository(): client = boto3.client("codecommit", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_repository(repositoryName=repository_name) - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryDoesNotExistException") @@ -171,9 +171,9 @@ def test_get_repository(): def test_get_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="eu-central-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_repository(repositoryName="repository_one-@#@") - ex = e.exception + ex = e.value ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") ex.response["Error"]["Message"].should.equal( @@ -207,9 +207,9 @@ def test_delete_repository(): def test_delete_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_repository(repositoryName="_rep@ository_one") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") diff --git a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py 
index a40efa05c..ca1094582 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -4,7 +4,7 @@ from datetime import datetime import boto3 import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_codepipeline, mock_iam @@ -77,9 +77,9 @@ def test_create_pipeline_errors(): client_iam = boto3.client("iam", region_name="us-east-1") create_basic_codepipeline(client, "test-pipeline") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: create_basic_codepipeline(client, "test-pipeline") - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -87,7 +87,7 @@ def test_create_pipeline_errors(): "A pipeline with the name 'test-pipeline' already exists in account '123456789012'" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -115,7 +115,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -139,7 +139,7 @@ def test_create_pipeline_errors(): ), )["Role"]["Arn"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -167,7 +167,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -175,7 +175,7 @@ def test_create_pipeline_errors(): "CodePipeline is not authorized to perform AssumeRole on role arn:aws:iam::123456789012:role/wrong-role" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -203,7 +203,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -282,9 +282,9 @@ def test_get_pipeline(): def test_get_pipeline_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_pipeline(name="not-existing") - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetPipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("PipelineNotFoundException") @@ -410,7 +410,7 @@ def test_update_pipeline(): def test_update_pipeline_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.update_pipeline( pipeline={ "name": "not-existing", @@ -456,7 +456,7 @@ def test_update_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UpdatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) 
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -517,11 +517,11 @@ def test_list_tags_for_resource(): def test_list_tags_for_resource_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_tags_for_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListTagsForResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -555,12 +555,12 @@ def test_tag_resource_errors(): name = "test-pipeline" create_basic_codepipeline(client, name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tags=[{"key": "key-2", "value": "value-2"}], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -568,12 +568,12 @@ def test_tag_resource_errors(): "The account with id '123456789012' does not include a pipeline with the name 'not-existing'" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), tags=[{"key": "aws:key", "value": "value"}], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidTagsException") @@ -583,7 +583,7 @@ def test_tag_resource_errors(): "msg=[Caller is an end user and not allowed to mutate system tags]" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), tags=[ @@ -591,7 +591,7 @@ def test_tag_resource_errors(): for i in range(50) ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("TooManyTagsException") @@ -634,12 +634,12 @@ def test_untag_resource(): def test_untag_resource_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.untag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tagKeys=["key"], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UntagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") diff --git a/tests/test_cognitoidentity/__init__.py b/tests/test_cognitoidentity/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_cognitoidentity/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 8eae183c6..cfe673cdf 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -1,12 +1,14 @@ from __future__ import unicode_literals import boto3 +import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_cognitoidentity from moto.cognitoidentity.utils import get_random_identity_id from moto.core import ACCOUNT_ID +from uuid import UUID @mock_cognitoidentity @@ -73,18 +75,20 @@ def test_describe_identity_pool(): def test_describe_identity_pool_with_invalid_id_raises_error(): conn = boto3.client("cognito-identity", "us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.describe_identity_pool(IdentityPoolId="us-west-2_non-existent") - cm.exception.operation_name.should.equal("DescribeIdentityPool") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("DescribeIdentityPool") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # testing a helper function def test_get_random_identity_id(): - assert len(get_random_identity_id("us-west-2")) > 0 - assert len(get_random_identity_id("us-west-2").split(":")[1]) == 19 + identity_id = get_random_identity_id("us-west-2") + region, id = identity_id.split(":") + region.should.equal("us-west-2") + UUID(id, version=4) # Will throw an error if it's not a valid UUID @mock_cognitoidentity @@ -96,7 +100,6 @@ def test_get_id(): IdentityPoolId="us-west-2:12345", Logins={"someurl": "12345"}, ) - print(result) assert ( result.get("IdentityId", "").startswith("us-west-2") or result.get("ResponseMetadata").get("HTTPStatusCode") == 200 diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py index 903dae290..8c4229f06 100644 --- a/tests/test_cognitoidentity/test_server.py +++ b/tests/test_cognitoidentity/test_server.py @@ -48,6 +48,5 @@ def test_get_id(): }, ) - print(res.data) json_data = json.loads(res.data.decode("utf-8")) assert ":" in json_data["IdentityId"] diff --git a/tests/test_cognitoidp/__init__.py b/tests/test_cognitoidp/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_cognitoidp/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
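Several of the Cognito IDP tests below derive a SECRET_HASH inline. AWS defines it as Base64(HMAC-SHA256(key=client_secret, message=username + client_id)); factoring the tests' repeated computation into one helper looks roughly like this (the latin-1 encoding mirrors the tests and only matters for non-ASCII usernames):

import base64
import hashlib
import hmac

def compute_secret_hash(client_secret, username, client_id):
    # SECRET_HASH = Base64(HMAC-SHA256(key=client_secret,
    #                                   msg=username + client_id))
    digest = hmac.new(
        client_secret.encode("latin-1"),  # the tests encode with latin-1
        (username + client_id).encode("latin-1"),
        hashlib.sha256,
    ).digest()
    return base64.b64encode(digest).decode()
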
diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index d0a462c5c..c61be4aa4 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -3,6 +3,12 @@ from __future__ import unicode_literals import json import os import random +import re +import hmac +import hashlib +import base64 + +import requests import uuid import boto3 @@ -10,10 +16,11 @@ import boto3 # noinspection PyUnresolvedReferences import sure # noqa from botocore.exceptions import ClientError -from jose import jws -from nose.tools import assert_raises +from jose import jws, jwk, jwt +import pytest -from moto import mock_cognitoidp +from moto import mock_cognitoidp, settings +from moto.cognitoidp.utils import create_id from moto.core import ACCOUNT_ID @@ -27,6 +34,11 @@ def test_create_user_pool(): result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") + result["UserPool"]["Arn"].should.equal( + "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( + ACCOUNT_ID, result["UserPool"]["Id"] + ) + ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @@ -205,8 +217,31 @@ def test_create_user_pool_client(): ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) - result["UserPoolClient"]["ClientId"].should_not.be.none + bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"].should_not.have.key("ClientSecret") + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_create_user_pool_client_returns_secret(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + GenerateSecret=True, + CallbackURLs=[value], + ) + + result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) + bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["ClientSecret"].should_not.be.none result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @@ -325,6 +360,37 @@ def test_update_user_pool_client(): ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"].should_not.have.key("ClientSecret") + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) + + +@mock_cognitoidp +def test_update_user_pool_client_returns_secret(): + conn = boto3.client("cognito-idp", "us-west-2") + + old_client_name = str(uuid.uuid4()) + new_client_name = str(uuid.uuid4()) + old_value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=old_client_name, + GenerateSecret=True, + CallbackURLs=[old_value], + ) + client_secret = client_details["UserPoolClient"]["ClientSecret"] + + result = conn.update_user_pool_client( + 
UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ClientName=new_client_name, + CallbackURLs=[new_value], + ) + + result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"]["ClientSecret"].should.equal(client_secret) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) @@ -537,14 +603,14 @@ def test_update_identity_provider_no_user_pool(): new_value = str(uuid.uuid4()) - with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId="foo", ProviderName="bar", ProviderDetails={"thing": new_value} ) - cm.exception.operation_name.should.equal("UpdateIdentityProvider") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("UpdateIdentityProvider") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -557,16 +623,16 @@ def test_update_identity_provider_no_identity_provider(): new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName="foo", ProviderDetails={"thing": new_value}, ) - cm.exception.operation_name.should.equal("UpdateIdentityProvider") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("UpdateIdentityProvider") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -633,11 +699,11 @@ def test_create_group_with_duplicate_name_raises_error(): conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) - cm.exception.operation_name.should.equal("CreateGroup") - cm.exception.response["Error"]["Code"].should.equal("GroupExistsException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("CreateGroup") + cm.value.response["Error"]["Code"].should.equal("GroupExistsException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -681,9 +747,9 @@ def test_delete_group(): result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp @@ -1019,6 +1085,13 @@ def test_list_users(): result["Users"].should.have.length_of(1) 
result["Users"][0]["Username"].should.equal(username_bis) + # checking Filter with space + result = conn.list_users( + UserPoolId=user_pool_id, Filter='phone_number = "+33666666666' + ) + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username_bis) + @mock_cognitoidp def test_list_users_returns_limit_items(): @@ -1185,6 +1258,137 @@ def test_authentication_flow(): authentication_flow(conn) +def user_authentication_flow(conn): + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name], + GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + + conn.sign_up( + ClientId=client_id, Username=username, Password=password, + ) + + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + # generating secret hash + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": username, + }, + ) + + refresh_token = result["AuthenticationResult"]["RefreshToken"] + + # add mfa token + conn.associate_software_token( + AccessToken=result["AuthenticationResult"]["AccessToken"], + ) + + conn.verify_software_token( + AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", + ) + + conn.set_user_mfa_preference( + AccessToken=result["AuthenticationResult"]["AccessToken"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, + ) + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="REFRESH_TOKEN", + AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, + ) + + result["AuthenticationResult"]["IdToken"].should_not.be.none + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + # authenticate user once again this time with mfa token + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": username, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + Session=result["Session"], + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "SOFTWARE_TOKEN_MFA_CODE": "123456", + "USERNAME": username, + "SECRET_HASH": secret_hash, + }, + ) + + 
return { + "user_pool_id": user_pool_id, + "client_id": client_id, + "client_secret": client_secret, + "secret_hash": secret_hash, + "id_token": result["AuthenticationResult"]["IdToken"], + "access_token": result["AuthenticationResult"]["AccessToken"], + "refresh_token": refresh_token, + "username": username, + "password": password, + "additional_fields": {user_attribute_name: user_attribute_value}, + } + + +@mock_cognitoidp +def test_user_authentication_flow(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_authentication_flow(conn) + + @mock_cognitoidp def test_token_legitimacy(): conn = boto3.client("cognito-idp", "us-west-2") @@ -1238,13 +1442,43 @@ def test_change_password(): result["AuthenticationResult"].should_not.be.none +@mock_cognitoidp +def test_change_password__using_custom_user_agent_header(): + # https://github.com/spulec/moto/issues/3098 + # As the admin_initiate_auth-method is unauthenticated, we use the user-agent header to pass in the region + # This test verifies this works, even if we pass in our own user-agent header + from botocore.config import Config + + my_config = Config(user_agent_extra="more/info", signature_version="v4") + conn = boto3.client("cognito-idp", "us-west-2", config=my_config) + + outputs = authentication_flow(conn) + + # Take this opportunity to test change_password, which requires an access token. + newer_password = str(uuid.uuid4()) + conn.change_password( + AccessToken=outputs["access_token"], + PreviousPassword=outputs["password"], + ProposedPassword=newer_password, + ) + + # Log in again, which should succeed without a challenge because the user is no + # longer in the force-new-password state. + result = conn.admin_initiate_auth( + UserPoolId=outputs["user_pool_id"], + ClientId=outputs["client_id"], + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={"USERNAME": outputs["username"], "PASSWORD": newer_password}, + ) + + result["AuthenticationResult"].should_not.be.none + + @mock_cognitoidp def test_forgot_password(): conn = boto3.client("cognito-idp", "us-west-2") - result = conn.forgot_password( - ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4()) - ) + result = conn.forgot_password(ClientId=create_id(), Username=str(uuid.uuid4())) result["CodeDeliveryDetails"].should_not.be.none @@ -1304,3 +1538,405 @@ def test_admin_update_user_attributes(): val.should.equal("Doe") elif attr["Name"] == "given_name": val.should.equal("Jane") + + +@mock_cognitoidp +def test_resource_server(): + + client = boto3.client("cognito-idp", "us-west-2") + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + res = client.create_user_pool(PoolName=name) + + user_pool_id = res["UserPool"]["Id"] + identifier = "http://localhost.localdomain" + name = "local server" + scopes = [ + {"ScopeName": "app:write", "ScopeDescription": "write scope"}, + {"ScopeName": "app:read", "ScopeDescription": "read scope"}, + ] + + res = client.create_resource_server( + UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes + ) + + res["ResourceServer"]["UserPoolId"].should.equal(user_pool_id) + res["ResourceServer"]["Identifier"].should.equal(identifier) + res["ResourceServer"]["Name"].should.equal(name) + res["ResourceServer"]["Scopes"].should.equal(scopes) + + with pytest.raises(ClientError) as ex: + client.create_resource_server( + UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes + ) + + ex.value.operation_name.should.equal("CreateResourceServer") + ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") + 
ex.value.response["Error"]["Message"].should.equal( + "%s already exists in user pool %s." % (identifier, user_pool_id) + ) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_cognitoidp +def test_sign_up(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), + )["UserPoolClient"]["ClientId"] + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + result = conn.sign_up(ClientId=client_id, Username=username, Password=password) + result["UserConfirmed"].should.be.false + result["UserSub"].should_not.be.none + + +@mock_cognitoidp +def test_confirm_sign_up(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["UserStatus"].should.equal("CONFIRMED") + + +@mock_cognitoidp +def test_initiate_auth_USER_SRP_AUTH(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result["ChallengeName"].should.equal("PASSWORD_VERIFIER") + + +@mock_cognitoidp +def test_initiate_auth_REFRESH_TOKEN(): + conn = boto3.client("cognito-idp", "us-west-2") + result = user_authentication_flow(conn) + result = conn.initiate_auth( + ClientId=result["client_id"], + AuthFlow="REFRESH_TOKEN", + AuthParameters={ + "REFRESH_TOKEN": result["refresh_token"], + "SECRET_HASH": result["secret_hash"], + }, + ) + + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + +@mock_cognitoidp +def test_initiate_auth_for_unconfirmed_user(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, 
Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + caught = False + try: + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + except conn.exceptions.UserNotConfirmedException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_initiate_auth_with_invalid_secret_hash(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + invalid_secret_hash = str(uuid.uuid4()) + + caught = False + try: + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": invalid_secret_hash, + }, + ) + except conn.exceptions.NotAuthorizedException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_setting_mfa(): + conn = boto3.client("cognito-idp", "us-west-2") + result = authentication_flow(conn) + conn.associate_software_token(AccessToken=result["access_token"]) + conn.verify_software_token(AccessToken=result["access_token"], UserCode="123456") + conn.set_user_mfa_preference( + AccessToken=result["access_token"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, + ) + result = conn.admin_get_user( + UserPoolId=result["user_pool_id"], Username=result["username"] + ) + + result["UserMFASettingList"].should.have.length_of(1) + + +@mock_cognitoidp +def test_setting_mfa_when_token_not_verified(): + conn = boto3.client("cognito-idp", "us-west-2") + result = authentication_flow(conn) + conn.associate_software_token(AccessToken=result["access_token"]) + + caught = False + try: + conn.set_user_mfa_preference( + AccessToken=result["access_token"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, + ) + except conn.exceptions.InvalidParameterException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_respond_to_auth_challenge_with_invalid_secret_hash(): + conn = boto3.client("cognito-idp", "us-west-2") + result = user_authentication_flow(conn) + + valid_secret_hash = result["secret_hash"] + invalid_secret_hash = str(uuid.uuid4()) + + challenge = conn.initiate_auth( + ClientId=result["client_id"], + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": result["username"], + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": valid_secret_hash, + }, + ) + + challenge = conn.respond_to_auth_challenge( + ClientId=result["client_id"], + ChallengeName=challenge["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + 
"PASSWORD_CLAIM_SECRET_BLOCK": challenge["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": result["username"], + }, + ) + + caught = False + try: + conn.respond_to_auth_challenge( + ClientId=result["client_id"], + Session=challenge["Session"], + ChallengeName=challenge["ChallengeName"], + ChallengeResponses={ + "SOFTWARE_TOKEN_MFA_CODE": "123456", + "USERNAME": result["username"], + "SECRET_HASH": invalid_secret_hash, + }, + ) + except conn.exceptions.NotAuthorizedException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_admin_set_user_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[{"Name": "thing", "Value": value}], + ) + conn.admin_set_user_password( + UserPoolId=user_pool_id, Username=username, Password=password, Permanent=True + ) + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["Username"].should.equal(username) + result["UserAttributes"].should.have.length_of(1) + result["UserAttributes"][0]["Name"].should.equal("thing") + result["UserAttributes"][0]["Value"].should.equal(value) + result["UserStatus"].should.equal("CONFIRMED") + + +@mock_cognitoidp +def test_change_password_with_invalid_token_raises_error(): + client = boto3.client("cognito-idp", "us-west-2") + with pytest.raises(ClientError) as ex: + client.change_password( + AccessToken=str(uuid.uuid4()), + PreviousPassword="previous_password", + ProposedPassword="newer_password", + ) + ex.value.response["Error"]["Code"].should.equal("NotAuthorizedException") + + +@mock_cognitoidp +def test_confirm_forgot_password_with_non_existent_client_id_raises_error(): + client = boto3.client("cognito-idp", "us-west-2") + with pytest.raises(ClientError) as ex: + client.confirm_forgot_password( + ClientId="non-existent-client-id", + Username="not-existent-username", + ConfirmationCode=str(uuid.uuid4()), + Password=str(uuid.uuid4()), + ) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + +# Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, +# which isnt mocked in ServerMode +if not settings.TEST_SERVER_MODE: + + @mock_cognitoidp + def test_idtoken_contains_kid_header(): + # https://github.com/spulec/moto/issues/3078 + # Setup + cognito = boto3.client("cognito-idp", "us-west-2") + user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][ + "Id" + ] + client = cognito.create_user_pool_client( + UserPoolId=user_pool_id, + ExplicitAuthFlows=[ + "ALLOW_ADMIN_USER_PASSWORD_AUTH", + "ALLOW_REFRESH_TOKEN_AUTH", + "ALLOW_ADMIN_NO_SRP_AUTH", + ], + AllowedOAuthFlows=["code", "implicit"], + ClientName=str(uuid.uuid4()), + CallbackURLs=["https://example.com"], + ) + client_id = client["UserPoolClient"]["ClientId"] + username = str(uuid.uuid4()) + temporary_password = "1TemporaryP@ssword" + cognito.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=temporary_password, + ) + result = cognito.admin_initiate_auth( + UserPoolId=user_pool_id, + ClientId=client_id, + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={"USERNAME": username, "PASSWORD": temporary_password}, + ) + + # A newly created user is forced to set a new password + # This sets a new password and logs the user in (creates tokens) 
+ password = "1F@kePassword" + result = cognito.respond_to_auth_challenge( + Session=result["Session"], + ClientId=client_id, + ChallengeName="NEW_PASSWORD_REQUIRED", + ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, + ) + # + id_token = result["AuthenticationResult"]["IdToken"] + + # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT + verify_kid_header(id_token) + + +def verify_kid_header(token): + """Verifies the kid-header is corresponds with the public key""" + headers = jwt.get_unverified_headers(token) + kid = headers["kid"] + + key_index = -1 + keys = fetch_public_keys() + for i in range(len(keys)): + if kid == keys[i]["kid"]: + key_index = i + break + if key_index == -1: + raise Exception("Public key (kid) not found in jwks.json") + + +def fetch_public_keys(): + keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format( + "us-west-2", "someuserpoolid" + ) + response = requests.get(keys_url).json() + return response["keys"] diff --git a/tests/test_config/__init__.py b/tests/test_config/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_config/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index d5ec8f0bc..41774c2fa 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1,37 +1,42 @@ import json +import os +import time from datetime import datetime, timedelta import boto3 from botocore.exceptions import ClientError -from nose.tools import assert_raises +from unittest import SkipTest +import pytest from moto import mock_s3 from moto.config import mock_config from moto.core import ACCOUNT_ID +import sure # noqa + @mock_config def test_put_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Try without a name supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder(ConfigurationRecorder={"roleARN": "somearn"}) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "InvalidConfigurationRecorderNameException" ) - assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] + assert "is not valid, blank string." 
in ce.value.response["Error"]["Message"] # Try with a really long name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={"name": "a" * 257, "roleARN": "somearn"} ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # With resource types and flags set to True: @@ -63,7 +68,7 @@ def test_put_configuration_recorder(): ] for bg in bad_groups: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "default", @@ -71,16 +76,14 @@ def test_put_configuration_recorder(): "recordingGroup": bg, } ) + assert ce.value.response["Error"]["Code"] == "InvalidRecordingGroupException" assert ( - ce.exception.response["Error"]["Code"] == "InvalidRecordingGroupException" - ) - assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "The recording group provided is not valid" ) # With an invalid Resource Type: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "default", @@ -98,11 +101,11 @@ def test_put_configuration_recorder(): }, } ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] ) - assert "AWS::EC2::Instance" in ce.exception.response["Error"]["Message"] + assert "AWS::EC2::Instance" in ce.value.response["Error"]["Message"] # Create a proper one: client.put_configuration_recorder( @@ -161,7 +164,7 @@ def test_put_configuration_recorder(): assert not result[0]["recordingGroup"].get("resourceTypes") # Can currently only have exactly 1 Config Recorder in an account/region: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "someotherrecorder", @@ -173,12 +176,12 @@ def test_put_configuration_recorder(): } ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "MaxNumberOfConfigurationRecordersExceededException" ) assert ( "maximum number of configuration recorders: 1 is reached." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -187,7 +190,7 @@ def test_put_configuration_aggregator(): client = boto3.client("config", region_name="us-west-2") # With too many aggregation sources: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -203,12 +206,12 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 1" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # With an invalid region config (no regions defined): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -220,11 +223,11 @@ def test_put_configuration_aggregator(): ) assert ( "Your request does not specify any regions" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", OrganizationAggregationSource={ @@ -233,12 +236,12 @@ def test_put_configuration_aggregator(): ) assert ( "Your request does not specify any regions" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # With both region flags defined: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -250,12 +253,11 @@ def test_put_configuration_aggregator(): ], ) assert ( - "You must choose one of these options" - in ce.exception.response["Error"]["Message"] + "You must choose one of these options" in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", OrganizationAggregationSource={ @@ -265,24 +267,23 @@ def test_put_configuration_aggregator(): }, ) assert ( - "You must choose one of these options" - in ce.exception.response["Error"]["Message"] + "You must choose one of these options" in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # Name too long: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="a" * 257, AccountAggregationSources=[ {"AccountIds": ["012345678910"], "AllAwsRegions": True} ], ) - assert "configurationAggregatorName" in ce.exception.response["Error"]["Message"] - 
assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert "configurationAggregatorName" in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "ValidationException" # Too many tags (>50): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -294,12 +295,12 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 50" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -309,12 +310,12 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 128" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag value is too big (>256 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -324,12 +325,12 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -337,11 +338,11 @@ def test_put_configuration_aggregator(): ], Tags=[{"Key": "a", "Value": "a"}, {"Key": "a", "Value": "a"}], ) - assert "Duplicate tag keys found." in ce.exception.response["Error"]["Message"] - assert ce.exception.response["Error"]["Code"] == "InvalidInput" + assert "Duplicate tag keys found." 
in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -351,12 +352,12 @@ def test_put_configuration_aggregator(): ) assert ( "Member must satisfy regular expression pattern:" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # If it contains both the AccountAggregationSources and the OrganizationAggregationSource - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -369,18 +370,18 @@ def test_put_configuration_aggregator(): ) assert ( "AccountAggregationSource and the OrganizationAggregationSource" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # If it contains neither: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "AccountAggregationSource or the OrganizationAggregationSource" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # Just make one: account_aggregation_source = { @@ -461,31 +462,29 @@ def test_describe_configuration_aggregators(): ) # Describe with an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators( ConfigurationAggregatorNames=["DoesNotExist"] ) assert ( "The configuration aggregator does not exist." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) # Error describe with more than 1 item in the list: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators( ConfigurationAggregatorNames=["testing0", "DoesNotExist"] ) assert ( "At least one of the configuration aggregators does not exist." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) # Get the normal list: @@ -546,12 +545,10 @@ def test_describe_configuration_aggregators(): ) # Test with an invalid filter: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators(NextToken="WRONG") - assert ( - "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] - ) - assert ce.exception.response["Error"]["Code"] == "InvalidNextTokenException" + assert "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @mock_config @@ -559,7 +556,7 @@ def test_put_aggregation_authorization(): client = boto3.client("config", region_name="us-west-2") # Too many tags (>50): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -569,12 +566,12 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 50" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -582,12 +579,12 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 128" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag value is too big (>256 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -595,22 +592,22 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", Tags=[{"Key": "a", "Value": "a"}, {"Key": "a", "Value": "a"}], ) - assert "Duplicate tag keys found." in ce.exception.response["Error"]["Message"] - assert ce.exception.response["Error"]["Code"] == "InvalidInput" + assert "Duplicate tag keys found." 
in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -618,9 +615,9 @@ def test_put_aggregation_authorization(): ) assert ( "Member must satisfy regular expression pattern:" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Put a normal one there: result = client.put_aggregation_authorization( @@ -703,12 +700,10 @@ def test_describe_aggregation_authorizations(): ] == ["{}".format(str(x) * 12) for x in range(8, 10)] # Test with an invalid filter: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_aggregation_authorizations(NextToken="WRONG") - assert ( - "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] - ) - assert ce.exception.response["Error"]["Code"] == "InvalidNextTokenException" + assert "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @mock_config @@ -746,15 +741,14 @@ def test_delete_configuration_aggregator(): client.delete_configuration_aggregator(ConfigurationAggregatorName="testing") # And again to confirm that it's deleted: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "The configuration aggregator does not exist." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) @@ -791,22 +785,18 @@ def test_describe_configurations(): ) # Specify an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorders(ConfigurationRecorderNames=["wrong"]) - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + assert "wrong" in ce.value.response["Error"]["Message"] # And with both a good and wrong name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorders( ConfigurationRecorderNames=["testrecorder", "wrong"] ) - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -814,14 +804,14 @@ def test_delivery_channels(): client = boto3.client("config", region_name="us-west-2") # Try without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "NoAvailableConfigurationRecorderException" ) assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Configuration recorder is not available to " "put delivery channel." ) @@ -840,43 +830,41 @@ def test_delivery_channels(): ) # Try without a name supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) - assert ( - ce.exception.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" - ) - assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" + assert "is not valid, blank string." in ce.value.response["Error"]["Message"] # Try with a really long name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "a" * 257}) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Without specifying a bucket name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "testchannel"}) - assert ce.exception.response["Error"]["Code"] == "NoSuchBucketException" + assert ce.value.response["Error"]["Code"] == "NoSuchBucketException" assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Cannot find a S3 bucket with an empty bucket name." 
) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={"name": "testchannel", "s3BucketName": ""} ) - assert ce.exception.response["Error"]["Code"] == "NoSuchBucketException" + assert ce.value.response["Error"]["Code"] == "NoSuchBucketException" assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Cannot find a S3 bucket with an empty bucket name." ) # With an empty string for the S3 key prefix: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -884,11 +872,11 @@ def test_delivery_channels(): "s3KeyPrefix": "", } ) - assert ce.exception.response["Error"]["Code"] == "InvalidS3KeyPrefixException" - assert "empty s3 key prefix." in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidS3KeyPrefixException" + assert "empty s3 key prefix." in ce.value.response["Error"]["Message"] # With an empty string for the SNS ARN: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -896,11 +884,11 @@ def test_delivery_channels(): "snsTopicARN": "", } ) - assert ce.exception.response["Error"]["Code"] == "InvalidSNSTopicARNException" - assert "The sns topic arn" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidSNSTopicARNException" + assert "The sns topic arn" in ce.value.response["Error"]["Message"] # With an invalid delivery frequency: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -908,9 +896,9 @@ def test_delivery_channels(): "configSnapshotDeliveryProperties": {"deliveryFrequency": "WRONG"}, } ) - assert ce.exception.response["Error"]["Code"] == "InvalidDeliveryFrequency" - assert "WRONG" in ce.exception.response["Error"]["Message"] - assert "TwentyFour_Hours" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidDeliveryFrequency" + assert "WRONG" in ce.value.response["Error"]["Message"] + assert "TwentyFour_Hours" in ce.value.response["Error"]["Message"] # Create a proper one: client.put_delivery_channel( @@ -945,17 +933,17 @@ def test_delivery_channels(): ) # Can only have 1: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={"name": "testchannel2", "s3BucketName": "somebucket"} ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "MaxNumberOfDeliveryChannelsExceededException" ) assert ( "because the maximum number of delivery channels: 1 is reached." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -1010,16 +998,16 @@ def test_describe_delivery_channels(): ) # Specify an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["wrong"]) - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert "wrong" in ce.value.response["Error"]["Message"] # And with both a good and wrong name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["testchannel", "wrong"]) - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -1027,11 +1015,9 @@ def test_start_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" # Make the config recorder; client.put_configuration_recorder( @@ -1047,11 +1033,9 @@ def test_start_configuration_recorder(): ) # Without a delivery channel: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.exception.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" - ) + assert ce.value.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" # Make the delivery channel: client.put_delivery_channel( @@ -1085,11 +1069,9 @@ def test_stop_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.stop_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" # Make the config recorder; client.put_configuration_recorder( @@ -1175,14 +1157,12 @@ def test_describe_configuration_recorder_status(): assert not result[0]["recording"] # Invalid name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorder_status( ConfigurationRecorderNames=["testrecorder", "wrong"] ) - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -1206,11 +1186,9 @@ def test_delete_configuration_recorder(): client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") # Try again -- it should be deleted: - with 
assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @mock_config @@ -1235,15 +1213,14 @@ def test_delete_delivery_channel(): client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") # With the recorder enabled: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") assert ( - ce.exception.response["Error"]["Code"] - == "LastDeliveryChannelDeleteFailedException" + ce.value.response["Error"]["Code"] == "LastDeliveryChannelDeleteFailedException" ) assert ( "because there is a running configuration recorder." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Stop recording: @@ -1253,16 +1230,16 @@ def test_delete_delivery_channel(): client.delete_delivery_channel(DeliveryChannelName="testchannel") # Verify: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" @mock_config @mock_s3 def test_list_discovered_resource(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "list_config_service_resources" function. + for that individual service's "list_config_service_resources" function. """ client = boto3.client("config", region_name="us-west-2") @@ -1336,12 +1313,12 @@ def test_list_discovered_resource(): )["resourceIdentifiers"] # Test with an invalid page num > 100: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources(resourceType="AWS::S3::Bucket", limit=101) - assert "101" in ce.exception.response["Error"]["Message"] + assert "101" in ce.value.response["Error"]["Message"] # Test by supplying both resourceName and also resourceIds: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources( resourceType="AWS::S3::Bucket", resourceName="whats", @@ -1349,18 +1326,18 @@ def test_list_discovered_resource(): ) assert ( "Both Resource ID and Resource Name cannot be specified in the request" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # More than 20 resourceIds: resource_ids = ["{}".format(x) for x in range(0, 21)] - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources( resourceType="AWS::S3::Bucket", resourceIds=resource_ids ) assert ( "The specified list had more than 20 resource ID's." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -1368,18 +1345,18 @@ def test_list_discovered_resource(): @mock_s3 def test_list_aggregate_discovered_resource(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "list_config_service_resources" function. + for that individual service's "list_config_service_resources" function. 
""" client = boto3.client("config", region_name="us-west-2") # Without an aggregator: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_aggregate_discovered_resources( ConfigurationAggregatorName="lolno", ResourceType="AWS::S3::Bucket" ) assert ( "The configuration aggregator does not exist" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create the aggregator: @@ -1499,40 +1476,40 @@ def test_list_aggregate_discovered_resource(): )["ResourceIdentifiers"] # Test with an invalid page num > 100: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_aggregate_discovered_resources( ConfigurationAggregatorName="testing", ResourceType="AWS::S3::Bucket", Limit=101, ) - assert "101" in ce.exception.response["Error"]["Message"] + assert "101" in ce.value.response["Error"]["Message"] @mock_config @mock_s3 def test_get_resource_config_history(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. """ client = boto3.client("config", region_name="us-west-2") # With an invalid resource type: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="NOT::A::RESOURCE", resourceId="notcreatedyet" ) - assert ce.exception.response["Error"] == { + assert ce.value.response["Error"] == { "Message": "Resource notcreatedyet of resourceType:NOT::A::RESOURCE is unknown or has " "not been discovered", "Code": "ResourceNotDiscoveredException", } # With nothing created yet: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="notcreatedyet" ) - assert ce.exception.response["Error"] == { + assert ce.value.response["Error"] == { "Message": "Resource notcreatedyet of resourceType:AWS::S3::Bucket is unknown or has " "not been discovered", "Code": "ResourceNotDiscoveredException", @@ -1560,23 +1537,23 @@ def test_get_resource_config_history(): Bucket="eu-bucket", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}, ) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="eu-bucket" ) - assert ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException" + assert ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException" @mock_config @mock_s3 def test_batch_get_resource_config(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. 
""" client = boto3.client("config", region_name="us-west-2") # With more than 100 resourceKeys: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_resource_config( resourceKeys=[ {"resourceType": "AWS::S3::Bucket", "resourceId": "someBucket"} @@ -1585,7 +1562,7 @@ def test_batch_get_resource_config(): ) assert ( "Member must have length less than or equal to 100" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # With invalid resource types and resources that don't exist: @@ -1635,7 +1612,7 @@ def test_batch_get_resource_config(): @mock_s3 def test_batch_get_aggregate_resource_config(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. """ from moto.config.models import DEFAULT_ACCOUNT_ID @@ -1648,13 +1625,13 @@ def test_batch_get_aggregate_resource_config(): "ResourceType": "NOT::A::RESOURCE", "ResourceId": "nope", } - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_aggregate_resource_config( ConfigurationAggregatorName="lolno", ResourceIdentifiers=[bad_ri] ) assert ( "The configuration aggregator does not exist" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create the aggregator: @@ -1668,13 +1645,13 @@ def test_batch_get_aggregate_resource_config(): ) # With more than 100 items: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_aggregate_resource_config( ConfigurationAggregatorName="testing", ResourceIdentifiers=[bad_ri] * 101 ) assert ( "Member must have length less than or equal to 100" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create some S3 buckets: @@ -1802,3 +1779,382 @@ def test_batch_get_aggregate_resource_config(): len(result["UnprocessedResourceIdentifiers"]) == 1 and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1" ) + + +@mock_config +def test_put_evaluations(): + client = boto3.client("config", region_name="us-west-2") + + # Try without Evaluations supplied: + with pytest.raises(ClientError) as ce: + client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True) + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" + assert ( + "The Evaluations object in your request cannot be null" + in ce.value.response["Error"]["Message"] + ) + + # Try without a ResultToken supplied: + with pytest.raises(ClientError) as ce: + client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + ResultToken="", + TestMode=True, + ) + assert ce.value.response["Error"]["Code"] == "InvalidResultTokenException" + + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + raise SkipTest("Does not work in server mode due to error in Workzeug") + else: + # Try without TestMode supplied: + with pytest.raises(NotImplementedError): + client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + ResultToken="test", + ) + + # Now with 
proper params: + response = client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + TestMode=True, + ResultToken="test", + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},} + ) + + +@mock_config +def test_put_organization_conformance_pack(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + response = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + ) + + # then + arn = response["OrganizationConformancePackArn"] + arn.should.match( + r"arn:aws:config:us-east-1:\d{12}:organization-conformance-pack/test-pack-\w{8}" + ) + + # putting an organization conformance pack with the same name should result in an update + # when + response = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response["OrganizationConformancePackArn"].should.equal(arn) + + +@mock_config +def test_put_organization_conformance_pack_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with pytest.raises(ClientError) as e: + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + ) + + # then + ex = e.value + ex.operation_name.should.equal("PutOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal("Template body is invalid") + + # when + with pytest.raises(ClientError) as e: + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="invalid-s3-uri", + ) + + # then + ex = e.value + ex.operation_name.should.equal("PutOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal( + "1 validation error detected: " + "Value 'invalid-s3-uri' at 'templateS3Uri' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "s3://.*" + ) + + +@mock_config +def test_describe_organization_conformance_packs(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.describe_organization_conformance_packs( + OrganizationConformancePackNames=["test-pack"] + ) + + # then + response["OrganizationConformancePacks"].should.have.length_of(1) + pack = response["OrganizationConformancePacks"][0] + pack["OrganizationConformancePackName"].should.equal("test-pack") + 
pack["OrganizationConformancePackArn"].should.equal(arn) + pack["DeliveryS3Bucket"].should.equal("awsconfigconforms-test-bucket") + pack["ConformancePackInputParameters"].should.have.length_of(0) + pack["ExcludedAccounts"].should.have.length_of(0) + pack["LastUpdateTime"].should.be.a("datetime.datetime") + + +@mock_config +def test_describe_organization_conformance_packs_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with pytest.raises(ClientError) as e: + client.describe_organization_conformance_packs( + OrganizationConformancePackNames=["not-existing"] + ) + + # then + ex = e.value + ex.operation_name.should.equal("DescribeOrganizationConformancePacks") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + +@mock_config +def test_describe_organization_conformance_pack_statuses(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["test-pack"] + ) + + # then + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("CREATE_SUCCESSFUL") + update_time = status["LastUpdateTime"] + update_time.should.be.a("datetime.datetime") + + # when + response = client.describe_organization_conformance_pack_statuses() + + # then + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("CREATE_SUCCESSFUL") + status["LastUpdateTime"].should.equal(update_time) + + # when + time.sleep(1) + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response = client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["test-pack"] + ) + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("UPDATE_SUCCESSFUL") + status["LastUpdateTime"].should.be.greater_than(update_time) + + +@mock_config +def test_describe_organization_conformance_pack_statuses_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with pytest.raises(ClientError) as e: + client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["not-existing"] + ) + + # then + ex = e.value + ex.operation_name.should.equal("DescribeOrganizationConformancePackStatuses") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + 
ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + +@mock_config +def test_get_organization_conformance_pack_detailed_status(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="test-pack" + ) + + # then + response["OrganizationConformancePackDetailedStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackDetailedStatuses"][0] + status["AccountId"].should.equal(ACCOUNT_ID) + status["ConformancePackName"].should.equal( + "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + ) + status["Status"].should.equal("CREATE_SUCCESSFUL") + update_time = status["LastUpdateTime"] + update_time.should.be.a("datetime.datetime") + + # when + time.sleep(1) + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response = client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="test-pack" + ) + response["OrganizationConformancePackDetailedStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackDetailedStatuses"][0] + status["AccountId"].should.equal(ACCOUNT_ID) + status["ConformancePackName"].should.equal( + "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + ) + status["Status"].should.equal("UPDATE_SUCCESSFUL") + status["LastUpdateTime"].should.be.greater_than(update_time) + + +@mock_config +def test_get_organization_conformance_pack_detailed_status_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with pytest.raises(ClientError) as e: + client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="not-existing" + ) + + # then + ex = e.value + ex.operation_name.should.equal("GetOrganizationConformancePackDetailedStatus") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." 
+ ) + + +@mock_config +def test_delete_organization_conformance_pack(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.delete_organization_conformance_pack( + OrganizationConformancePackName="test-pack" + ) + + # then + response = client.describe_organization_conformance_pack_statuses() + response["OrganizationConformancePackStatuses"].should.have.length_of(0) + + +@mock_config +def test_delete_organization_conformance_pack_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with pytest.raises(ClientError) as e: + client.delete_organization_conformance_pack( + OrganizationConformancePackName="not-existing" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeleteOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "Could not find an OrganizationConformancePack for given request with resourceName not-existing" + ) diff --git a/tests/test_core/__init__.py b/tests/test_core/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_core/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index a8fde5d8c..b6fc8a135 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -4,9 +4,7 @@ import boto3 import sure # noqa from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds2 from moto.core import set_initial_no_auth_action_count @@ -179,11 +177,11 @@ def test_invalid_client_token_id(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_user() - ex.exception.response["Error"]["Code"].should.equal("InvalidClientTokenId") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidClientTokenId") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The security token included in the request is invalid." 
) @@ -197,11 +195,11 @@ def test_auth_failure(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AuthFailure") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AuthFailure") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) + ex.value.response["Error"]["Message"].should.equal( "AWS was not able to validate the provided access credentials" ) @@ -216,11 +214,11 @@ def test_signature_does_not_match(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_user() - ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. Consult the service documentation for details." ) @@ -235,11 +233,11 @@ def test_auth_failure_with_valid_access_key_id(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AuthFailure") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AuthFailure") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) + ex.value.response["Error"]["Message"].should.equal( "AWS was not able to validate the provided access credentials" ) @@ -255,11 +253,11 @@ def test_access_denied_with_no_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, @@ -271,6 +269,40 @@ def test_access_denied_with_no_policy(): @set_initial_no_auth_action_count(3) @mock_ec2 def test_access_denied_with_not_allowing_policy(): + user_name = "test-user" + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": ["ec2:Run*"], "Resource": "*"}], + } + access_key = create_user_with_access_key_and_inline_policy( + user_name, inline_policy_document + ) + client = boto3.client( + "ec2", + 
region_name="us-east-1", + aws_access_key_id=access_key["AccessKeyId"], + aws_secret_access_key=access_key["SecretAccessKey"], + ) + with pytest.raises(ClientError) as ex: + client.describe_instances() + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( + "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:DescribeInstances", + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_for_run_instances(): + # https://github.com/spulec/moto/issues/2774 + # The run-instances method was broken between botocore versions 1.15.8 and 1.15.12 + # This was due to the inclusion of '"idempotencyToken":true' in the response, somehow altering the signature and breaking the authentication + # Keeping this test in place in case botocore decides to break again user_name = "test-user" inline_policy_document = { "Version": "2012-10-17", @@ -287,13 +319,13 @@ def test_access_denied_with_not_allowing_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.run_instances(MaxCount=1, MinCount=1) - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances" + account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances", ) ) @@ -318,11 +350,11 @@ def test_access_denied_with_denying_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_vpc(CidrBlock="10.0.0.0/16") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateVpc" ) @@ -418,11 +450,11 @@ def test_s3_access_denied_with_denying_attached_group_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_buckets() - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + 
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(6) @@ -452,11 +484,11 @@ def test_s3_access_denied_with_denying_inline_group_policy(): aws_secret_access_key=access_key["SecretAccessKey"], ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_object(Bucket=bucket_name, Key="sdfsdf") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(10) @@ -498,11 +530,11 @@ def test_access_denied_with_many_irrelevant_policies(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_key_pair(KeyName="TestKey") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateKeyPair" ) @@ -597,15 +629,15 @@ def test_access_denied_with_temporary_credentials(): aws_secret_access_key=credentials["SecretAccessKey"], aws_session_token=credentials["SessionToken"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_db_instance( DBInstanceIdentifier="test-db-instance", DBInstanceClass="db.t3", Engine="aurora-postgresql", ) - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, role_name=role_name, @@ -644,11 +676,11 @@ def test_s3_invalid_access_key_id(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_buckets() - ex.exception.response["Error"]["Code"].should.equal("InvalidAccessKeyId") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidAccessKeyId") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The AWS Access Key Id you provided does not exist in our records." 
) @@ -666,11 +698,11 @@ def test_s3_signature_does_not_match(): aws_secret_access_key="invalid", ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_object(Bucket=bucket_name, Key="abc") - ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The request signature we calculated does not match the signature you provided. Check your key and signing method." ) @@ -702,11 +734,11 @@ def test_s3_access_denied_not_action(): aws_secret_access_key=access_key["SecretAccessKey"], ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_object(Bucket=bucket_name, Key="sdfsdf") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(4) @@ -742,10 +774,10 @@ def test_s3_invalid_token_with_temporary_credentials(): aws_session_token="invalid", ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_bucket_metrics_configurations(Bucket=bucket_name) - ex.exception.response["Error"]["Code"].should.equal("InvalidToken") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidToken") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The provided token is malformed or otherwise invalid." 
) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 408ca6819..c57d62485 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -4,8 +4,7 @@ from boto.exception import EC2ResponseError import sure # noqa import unittest -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest from moto import mock_ec2_deprecated, mock_s3_deprecated @@ -25,23 +24,25 @@ def test_basic_decorator(): list(conn.get_all_instances()).should.equal([]) +@pytest.mark.network def test_context_manager(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() with mock_ec2_deprecated(): conn = boto.connect_ec2("the_key", "the_secret") list(conn.get_all_instances()).should.equal([]) - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn = boto.connect_ec2("the_key", "the_secret") conn.get_all_instances() +@pytest.mark.network def test_decorator_start_and_stop(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() mock = mock_ec2_deprecated() @@ -50,7 +51,7 @@ def test_decorator_start_and_stop(): list(conn.get_all_instances()).should.equal([]) mock.stop() - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index d30138d5d..9870f0df5 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises +import pytest import requests from moto import mock_ec2, settings diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index 6482d903e..648510475 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises +import pytest import requests import boto3 diff --git a/tests/test_core/test_request_mocking.py b/tests/test_core/test_request_mocking.py index 2c44d52ce..3c56c7242 100644 --- a/tests/test_core/test_request_mocking.py +++ b/tests/test_core/test_request_mocking.py @@ -1,4 +1,5 @@ import requests +import pytest import sure # noqa import boto3 @@ -6,6 +7,7 @@ from moto import mock_sqs, settings @mock_sqs +@pytest.mark.network def test_passthrough_requests(): conn = boto3.client("sqs", region_name="us-west-1") conn.create_queue(QueueName="queue1") diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 5514223af..205a2ad0f 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -46,4 +46,4 @@ def test_domain_dispatched_with_service(): dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) - keys.should.contain("ResponseObject.key_response") + keys.should.contain("ResponseObject.key_or_control_response") diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py index 4dccc4f21..9d0632e05 100644 --- a/tests/test_core/test_url_mapping.py +++ b/tests/test_core/test_url_mapping.py 
@@ -14,10 +14,10 @@ def test_flask_path_converting_simple(): def test_flask_path_converting_regex(): - convert_regex_to_flask_path("/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal( - '/<regex("[a-zA-Z0-9\-_]+"):key_name>' + convert_regex_to_flask_path(r"/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal( + r'/<regex("[a-zA-Z0-9\-_]+"):key_name>' ) - convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal( - '/<regex("\d+"):account_id>/<regex(".*"):queue_name>' - ) + convert_regex_to_flask_path( + r"(?P<account_id>\d+)/(?P<queue_name>.*)$" + ).should.equal(r'/<regex("\d+"):account_id>/<regex(".*"):queue_name>') diff --git a/tests/test_datapipeline/__init__.py b/tests/test_datapipeline/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_datapipeline/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index 42063b506..b540d120e 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -1,182 +1,182 @@ -from __future__ import unicode_literals - -import boto.datapipeline -import sure # noqa - -from moto import mock_datapipeline_deprecated -from moto.datapipeline.utils import remove_capitalization_of_dict_keys - - -def get_value_from_fields(key, fields): - for field in fields: - if field["key"] == key: - return field["stringValue"] - - -@mock_datapipeline_deprecated -def test_create_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - pipeline_descriptions = conn.describe_pipelines([pipeline_id])[ - "pipelineDescriptionList" - ] - pipeline_descriptions.should.have.length_of(1) - - pipeline_description = pipeline_descriptions[0] - pipeline_description["name"].should.equal("mypipeline") - pipeline_description["pipelineId"].should.equal(pipeline_id) - fields = pipeline_description["fields"] - - get_value_from_fields("@pipelineState", fields).should.equal("PENDING") - get_value_from_fields("uniqueId", fields).should.equal("some-unique-id") - - -PIPELINE_OBJECTS = [ - { - "id": "Default", - "name": "Default", - "fields": [{"key": "workerGroup", "stringValue": "workerGroup"}], - }, - { - "id": "Schedule", - "name": "Schedule", - "fields": [ - {"key": "startDateTime", "stringValue": "2012-12-12T00:00:00"}, - {"key": "type", "stringValue": "Schedule"}, - {"key": "period", "stringValue": "1 hour"}, - {"key": "endDateTime", "stringValue": "2012-12-21T18:00:00"}, - ], - }, - { - "id": "SayHello", - "name": "SayHello", - "fields": [ - {"key": "type", "stringValue": "ShellCommandActivity"}, - {"key": "command", "stringValue": "echo hello"}, - {"key": "parent", "refValue": "Default"}, - {"key": "schedule", "refValue": "Schedule"}, - ], - }, -] - - -@mock_datapipeline_deprecated -def test_creating_pipeline_definition(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - pipeline_definition = conn.get_pipeline_definition(pipeline_id) - pipeline_definition["pipelineObjects"].should.have.length_of(3) - default_object = pipeline_definition["pipelineObjects"][0] - default_object["name"].should.equal("Default") - default_object["id"].should.equal("Default") - default_object["fields"].should.equal( - [{"key": "workerGroup", "stringValue": "workerGroup"}] - ) - - -@mock_datapipeline_deprecated -def test_describing_pipeline_objects(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline",
"some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ - "pipelineObjects" - ] - - objects.should.have.length_of(2) - default_object = [x for x in objects if x["id"] == "Default"][0] - default_object["name"].should.equal("Default") - default_object["fields"].should.equal( - [{"key": "workerGroup", "stringValue": "workerGroup"}] - ) - - -@mock_datapipeline_deprecated -def test_activate_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - conn.activate_pipeline(pipeline_id) - - pipeline_descriptions = conn.describe_pipelines([pipeline_id])[ - "pipelineDescriptionList" - ] - pipeline_descriptions.should.have.length_of(1) - pipeline_description = pipeline_descriptions[0] - fields = pipeline_description["fields"] - - get_value_from_fields("@pipelineState", fields).should.equal("SCHEDULED") - - -@mock_datapipeline_deprecated -def test_delete_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.delete_pipeline(pipeline_id) - - response = conn.list_pipelines() - - response["pipelineIdList"].should.have.length_of(0) - - -@mock_datapipeline_deprecated -def test_listing_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") - res2 = conn.create_pipeline("mypipeline2", "some-unique-id2") - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(False) - response["marker"].should.be.none - response["pipelineIdList"].should.have.length_of(2) - response["pipelineIdList"].should.contain( - {"id": res1["pipelineId"], "name": "mypipeline1"} - ) - response["pipelineIdList"].should.contain( - {"id": res2["pipelineId"], "name": "mypipeline2"} - ) - - -@mock_datapipeline_deprecated -def test_listing_paginated_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - for i in range(100): - conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(True) - response["marker"].should.equal(response["pipelineIdList"][-1]["id"]) - response["pipelineIdList"].should.have.length_of(50) - - -# testing a helper function -def test_remove_capitalization_of_dict_keys(): - result = remove_capitalization_of_dict_keys( - { - "Id": "IdValue", - "Fields": [{"Key": "KeyValue", "StringValue": "StringValueValue"}], - } - ) - - result.should.equal( - { - "id": "IdValue", - "fields": [{"key": "KeyValue", "stringValue": "StringValueValue"}], - } - ) +from __future__ import unicode_literals + +import boto.datapipeline +import sure # noqa + +from moto import mock_datapipeline_deprecated +from moto.datapipeline.utils import remove_capitalization_of_dict_keys + + +def get_value_from_fields(key, fields): + for field in fields: + if field["key"] == key: + return field["stringValue"] + + +@mock_datapipeline_deprecated +def test_create_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + pipeline_descriptions = conn.describe_pipelines([pipeline_id])[ + "pipelineDescriptionList" + ] + pipeline_descriptions.should.have.length_of(1) + + pipeline_description = 
pipeline_descriptions[0] + pipeline_description["name"].should.equal("mypipeline") + pipeline_description["pipelineId"].should.equal(pipeline_id) + fields = pipeline_description["fields"] + + get_value_from_fields("@pipelineState", fields).should.equal("PENDING") + get_value_from_fields("uniqueId", fields).should.equal("some-unique-id") + + +PIPELINE_OBJECTS = [ + { + "id": "Default", + "name": "Default", + "fields": [{"key": "workerGroup", "stringValue": "workerGroup"}], + }, + { + "id": "Schedule", + "name": "Schedule", + "fields": [ + {"key": "startDateTime", "stringValue": "2012-12-12T00:00:00"}, + {"key": "type", "stringValue": "Schedule"}, + {"key": "period", "stringValue": "1 hour"}, + {"key": "endDateTime", "stringValue": "2012-12-21T18:00:00"}, + ], + }, + { + "id": "SayHello", + "name": "SayHello", + "fields": [ + {"key": "type", "stringValue": "ShellCommandActivity"}, + {"key": "command", "stringValue": "echo hello"}, + {"key": "parent", "refValue": "Default"}, + {"key": "schedule", "refValue": "Schedule"}, + ], + }, +] + + +@mock_datapipeline_deprecated +def test_creating_pipeline_definition(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + pipeline_definition = conn.get_pipeline_definition(pipeline_id) + pipeline_definition["pipelineObjects"].should.have.length_of(3) + default_object = pipeline_definition["pipelineObjects"][0] + default_object["name"].should.equal("Default") + default_object["id"].should.equal("Default") + default_object["fields"].should.equal( + [{"key": "workerGroup", "stringValue": "workerGroup"}] + ) + + +@mock_datapipeline_deprecated +def test_describing_pipeline_objects(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ + "pipelineObjects" + ] + + objects.should.have.length_of(2) + default_object = [x for x in objects if x["id"] == "Default"][0] + default_object["name"].should.equal("Default") + default_object["fields"].should.equal( + [{"key": "workerGroup", "stringValue": "workerGroup"}] + ) + + +@mock_datapipeline_deprecated +def test_activate_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + conn.activate_pipeline(pipeline_id) + + pipeline_descriptions = conn.describe_pipelines([pipeline_id])[ + "pipelineDescriptionList" + ] + pipeline_descriptions.should.have.length_of(1) + pipeline_description = pipeline_descriptions[0] + fields = pipeline_description["fields"] + + get_value_from_fields("@pipelineState", fields).should.equal("SCHEDULED") + + +@mock_datapipeline_deprecated +def test_delete_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.delete_pipeline(pipeline_id) + + response = conn.list_pipelines() + + response["pipelineIdList"].should.have.length_of(0) + + +@mock_datapipeline_deprecated +def test_listing_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") + res2 = conn.create_pipeline("mypipeline2", 
"some-unique-id2") + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(False) + response["marker"].should.be.none + response["pipelineIdList"].should.have.length_of(2) + response["pipelineIdList"].should.contain( + {"id": res1["pipelineId"], "name": "mypipeline1"} + ) + response["pipelineIdList"].should.contain( + {"id": res2["pipelineId"], "name": "mypipeline2"} + ) + + +@mock_datapipeline_deprecated +def test_listing_paginated_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + for i in range(100): + conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(True) + response["marker"].should.equal(response["pipelineIdList"][-1]["id"]) + response["pipelineIdList"].should.have.length_of(50) + + +# testing a helper function +def test_remove_capitalization_of_dict_keys(): + result = remove_capitalization_of_dict_keys( + { + "Id": "IdValue", + "Fields": [{"Key": "KeyValue", "StringValue": "StringValueValue"}], + } + ) + + result.should.equal( + { + "id": "IdValue", + "fields": [{"key": "KeyValue", "stringValue": "StringValueValue"}], + } + ) diff --git a/tests/test_datasync/test_datasync.py b/tests/test_datasync/test_datasync.py index e3ea87675..2214032c9 100644 --- a/tests/test_datasync/test_datasync.py +++ b/tests/test_datasync/test_datasync.py @@ -4,11 +4,11 @@ import boto import boto3 from botocore.exceptions import ClientError from moto import mock_datasync -from nose.tools import assert_raises +import pytest def create_locations(client, create_smb=False, create_s3=False): - """ + """ Convenience function for creating locations. Locations must exist before tasks can be created. """ @@ -101,7 +101,7 @@ def test_describe_location_wrong(): Password="", AgentArns=agent_arns, ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_location_s3(LocationArn=response["LocationArn"]) @@ -139,7 +139,7 @@ def test_delete_location(): response = client.list_locations() assert len(response["Locations"]) == 0 - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): response = client.delete_location(LocationArn=location_arn) @@ -159,11 +159,11 @@ def test_create_task_fail(): """ Test that Locations must exist before a Task can be created """ client = boto3.client("datasync", region_name="us-east-1") locations = create_locations(client, create_smb=True, create_s3=True) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.create_task( SourceLocationArn="1", DestinationLocationArn=locations["s3_arn"] ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.create_task( SourceLocationArn=locations["smb_arn"], DestinationLocationArn="2" ) @@ -220,7 +220,7 @@ def test_describe_task(): def test_describe_task_not_exist(): client = boto3.client("datasync", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_task(TaskArn="abc") @@ -262,7 +262,7 @@ def test_update_task(): assert response["Name"] == updated_name assert response["Options"] == updated_options - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): client.update_task(TaskArn="doesnt_exist") @@ -286,7 +286,7 @@ def test_delete_task(): response = client.list_tasks() assert len(response["Tasks"]) == 0 - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): response 
= client.delete_task(TaskArn=task_arn) @@ -328,7 +328,7 @@ def test_start_task_execution_twice(): assert "TaskExecutionArn" in response task_execution_arn = response["TaskExecutionArn"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.start_task_execution(TaskArn=task_arn) @@ -392,7 +392,7 @@ def test_describe_task_execution(): def test_describe_task_execution_not_exist(): client = boto3.client("datasync", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_task_execution(TaskExecutionArn="abc") diff --git a/tests/test_dynamodb/__init__.py b/tests/test_dynamodb/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_dynamodb/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index 931e57e06..3e1092025 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -4,8 +4,7 @@ import boto import boto.dynamodb import sure # noqa import requests -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto import mock_dynamodb, mock_dynamodb_deprecated from moto.dynamodb import dynamodb_backend @@ -38,7 +37,7 @@ def test_list_tables_layer_1(): @mock_dynamodb_deprecated def test_describe_missing_table(): conn = boto.connect_dynamodb("the_key", "the_secret") - with assert_raises(DynamoDBResponseError): + with pytest.raises(DynamoDBResponseError): conn.describe_table("messages") diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index 40301025f..6986ae9b3 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -1,470 +1,470 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name="forum_name", - hash_key_proto_value=str, - range_key_name="subject", - range_key_proto_value=str, - ) - - table = conn.create_table( - name="messages", schema=message_table_schema, read_units=10, write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - "Table": { - "CreationDateTime": 1326499200.0, - "ItemCount": 0, - "KeySchema": { - "HashKeyElement": {"AttributeName": "forum_name", "AttributeType": "S"}, - "RangeKeyElement": {"AttributeName": "subject", "AttributeType": "S"}, - }, - "ProvisionedThroughput": { - "ReadCapacityUnits": 10, - "WriteCapacityUnits": 10, - }, - "TableName": "messages", - "TableSizeBytes": 0, - "TableStatus": "ACTIVE", - } - } - conn.describe_table("messages").should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table("messages") - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with("messages").should.throw( - DynamoDBResponseError 
- ) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item( - hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data - ) - item.put() - - table.has_item("LOLCat Forum", "Check this out!").should.equal(True) - - returned_item = table.get_item( - hash_key="LOLCat Forum", - range_key="Check this out!", - attributes_to_get=["Body", "SentBy"], - ) - dict(returned_item).should.equal( - { - "forum_name": "LOLCat Forum", - "subject": "Check this out!", - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - } - ) - - item["SentBy"] = "User B" - item.put() - - returned_item = table.get_item( - hash_key="LOLCat Forum", - range_key="Check this out!", - attributes_to_get=["Body", "SentBy"], - ) - dict(returned_item).should.equal( - { - "forum_name": "LOLCat Forum", - "subject": "Check this out!", - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - } - ) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name="undeclared-table", - item=dict(hash_key="LOLCat Forum", range_key="Check this out!"), - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with(hash_key="tester", range_key="other").should.throw( - DynamoDBKeyNotFoundError - ) - table.has_item("foobar", "more").should.equal(False) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name="undeclared-table", - key={"HashKeyElement": {"S": "tester"}, "RangeKeyElement": {"S": "test-range"}}, - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_get_item_without_range_key(): - conn = boto.connect_dynamodb() - message_table_schema = conn.create_schema( - hash_key_name="test_hash", - hash_key_proto_value=int, - range_key_name="test_range", - range_key_proto_value=int, - ) - table = conn.create_table( - name="messages", schema=message_table_schema, read_units=10, write_units=10 - ) - - hash_key = 3241526475 - range_key = 1234567890987 - new_item = table.new_item(hash_key=hash_key, range_key=range_key) - new_item.put() - - table.get_item.when.called_with(hash_key=hash_key).should.throw( - DynamoDBValidationError - ) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item( - hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({"Attributes": [], "ConsumedCapacityUnits": 0.5}) - table.refresh() - table.item_count.should.equal(0) - - 
item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item( - hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values="ALL_OLD") - response.should.equal( - { - "Attributes": { - "Body": "http://url_to_lolcat.gif", - "forum_name": "LOLCat Forum", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "SentBy": "User A", - "subject": "Check this out!", - }, - "ConsumedCapacityUnits": 0.5, - } - ) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name="undeclared-table", - key={"HashKeyElement": {"S": "tester"}, "RangeKeyElement": {"S": "test-range"}}, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key", range_key="789", attrs=item_data) - item.put() - - results = table.query(hash_key="the-key", range_key_condition=condition.GT("1")) - results.response["Items"].should.have.length_of(3) - - results = table.query(hash_key="the-key", range_key_condition=condition.GT("234")) - results.response["Items"].should.have.length_of(2) - - results = table.query(hash_key="the-key", range_key_condition=condition.GT("9999")) - results.response["Items"].should.have.length_of(0) - - results = table.query( - hash_key="the-key", range_key_condition=condition.CONTAINS("12") - ) - results.response["Items"].should.have.length_of(1) - - results = table.query( - hash_key="the-key", range_key_condition=condition.BEGINS_WITH("7") - ) - results.response["Items"].should.have.length_of(1) - - results = table.query( - hash_key="the-key", range_key_condition=condition.BETWEEN("567", "890") - ) - results.response["Items"].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name="undeclared-table", - hash_key_value={"S": "the-key"}, - range_key_conditions={ - "AttributeValueList": [{"S": "User B"}], - "ComparisonOperator": "EQ", - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) - item.put() - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": 
set([1, 2, 3]), - "PK": 7, - } - item = table.new_item(hash_key="the-key", range_key="789", attrs=item_data) - item.put() - - results = table.scan() - results.response["Items"].should.have.length_of(3) - - results = table.scan(scan_filter={"SentBy": condition.EQ("User B")}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Body": condition.BEGINS_WITH("http")}) - results.response["Items"].should.have.length_of(3) - - results = table.scan(scan_filter={"Ids": condition.CONTAINS(2)}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Ids": condition.NOT_NULL()}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Ids": condition.NULL()}) - results.response["Items"].should.have.length_of(2) - - results = table.scan(scan_filter={"PK": condition.BETWEEN(8, 9)}) - results.response["Items"].should.have.length_of(0) - - results = table.scan(scan_filter={"PK": condition.BETWEEN(5, 8)}) - results.response["Items"].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name="undeclared-table", - scan_filter={ - "SentBy": { - "AttributeValueList": [{"S": "User B"}], - "ComparisonOperator": "EQ", - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item(hash_key="the-key", range_key="123") - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append( - table.new_item( - hash_key="the-key", - range_key="123", - attrs={ - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - }, - ) - ) - - items.append( - table.new_item( - hash_key="the-key", - range_key="789", - attrs={ - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": set([1, 2, 3]), - "PK": 7, - }, - ) - ) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[("the-key", "789")]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) - item.put() - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": set([1, 2, 3]), - "PK": 7, - } - item = table.new_item(hash_key="another-key", range_key="789", attrs=item_data) - item.put() - - items = table.batch_get_item([("the-key", "123"), ("another-key", "789")]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from 
freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name="forum_name", + hash_key_proto_value=str, + range_key_name="subject", + range_key_proto_value=str, + ) + + table = conn.create_table( + name="messages", schema=message_table_schema, read_units=10, write_units=10 + ) + return table + + +@freeze_time("2012-01-14") +@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + "Table": { + "CreationDateTime": 1326499200.0, + "ItemCount": 0, + "KeySchema": { + "HashKeyElement": {"AttributeName": "forum_name", "AttributeType": "S"}, + "RangeKeyElement": {"AttributeName": "subject", "AttributeType": "S"}, + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10, + }, + "TableName": "messages", + "TableSizeBytes": 0, + "TableStatus": "ACTIVE", + } + } + conn.describe_table("messages").should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table("messages") + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with("messages").should.throw( + DynamoDBResponseError + ) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item( + hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data + ) + item.put() + + table.has_item("LOLCat Forum", "Check this out!").should.equal(True) + + returned_item = table.get_item( + hash_key="LOLCat Forum", + range_key="Check this out!", + attributes_to_get=["Body", "SentBy"], + ) + dict(returned_item).should.equal( + { + "forum_name": "LOLCat Forum", + "subject": "Check this out!", + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + } + ) + + item["SentBy"] = "User B" + item.put() + + returned_item = table.get_item( + hash_key="LOLCat Forum", + range_key="Check this out!", + attributes_to_get=["Body", "SentBy"], + ) + dict(returned_item).should.equal( + { + "forum_name": "LOLCat Forum", + "subject": "Check this out!", + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + } + ) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + conn.layer1.put_item.when.called_with( + table_name="undeclared-table", + item=dict(hash_key="LOLCat Forum", range_key="Check this out!"), + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with(hash_key="tester", range_key="other").should.throw( + DynamoDBKeyNotFoundError + ) + table.has_item("foobar", 
"more").should.equal(False) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name="undeclared-table", + key={"HashKeyElement": {"S": "tester"}, "RangeKeyElement": {"S": "test-range"}}, + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_get_item_without_range_key(): + conn = boto.connect_dynamodb() + message_table_schema = conn.create_schema( + hash_key_name="test_hash", + hash_key_proto_value=int, + range_key_name="test_range", + range_key_proto_value=int, + ) + table = conn.create_table( + name="messages", schema=message_table_schema, read_units=10, write_units=10 + ) + + hash_key = 3241526475 + range_key = 1234567890987 + new_item = table.new_item(hash_key=hash_key, range_key=range_key) + new_item.put() + + table.get_item.when.called_with(hash_key=hash_key).should.throw( + DynamoDBValidationError + ) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item( + hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({"Attributes": [], "ConsumedCapacityUnits": 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item( + hash_key="LOLCat Forum", range_key="Check this out!", attrs=item_data + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values="ALL_OLD") + response.should.equal( + { + "Attributes": { + "Body": "http://url_to_lolcat.gif", + "forum_name": "LOLCat Forum", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "SentBy": "User A", + "subject": "Check this out!", + }, + "ConsumedCapacityUnits": 0.5, + } + ) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name="undeclared-table", + key={"HashKeyElement": {"S": "tester"}, "RangeKeyElement": {"S": "test-range"}}, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key", range_key="789", attrs=item_data) + item.put() + + results = table.query(hash_key="the-key", range_key_condition=condition.GT("1")) + results.response["Items"].should.have.length_of(3) + + results = table.query(hash_key="the-key", range_key_condition=condition.GT("234")) + 
results.response["Items"].should.have.length_of(2) + + results = table.query(hash_key="the-key", range_key_condition=condition.GT("9999")) + results.response["Items"].should.have.length_of(0) + + results = table.query( + hash_key="the-key", range_key_condition=condition.CONTAINS("12") + ) + results.response["Items"].should.have.length_of(1) + + results = table.query( + hash_key="the-key", range_key_condition=condition.BEGINS_WITH("7") + ) + results.response["Items"].should.have.length_of(1) + + results = table.query( + hash_key="the-key", range_key_condition=condition.BETWEEN("567", "890") + ) + results.response["Items"].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name="undeclared-table", + hash_key_value={"S": "the-key"}, + range_key_conditions={ + "AttributeValueList": [{"S": "User B"}], + "ComparisonOperator": "EQ", + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) + item.put() + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + } + item = table.new_item(hash_key="the-key", range_key="789", attrs=item_data) + item.put() + + results = table.scan() + results.response["Items"].should.have.length_of(3) + + results = table.scan(scan_filter={"SentBy": condition.EQ("User B")}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Body": condition.BEGINS_WITH("http")}) + results.response["Items"].should.have.length_of(3) + + results = table.scan(scan_filter={"Ids": condition.CONTAINS(2)}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Ids": condition.NOT_NULL()}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Ids": condition.NULL()}) + results.response["Items"].should.have.length_of(2) + + results = table.scan(scan_filter={"PK": condition.BETWEEN(8, 9)}) + results.response["Items"].should.have.length_of(0) + + results = table.scan(scan_filter={"PK": condition.BETWEEN(5, 8)}) + results.response["Items"].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name="undeclared-table", + scan_filter={ + "SentBy": { + "AttributeValueList": [{"S": "User B"}], + "ComparisonOperator": "EQ", + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item(hash_key="the-key", range_key="123") + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append( + table.new_item( + hash_key="the-key", + range_key="123", + attrs={ + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": 
"12/9/2011 11:36:03 PM", + }, + ) + ) + + items.append( + table.new_item( + hash_key="the-key", + range_key="789", + attrs={ + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + }, + ) + ) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[("the-key", "789")]) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key", range_key="456", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key", range_key="123", attrs=item_data) + item.put() + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + } + item = table.new_item(hash_key="another-key", range_key="789", attrs=item_data) + item.put() + + items = table.batch_get_item([("the-key", "123"), ("another-key", "789")]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.equal(2) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index e5a268c97..c5031b5d1 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -1,390 +1,390 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name="forum_name", hash_key_proto_value=str - ) - - table = conn.create_table( - name="messages", schema=message_table_schema, read_units=10, write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - "Table": { - "CreationDateTime": 1326499200.0, - "ItemCount": 0, - "KeySchema": { - "HashKeyElement": {"AttributeName": "forum_name", "AttributeType": "S"} - }, - "ProvisionedThroughput": { - "ReadCapacityUnits": 10, - "WriteCapacityUnits": 10, - }, - "TableName": "messages", - "TableSizeBytes": 0, - "TableStatus": "ACTIVE", - } - } - conn.describe_table("messages").should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table("messages") - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with("messages").should.throw( - DynamoDBResponseError - ) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - 
table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) - item.put() - - returned_item = table.get_item( - hash_key="LOLCat Forum", attributes_to_get=["Body", "SentBy"] - ) - dict(returned_item).should.equal( - { - "forum_name": "LOLCat Forum", - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - } - ) - - item["SentBy"] = "User B" - item.put() - - returned_item = table.get_item( - hash_key="LOLCat Forum", attributes_to_get=["Body", "SentBy"] - ) - dict(returned_item).should.equal( - { - "forum_name": "LOLCat Forum", - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - } - ) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name="undeclared-table", item=dict(hash_key="LOLCat Forum") - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with(hash_key="tester").should.throw( - DynamoDBKeyNotFoundError - ) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name="undeclared-table", key={"HashKeyElement": {"S": "tester"}} - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({"Attributes": [], "ConsumedCapacityUnits": 0.5}) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values="ALL_OLD") - response.should.equal( - { - "Attributes": { - "Body": "http://url_to_lolcat.gif", - "forum_name": "LOLCat Forum", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "SentBy": "User A", - }, - "ConsumedCapacityUnits": 0.5, - } - ) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name="undeclared-table", key={"HashKeyElement": {"S": "tester"}} - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - 
"ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key", attrs=item_data) - item.put() - - results = table.query(hash_key="the-key") - results.response["Items"].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name="undeclared-table", hash_key_value={"S": "the-key"} - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key2", attrs=item_data) - item.put() - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": set([1, 2, 3]), - "PK": 7, - } - item = table.new_item(hash_key="the-key3", attrs=item_data) - item.put() - - results = table.scan() - results.response["Items"].should.have.length_of(3) - - results = table.scan(scan_filter={"SentBy": condition.EQ("User B")}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Body": condition.BEGINS_WITH("http")}) - results.response["Items"].should.have.length_of(3) - - results = table.scan(scan_filter={"Ids": condition.CONTAINS(2)}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Ids": condition.NOT_NULL()}) - results.response["Items"].should.have.length_of(1) - - results = table.scan(scan_filter={"Ids": condition.NULL()}) - results.response["Items"].should.have.length_of(2) - - results = table.scan(scan_filter={"PK": condition.BETWEEN(8, 9)}) - results.response["Items"].should.have.length_of(0) - - results = table.scan(scan_filter={"PK": condition.BETWEEN(5, 8)}) - results.response["Items"].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name="undeclared-table", - scan_filter={ - "SentBy": { - "AttributeValueList": [{"S": "User B"}], - "ComparisonOperator": "EQ", - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item("the-key") - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append( - table.new_item( - hash_key="the-key", - attrs={ - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - }, - ) - ) - - items.append( - table.new_item( - hash_key="the-key2", - attrs={ - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": set([1, 2, 3]), - "PK": 7, - }, - ) - ) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[("the-key")]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def 
test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User A", - "ReceivedTime": "12/9/2011 11:36:03 PM", - } - item = table.new_item(hash_key="the-key1", attrs=item_data) - item.put() - - item = table.new_item(hash_key="the-key2", attrs=item_data) - item.put() - - item_data = { - "Body": "http://url_to_lolcat.gif", - "SentBy": "User B", - "ReceivedTime": "12/9/2011 11:36:03 PM", - "Ids": set([1, 2, 3]), - "PK": 7, - } - item = table.new_item(hash_key="another-key", attrs=item_data) - item.put() - - items = table.batch_get_item([("the-key1"), ("another-key")]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.have.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name="forum_name", hash_key_proto_value=str + ) + + table = conn.create_table( + name="messages", schema=message_table_schema, read_units=10, write_units=10 + ) + return table + + +@freeze_time("2012-01-14") +@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + "Table": { + "CreationDateTime": 1326499200.0, + "ItemCount": 0, + "KeySchema": { + "HashKeyElement": {"AttributeName": "forum_name", "AttributeType": "S"} + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10, + }, + "TableName": "messages", + "TableSizeBytes": 0, + "TableStatus": "ACTIVE", + } + } + conn.describe_table("messages").should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table("messages") + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with("messages").should.throw( + DynamoDBResponseError + ) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) + item.put() + + returned_item = table.get_item( + hash_key="LOLCat Forum", attributes_to_get=["Body", "SentBy"] + ) + dict(returned_item).should.equal( + { + "forum_name": "LOLCat Forum", + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + } + ) + + item["SentBy"] = "User B" + item.put() + + returned_item = table.get_item( + hash_key="LOLCat Forum", attributes_to_get=["Body", "SentBy"] + ) + dict(returned_item).should.equal( + { + "forum_name": "LOLCat Forum", + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + } + ) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + 
conn.layer1.put_item.when.called_with( + table_name="undeclared-table", item=dict(hash_key="LOLCat Forum") + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with(hash_key="tester").should.throw( + DynamoDBKeyNotFoundError + ) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name="undeclared-table", key={"HashKeyElement": {"S": "tester"}} + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({"Attributes": [], "ConsumedCapacityUnits": 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="LOLCat Forum", attrs=item_data) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values="ALL_OLD") + response.should.equal( + { + "Attributes": { + "Body": "http://url_to_lolcat.gif", + "forum_name": "LOLCat Forum", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "SentBy": "User A", + }, + "ConsumedCapacityUnits": 0.5, + } + ) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name="undeclared-table", key={"HashKeyElement": {"S": "tester"}} + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key", attrs=item_data) + item.put() + + results = table.query(hash_key="the-key") + results.response["Items"].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name="undeclared-table", hash_key_value={"S": "the-key"} + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key2", attrs=item_data) + item.put() + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + } + item = 
table.new_item(hash_key="the-key3", attrs=item_data) + item.put() + + results = table.scan() + results.response["Items"].should.have.length_of(3) + + results = table.scan(scan_filter={"SentBy": condition.EQ("User B")}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Body": condition.BEGINS_WITH("http")}) + results.response["Items"].should.have.length_of(3) + + results = table.scan(scan_filter={"Ids": condition.CONTAINS(2)}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Ids": condition.NOT_NULL()}) + results.response["Items"].should.have.length_of(1) + + results = table.scan(scan_filter={"Ids": condition.NULL()}) + results.response["Items"].should.have.length_of(2) + + results = table.scan(scan_filter={"PK": condition.BETWEEN(8, 9)}) + results.response["Items"].should.have.length_of(0) + + results = table.scan(scan_filter={"PK": condition.BETWEEN(5, 8)}) + results.response["Items"].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name="undeclared-table", + scan_filter={ + "SentBy": { + "AttributeValueList": [{"S": "User B"}], + "ComparisonOperator": "EQ", + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item("the-key") + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append( + table.new_item( + hash_key="the-key", + attrs={ + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + }, + ) + ) + + items.append( + table.new_item( + hash_key="the-key2", + attrs={ + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + }, + ) + ) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[("the-key")]) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User A", + "ReceivedTime": "12/9/2011 11:36:03 PM", + } + item = table.new_item(hash_key="the-key1", attrs=item_data) + item.put() + + item = table.new_item(hash_key="the-key2", attrs=item_data) + item.put() + + item_data = { + "Body": "http://url_to_lolcat.gif", + "SentBy": "User B", + "ReceivedTime": "12/9/2011 11:36:03 PM", + "Ids": set([1, 2, 3]), + "PK": 7, + } + item = table.new_item(hash_key="another-key", attrs=item_data) + item.put() + + items = table.batch_get_item([("the-key1"), ("another-key")]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.have.equal(2) diff --git a/tests/test_dynamodb2/__init__.py b/tests/test_dynamodb2/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_dynamodb2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_dynamodb2/conftest.py b/tests/test_dynamodb2/conftest.py new file mode 100644 index 000000000..5f523db96 --- /dev/null +++ b/tests/test_dynamodb2/conftest.py @@ -0,0 +1,13 @@ +import pytest +from moto.dynamodb2.models import Table + + +@pytest.fixture +def table(): + return Table( + "Forums", + schema=[ + {"KeyType": "HASH", "AttributeName": "forum_name"}, + {"KeyType": "RANGE", "AttributeName": "subject"}, + ], + ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index ec01889ae..0e0fcb082 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,24 +1,23 @@ from __future__ import unicode_literals, print_function +from datetime import datetime from decimal import Decimal -import six import boto import boto3 from boto3.dynamodb.conditions import Attr, Key +import re import sure # noqa -import requests from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2, dynamodb_backends2 from boto.exception import JSONResponseError from botocore.exceptions import ClientError, ParamValidationError from tests.helpers import requires_boto_gte -import tests.backport_assert_raises import moto.dynamodb2.comparisons import moto.dynamodb2.models -from nose.tools import assert_raises +import pytest try: import boto.dynamodb2 @@ -73,7 +72,7 @@ def test_describe_missing_table(): conn = boto.dynamodb2.connect_to_region( "us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk" ) - with assert_raises(JSONResponseError): + with pytest.raises(JSONResponseError): conn.describe_table("messages") @@ -187,7 +186,7 @@ def test_list_not_found_table_tags(): @requires_boto_gte("2.9") @mock_dynamodb2 -def test_item_add_empty_string_exception(): +def test_item_add_empty_string_in_key_exception(): name = "TestTable" conn = boto3.client( "dynamodb", @@ -202,28 +201,57 @@ def test_item_add_empty_string_exception(): ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.put_item( TableName=name, Item={ - "forum_name": {"S": "LOLCat Forum"}, + "forum_name": {"S": ""}, "subject": {"S": "Check this out!"}, "Body": {"S": "http://url_to_lolcat.gif"}, - "SentBy": {"S": ""}, + "SentBy": {"S": "someone@somewhere.edu"}, "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, }, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: An AttributeValue may not contain an empty string" ) @requires_boto_gte("2.9") @mock_dynamodb2 -def test_update_item_with_empty_string_exception(): +def test_item_add_empty_string_no_exception(): + name = "TestTable" + conn = boto3.client( + "dynamodb", + region_name="us-west-2", + aws_access_key_id="ak", + aws_secret_access_key="sk", + ) + conn.create_table( + TableName=name, + KeySchema=[{"AttributeName": "forum_name", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "forum_name", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + conn.put_item( + TableName=name, + Item={ + 
"forum_name": {"S": "LOLCat Forum"}, + "subject": {"S": "Check this out!"}, + "Body": {"S": "http://url_to_lolcat.gif"}, + "SentBy": {"S": ""}, + "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, + }, + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_in_key_exception(): name = "TestTable" conn = boto3.client( "dynamodb", @@ -249,21 +277,57 @@ def test_update_item_with_empty_string_exception(): }, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.update_item( TableName=name, Key={"forum_name": {"S": "LOLCat Forum"}}, - UpdateExpression="set Body=:Body", - ExpressionAttributeValues={":Body": {"S": ""}}, + UpdateExpression="set forum_name=:NewName", + ExpressionAttributeValues={":NewName": {"S": ""}}, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: An AttributeValue may not contain an empty string" ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_no_exception(): + name = "TestTable" + conn = boto3.client( + "dynamodb", + region_name="us-west-2", + aws_access_key_id="ak", + aws_secret_access_key="sk", + ) + conn.create_table( + TableName=name, + KeySchema=[{"AttributeName": "forum_name", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "forum_name", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + conn.put_item( + TableName=name, + Item={ + "forum_name": {"S": "LOLCat Forum"}, + "subject": {"S": "Check this out!"}, + "Body": {"S": "http://url_to_lolcat.gif"}, + "SentBy": {"S": "test"}, + "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, + }, + ) + + conn.update_item( + TableName=name, + Key={"forum_name": {"S": "LOLCat Forum"}}, + UpdateExpression="set Body=:Body", + ExpressionAttributeValues={":Body": {"S": ""}}, + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_invalid_table(): @@ -1344,6 +1408,69 @@ def test_get_item_returns_consumed_capacity(): assert "TableName" in response["ConsumedCapacity"] +@mock_dynamodb2 +def test_put_empty_item(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + TableName="test", + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, + ) + table = dynamodb.Table("test") + + with pytest.raises(ClientError) as ex: + table.put_item(Item={}) + ex.value.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key structure_id in the item" + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + + +@mock_dynamodb2 +def test_put_item_nonexisting_hash_key(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + TableName="test", + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 
123}, + ) + table = dynamodb.Table("test") + + with pytest.raises(ClientError) as ex: + table.put_item(Item={"a_terribly_misguided_id_attribute": "abcdef"}) + ex.value.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key structure_id in the item" + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + + +@mock_dynamodb2 +def test_put_item_nonexisting_range_key(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + AttributeDefinitions=[ + {"AttributeName": "structure_id", "AttributeType": "S"}, + {"AttributeName": "added_at", "AttributeType": "N"}, + ], + TableName="test", + KeySchema=[ + {"AttributeName": "structure_id", "KeyType": "HASH"}, + {"AttributeName": "added_at", "KeyType": "RANGE"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, + ) + table = dynamodb.Table("test") + + with pytest.raises(ClientError) as ex: + table.put_item(Item={"structure_id": "abcdef"}) + ex.value.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key added_at in the item" + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + + def test_filter_expression(): row1 = moto.dynamodb2.models.Item( None, @@ -1389,6 +1516,13 @@ def test_filter_expression(): filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) + # lowercase AND test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + "Id > :v0 and Subs < :v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "7"}} + ) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + # OR test filter_expr = moto.dynamodb2.comparisons.get_filter_expression( "Id = :v0 OR Id=:v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "8"}} @@ -1911,7 +2045,7 @@ def test_delete_item(): assert response["Count"] == 2 # Test ReturnValues validation - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.delete_item( Key={"client": "client1", "app": "app1"}, ReturnValues="ALL_NEW" ) @@ -1981,6 +2115,141 @@ def test_set_ttl(): resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED") +@mock_dynamodb2 +def test_describe_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.describe_continuous_backups(TableName=table_name) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_describe_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with pytest.raises(Exception) as e: + client.describe_continuous_backups(TableName="not-existing-table") + + # then + ex = e.value + ex.operation_name.should.equal("DescribeContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + 
ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + +@mock_dynamodb2 +def test_update_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + earliest_datetime = point_in_time["EarliestRestorableDateTime"] + earliest_datetime.should.be.a(datetime) + latest_datetime = point_in_time["LatestRestorableDateTime"] + latest_datetime.should.be.a(datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + # a second update should not change anything + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime) + point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False}, + ) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_update_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with pytest.raises(Exception) as e: + client.update_continuous_backups( + TableName="not-existing-table", + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + ex = e.value + ex.operation_name.should.equal("UpdateContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + # https://github.com/spulec/moto/issues/1043 @mock_dynamodb2 def test_query_missing_expr_names(): @@ -2047,6 +2316,30 @@ def test_update_item_with_list(): resp["Item"].should.equal({"key": "the-key", "list": [1, 2]}) +# https://github.com/spulec/moto/issues/2328 +@mock_dynamodb2 +def test_update_item_with_no_action_passed_with_list(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + + # Create the DynamoDB table. 
+ dynamodb.create_table( + TableName="Table", + KeySchema=[{"AttributeName": "key", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "key", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + table = dynamodb.Table("Table") + table.update_item( + Key={"key": "the-key"}, + # Do not pass 'Action' key, in order to check that the + # parameter's default value will be used. + AttributeUpdates={"list": {"Value": [1, 2]}}, + ) + + resp = table.get_item(Key={"key": "the-key"}) + resp["Item"].should.equal({"key": "the-key", "list": [1, 2]}) + + # https://github.com/spulec/moto/issues/1342 @mock_dynamodb2 def test_update_item_on_map(): @@ -2082,13 +2375,33 @@ def test_update_item_on_map(): # Nonexistent nested attributes are supported for existing top-level attributes. table.update_item( Key={"forum_name": "the-key", "subject": "123"}, - UpdateExpression="SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2", + UpdateExpression="SET body.#nested.#data = :tb", + ExpressionAttributeNames={"#nested": "nested", "#data": "data",}, + ExpressionAttributeValues={":tb": "new_value"}, + ) + # Running this against AWS DDB raises an exception, so make sure it fails here as well: + with pytest.raises(client.exceptions.ClientError): + # botocore.exceptions.ClientError: An error occurred (ValidationException) when calling the UpdateItem + # operation: The document path provided in the update expression is invalid for update + table.update_item( + Key={"forum_name": "the-key", "subject": "123"}, + UpdateExpression="SET body.#nested.#nonexistentnested.#data = :tb2", + ExpressionAttributeNames={ + "#nested": "nested", + "#nonexistentnested": "nonexistentnested", + "#data": "data", + }, + ExpressionAttributeValues={":tb2": "other_value"}, + ) + + table.update_item( + Key={"forum_name": "the-key", "subject": "123"}, + UpdateExpression="SET body.#nested.#nonexistentnested = :tb2", ExpressionAttributeNames={ + "#nested": "nested", + "#nonexistentnested": "nonexistentnested", - "#data": "data", }, - ExpressionAttributeValues={":tb": "new_value", ":tb2": "other_value"}, + ExpressionAttributeValues={":tb2": {"data": "other_value"}}, ) resp = table.scan() @@ -2096,8 +2409,9 @@ def test_update_item_on_map(): {"nested": {"data": "new_value", "nonexistentnested": {"data": "other_value"}}} ) - # Test nested value for a nonexistent attribute. - with assert_raises(client.exceptions.ConditionalCheckFailedException): + # Test that setting a nested value on a nonexistent top-level attribute throws a ClientError. 
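+ # (AWS rejects this with a ValidationException, "The document path provided in the update expression is invalid for update", hence the generic ClientError assertion below.) 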
+ with pytest.raises(client.exceptions.ClientError): table.update_item( Key={"forum_name": "the-key", "subject": "123"}, UpdateExpression="SET nonexistent.#nested = :tb", @@ -2185,10 +2498,48 @@ def test_update_return_attributes(): r = update("col1", "val5", "NONE") assert r["Attributes"] == {} - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: r = update("col1", "val6", "WRONG") +# https://github.com/spulec/moto/issues/3448 +@mock_dynamodb2 +def test_update_return_updated_new_attributes_when_same(): + dynamo_client = boto3.resource("dynamodb", region_name="us-east-1") + dynamo_client.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "HashKey1", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "HashKey1", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + + dynamodb_table = dynamo_client.Table("moto-test") + dynamodb_table.put_item( + Item={"HashKey1": "HashKeyValue1", "listValuedAttribute1": ["a", "b"]} + ) + + def update(col, to, rv): + return dynamodb_table.update_item( + TableName="moto-test", + Key={"HashKey1": "HashKeyValue1"}, + UpdateExpression="SET listValuedAttribute1=:" + col, + ExpressionAttributeValues={":" + col: to}, + ReturnValues=rv, + ) + + r = update("a", ["a", "c"], "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": ["a", "c"]} + + r = update("a", {"a", "c"}, "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": {"a", "c"}} + + r = update("a", {1, 2}, "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": {1, 2}} + + with pytest.raises(ClientError) as ex: + r = update("a", ["a", "c"], "WRONG") + + @mock_dynamodb2 def test_put_return_attributes(): dynamodb = boto3.client("dynamodb", region_name="us-east-1") @@ -2214,15 +2565,15 @@ def test_put_return_attributes(): ) assert r["Attributes"] == {"id": {"S": "foo"}, "col1": {"S": "val1"}} - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.put_item( TableName="moto-test", Item={"id": {"S": "foo"}, "col1": {"S": "val3"}}, ReturnValues="ALL_NEW", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Return values set to invalid value" ) @@ -2451,7 +2802,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2467,7 +2818,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2483,7 +2834,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2511,7 +2862,7 @@ def test_condition_expressions(): ExpressionAttributeValues={":match": {"S": "match"}}, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with 
pytest.raises(client.exceptions.ConditionalCheckFailedException): client.update_item( TableName="test1", Key={"client": {"S": "client1"}, "app": {"S": "app1"}}, @@ -2521,7 +2872,7 @@ def test_condition_expressions(): ExpressionAttributeNames={"#existing": "existing", "#match": "match"}, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.delete_item( TableName="test1", Key={"client": {"S": "client1"}, "app": {"S": "app1"}}, @@ -2606,7 +2957,7 @@ def test_condition_expression__attr_doesnt_exist(): update_if_attr_doesnt_exist() # Second time should fail - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): update_if_attr_doesnt_exist() @@ -2646,7 +2997,7 @@ def test_condition_expression__and_order(): # ensure that the RHS of the AND expression is not evaluated if the LHS # returns true (as it would result an error) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.update_item( TableName="test", Key={"forum_name": {"S": "the-key"}}, @@ -2700,7 +3051,7 @@ def test_query_gsi_with_range_key(): res = dynamodb.query( TableName="test", IndexName="test_gsi", - KeyConditionExpression="gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key", + KeyConditionExpression="gsi_hash_key = :gsi_hash_key and gsi_range_key = :gsi_range_key", ExpressionAttributeValues={ ":gsi_hash_key": {"S": "key1"}, ":gsi_range_key": {"S": "range1"}, @@ -2742,12 +3093,12 @@ def test_scan_by_non_exists_index(): ], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.scan(TableName="test", IndexName="non_exists_index") - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The table does not have the specified index: non_exists_index" ) @@ -2777,15 +3128,15 @@ def test_query_by_non_exists_index(): ], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.query( TableName="test", IndexName="non_exists_index", KeyConditionExpression="CarModel=M", ) - ex.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid index: non_exists_index for table: test. 
Available indexes are: test_gsi" ) @@ -2814,6 +3165,54 @@ def test_batch_items_returns_all(): ] +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_for_single_table(): + dynamodb = _create_user_table() + with pytest.raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 104) + ], + "ConsistentRead": True, + } + } + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + msg = ex.value.response["Error"]["Message"] + msg.should.contain("1 validation error detected: Value") + msg.should.contain( + "at 'requestItems.users.member.keys' failed to satisfy constraint: Member must have length less than or equal to 100" + ) + + +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_across_all_tables(): + dynamodb = _create_user_table() + with pytest.raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": True, + }, + "users2": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": True, + }, + } + ) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( + "Too many items requested for the BatchGetItem call" + ) + + @mock_dynamodb2 def test_batch_items_with_basic_projection_expression(): dynamodb = _create_user_table() @@ -2888,7 +3287,7 @@ def test_batch_items_with_basic_projection_expression_and_attr_expression_names( @mock_dynamodb2 def test_batch_items_should_throw_exception_for_duplicate_request(): client = _create_user_table() - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.batch_get_item( RequestItems={ "users": { @@ -2900,8 +3299,8 @@ def test_batch_items_should_throw_exception_for_duplicate_request(): } } ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Provided list of item keys contains duplicates" ) @@ -2914,7 +3313,7 @@ def test_index_with_unknown_attributes_should_fail(): "Some index key attributes are not defined in AttributeDefinitions." 
) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.create_table( AttributeDefinitions=[ {"AttributeName": "customer_nr", "AttributeType": "S"}, @@ -2938,8 +3337,8 @@ def test_index_with_unknown_attributes_should_fail(): BillingMode="PAY_PER_REQUEST", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain(expected_exception) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain(expected_exception) @mock_dynamodb2 @@ -3094,7 +3493,7 @@ def test_update_list_index__set_index_of_a_string(): client.put_item( TableName=table_name, Item={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}} ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.update_item( TableName=table_name, Key={"id": {"S": "foo2"}}, @@ -3105,8 +3504,8 @@ def test_update_list_index__set_index_of_a_string(): "Item" ] - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( + "The document path provided in the update expression is invalid for update" ) @@ -3119,13 +3518,34 @@ def test_remove_top_level_attribute(): TableName=table_name, Item={"id": {"S": "foo"}, "item": {"S": "bar"}} ) client.update_item( - TableName=table_name, Key={"id": {"S": "foo"}}, UpdateExpression="REMOVE item" + TableName=table_name, + Key={"id": {"S": "foo"}}, + UpdateExpression="REMOVE #i", + ExpressionAttributeNames={"#i": "item"}, ) # result = client.get_item(TableName=table_name, Key={"id": {"S": "foo"}})["Item"] result.should.equal({"id": {"S": "foo"}}) +@mock_dynamodb2 +def test_remove_top_level_attribute_non_existent(): + """ + REMOVE statements do not require the attribute to exist; they silently pass. + """ + table_name = "test_remove" + client = create_table_with_list(table_name) + ddb_item = {"id": {"S": "foo"}, "item": {"S": "bar"}} + client.put_item(TableName=table_name, Item=ddb_item) + client.update_item( + TableName=table_name, + Key={"id": {"S": "foo"}}, + UpdateExpression="REMOVE non_existent_attribute", + ) + result = client.get_item(TableName=table_name, Key={"id": {"S": "foo"}})["Item"] + result.should.equal(ddb_item) + + @mock_dynamodb2 def test_remove_list_index__remove_existing_index(): table_name = "test_list_index_access" @@ -3294,21 +3715,21 @@ def test_item_size_is_under_400KB(): assert_failure_due_to_item_size( func=client.put_item, TableName="moto-test", - Item={"id": {"S": "foo"}, "item": {"S": large_item}}, + Item={"id": {"S": "foo"}, "cont": {"S": large_item}}, ) assert_failure_due_to_item_size( - func=table.put_item, Item={"id": "bar", "item": large_item} + func=table.put_item, Item={"id": "bar", "cont": large_item} ) - assert_failure_due_to_item_size( + assert_failure_due_to_item_size_to_update( func=client.update_item, TableName="moto-test", Key={"id": {"S": "foo2"}}, - UpdateExpression="set item=:Item", + UpdateExpression="set cont=:Item", ExpressionAttributeValues={":Item": {"S": large_item}}, ) # Assert op fails when updating a nested item assert_failure_due_to_item_size( - func=table.put_item, Item={"id": "bar", "itemlist": [{"item": large_item}]} + func=table.put_item, Item={"id": "bar", "itemlist": [{"cont": large_item}]} ) 
assert_failure_due_to_item_size( func=client.put_item, @@ -3321,14 +3742,23 @@ def test_item_size_is_under_400KB(): def assert_failure_due_to_item_size(func, **kwargs): - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: func(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Item size has exceeded the maximum allowed size" ) +def assert_failure_due_to_item_size_to_update(func, **kwargs): + with pytest.raises(ClientError) as ex: + func(**kwargs) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( + "Item size to update has exceeded the maximum allowed size" + ) + + @mock_dynamodb2 # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditionExpression def test_hash_key_cannot_use_begins_with_operations(): @@ -3351,10 +3781,10 @@ def test_hash_key_cannot_use_begins_with_operations(): batch.put_item(Item=item) table = dynamodb.Table("test-table") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.query(KeyConditionExpression=Key("key").begins_with("prefix-")) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Query key condition not supported" ) @@ -3412,13 +3842,18 @@ def test_update_supports_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}}, UpdateExpression="SET crontab = list_append(crontab, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"crontab": {"L": [{"S": "bar1"}, {"S": "bar2"}]}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}} @@ -3451,15 +3886,19 @@ def test_update_supports_nested_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}}, UpdateExpression="SET a.#b = list_append(a.#b, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b"}, + ReturnValues="UPDATED_NEW", ) - # Verify item is appended to the existing list + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"M": {"b": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}} + ) result = client.get_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}} )["Item"] @@ -3491,14 +3930,19 @@ def test_update_supports_multiple_levels_nested_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}}, UpdateExpression="SET a.#b.c = list_append(a.#b.#c, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b", "#c": "c"}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + 
updated_item["Attributes"].should.equal( + {"a": {"M": {"b": {"M": {"c": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}}}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}} @@ -3532,14 +3976,19 @@ def test_update_supports_nested_list_append_onto_another_list(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "list_append_another"}}, UpdateExpression="SET a.#c = list_append(a.#b, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b", "#c": "c"}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"M": {"c": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"id": {"S": "list_append_another"}} @@ -3582,13 +4031,18 @@ def test_update_supports_list_append_maps(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}, "rid": {"S": "range_key"}}, UpdateExpression="SET a = list_append(a, :i)", ExpressionAttributeValues={":i": {"L": [{"M": {"b": {"S": "bar2"}}}]}}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"L": [{"M": {"b": {"S": "bar1"}}}, {"M": {"b": {"S": "bar2"}}}]}} + ) # Verify item is appended to the existing list result = client.query( TableName="TestTable", @@ -3609,6 +4063,101 @@ def test_update_supports_list_append_maps(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_supports_nested_update_if_nested_value_not_exists(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + name = "TestTable" + + dynamodb.create_table( + TableName=name, + KeySchema=[{"AttributeName": "user_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "user_id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + table = dynamodb.Table(name) + table.put_item( + Item={"user_id": "1234", "friends": {"5678": {"name": "friend_5678"}},}, + ) + table.update_item( + Key={"user_id": "1234"}, + ExpressionAttributeNames={"#friends": "friends", "#friendid": "0000",}, + ExpressionAttributeValues={":friend": {"name": "friend_0000"},}, + UpdateExpression="SET #friends.#friendid = :friend", + ReturnValues="UPDATED_NEW", + ) + item = table.get_item(Key={"user_id": "1234"})["Item"] + assert item == { + "user_id": "1234", + "friends": {"5678": {"name": "friend_5678"}, "0000": {"name": "friend_0000"},}, + } + + +@mock_dynamodb2 +def test_update_supports_list_append_with_nested_if_not_exists_operation(): + dynamo = boto3.resource("dynamodb", region_name="us-west-1") + table_name = "test" + + dynamo.create_table( + TableName=table_name, + AttributeDefinitions=[{"AttributeName": "Id", "AttributeType": "S"}], + KeySchema=[{"AttributeName": "Id", "KeyType": "HASH"}], + ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20}, + ) + + table = dynamo.Table(table_name) + + table.put_item(Item={"Id": "item-id", "nest1": {"nest2": {}}}) + updated_item = table.update_item( + Key={"Id": "item-id"}, + UpdateExpression="SET nest1.nest2.event_history = list_append(if_not_exists(nest1.nest2.event_history, :empty_list), :new_value)", + 
ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]}, + ReturnValues="UPDATED_NEW", + ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"nest1": {"nest2": {"event_history": ["some_value"]}}} + ) + + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( + {"Id": "item-id", "nest1": {"nest2": {"event_history": ["some_value"]}}} + ) + + +@mock_dynamodb2 +def test_update_supports_list_append_with_nested_if_not_exists_operation_and_property_already_exists(): + dynamo = boto3.resource("dynamodb", region_name="us-west-1") + table_name = "test" + + dynamo.create_table( + TableName=table_name, + AttributeDefinitions=[{"AttributeName": "Id", "AttributeType": "S"}], + KeySchema=[{"AttributeName": "Id", "KeyType": "HASH"}], + ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20}, + ) + + table = dynamo.Table(table_name) + + table.put_item(Item={"Id": "item-id", "event_history": ["other_value"]}) + updated_item = table.update_item( + Key={"Id": "item-id"}, + UpdateExpression="SET event_history = list_append(if_not_exists(event_history, :empty_list), :new_value)", + ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]}, + ReturnValues="UPDATED_NEW", + ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"event_history": ["other_value", "some_value"]} + ) + + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( + {"Id": "item-id", "event_history": ["other_value", "some_value"]} + ) + + @mock_dynamodb2 def test_update_catches_invalid_list_append_operation(): client = boto3.client("dynamodb", region_name="us-east-1") @@ -3625,7 +4174,7 @@ def test_update_catches_invalid_list_append_operation(): ) # Update item using invalid list_append expression - with assert_raises(ParamValidationError) as ex: + with pytest.raises(ParamValidationError) as ex: client.update_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}}, @@ -3634,10 +4183,8 @@ def test_update_catches_invalid_list_append_operation(): ) # Verify correct error is returned - str(ex.exception).should.match("Parameter validation failed:") - str(ex.exception).should.match( - "Invalid type for parameter ExpressionAttributeValues." 
- ) + str(ex.value).should.match("Parameter validation failed:") + str(ex.value).should.match("Invalid type for parameter ExpressionAttributeValues.") def _create_user_table(): @@ -3690,11 +4237,16 @@ def test_update_nested_item_if_original_value_is_none(): ) table = dynamo.Table("origin-rbu-dev") table.put_item(Item={"job_id": "a", "job_details": {"job_name": None}}) - table.update_item( + updated_item = table.update_item( Key={"job_id": "a"}, UpdateExpression="SET job_details.job_name = :output", ExpressionAttributeValues={":output": "updated"}, + ReturnValues="UPDATED_NEW", ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal({"job_details": {"job_name": "updated"}}) + table.scan()["Items"][0]["job_details"]["job_name"].should.equal("updated") @@ -3710,11 +4262,16 @@ def test_allow_update_to_item_with_different_type(): table = dynamo.Table("origin-rbu-dev") table.put_item(Item={"job_id": "a", "job_details": {"job_name": {"nested": "yes"}}}) table.put_item(Item={"job_id": "b", "job_details": {"job_name": {"nested": "yes"}}}) - table.update_item( + updated_item = table.update_item( Key={"job_id": "a"}, UpdateExpression="SET job_details.job_name = :output", ExpressionAttributeValues={":output": "updated"}, + ReturnValues="UPDATED_NEW", ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal({"job_details": {"job_name": "updated"}}) + table.get_item(Key={"job_id": "a"})["Item"]["job_details"][ "job_name" ].should.be.equal("updated") @@ -3734,11 +4291,1395 @@ def test_query_catches_when_no_filters(): ) table = dynamo.Table("origin-rbu-dev") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.query(TableName="original-rbu-dev") - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Either KeyConditions or QueryFilter should be present" ) + + +@mock_dynamodb2 +def test_invalid_transact_get_items(): + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test1", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table = dynamodb.Table("test1") + table.put_item( + Item={"id": "1", "val": "1",} + ) + + table.put_item( + Item={"id": "1", "val": "2",} + ) + + client = boto3.client("dynamodb", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"}}, "TableName": "test1"}} + for i in range(26) + ] + ) + + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.match( + r"failed to satisfy constraint: Member must have length less than or equal to 25", + re.I, + ) + + with pytest.raises(ClientError) as ex: + client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "test1",}}, + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "non_exists_table",}}, + ] + ) + + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + 
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal("Requested resource not found") + + +@mock_dynamodb2 +def test_valid_transact_get_items(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test1", + KeySchema=[ + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "id", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table1 = dynamodb.Table("test1") + table1.put_item( + Item={"id": "1", "sort_key": "1",} + ) + + table1.put_item( + Item={"id": "1", "sort_key": "2",} + ) + + dynamodb.create_table( + TableName="test2", + KeySchema=[ + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "id", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table2 = dynamodb.Table("test2") + table2.put_item( + Item={"id": "1", "sort_key": "1",} + ) + + client = boto3.client("dynamodb", region_name="us-east-1") + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "non_exists_key"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + ] + ) + res["Responses"][0]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) + len(res["Responses"]).should.equal(2) + res["Responses"][1].should.equal({}) + + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ] + ) + + res["Responses"][0]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) + + res["Responses"][1]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "2"}}) + + res["Responses"][2]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) + + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ], + ReturnConsumedCapacity="TOTAL", + ) + + res["ConsumedCapacity"][0].should.equal( + {"TableName": "test1", "CapacityUnits": 4.0, "ReadCapacityUnits": 4.0} + ) + + res["ConsumedCapacity"][1].should.equal( + {"TableName": "test2", "CapacityUnits": 2.0, "ReadCapacityUnits": 2.0} + ) + + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ], + ReturnConsumedCapacity="INDEXES", + ) + + res["ConsumedCapacity"][0].should.equal( + { + "TableName": "test1", + 
"CapacityUnits": 4.0, + "ReadCapacityUnits": 4.0, + "Table": {"CapacityUnits": 4.0, "ReadCapacityUnits": 4.0,}, + } + ) + + res["ConsumedCapacity"][1].should.equal( + { + "TableName": "test2", + "CapacityUnits": 2.0, + "ReadCapacityUnits": 2.0, + "Table": {"CapacityUnits": 2.0, "ReadCapacityUnits": 2.0,}, + } + ) + + +@mock_dynamodb2 +def test_gsi_verify_negative_number_order(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-K1", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1SortKey", "AttributeType": "N"}, + ], + } + + item1 = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("-0.6"), + } + + item2 = { + "partitionKey": "pk-2", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("-0.7"), + } + + item3 = { + "partitionKey": "pk-3", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("0.7"), + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item3) + table.put_item(Item=item1) + table.put_item(Item=item2) + + resp = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-k1"), + IndexName="GSI-K1", + ) + # Items should be ordered with the lowest number first + [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal( + [-0.7, -0.6, 0.7] + ) + + +@mock_dynamodb2 +def test_transact_write_items_put(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Put multiple items + dynamodb.transact_write_items( + TransactItems=[ + { + "Put": { + "Item": {"id": {"S": "foo{}".format(str(i))}, "foo": {"S": "bar"},}, + "TableName": "test-table", + } + } + for i in range(0, 5) + ] + ) + # Assert all are present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(5) + + +@mock_dynamodb2 +def test_transact_write_items_put_conditional_expressions(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo2"},}, + ) + # Put multiple items + with pytest.raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Put": { + "Item": { + "id": {"S": "foo{}".format(str(i))}, + "foo": {"S": "bar"}, + }, + "TableName": "test-table", + "ConditionExpression": "#i <> :i", + "ExpressionAttributeNames": {"#i": "id"}, + "ExpressionAttributeValues": { + ":i": { + "S": "foo2" + } # This item already exist, so the ConditionExpression should fail + }, + } + } + for i in range(0, 5) + ] + ) + # 
+ + +@mock_dynamodb2 +def test_transact_write_items_conditioncheck_passes(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item without an email address + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # Put an email address, after verifying it doesn't exist yet + dynamodb.transact_write_items( + TransactItems=[ + { + "ConditionCheck": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + }, + { + "Put": { + "Item": { + "id": {"S": "foo"}, + "email_address": {"S": "test@moto.com"}, + }, + "TableName": "test-table", + } + }, + ] + ) + # Assert all are present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_conditioncheck_fails(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with an email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to put a new email address, after verifying that none exists yet + # Since the attribute does exist, the ConditionCheck should fail + with pytest.raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "ConditionCheck": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + }, + { + "Put": { + "Item": { + "id": {"S": "foo"}, + "email_address": {"S": "update@moto.com"}, + }, + "TableName": "test-table", + } + }, + ] + ) + # Assert the exception is correct + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + # Assert the original email address is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_delete(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # Delete the item
+ dynamodb.transact_write_items( + TransactItems=[ + {"Delete": {"Key": {"id": {"S": "foo"}}, "TableName": "test-table",}} + ] + ) + # Assert the item is deleted + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(0) + + +@mock_dynamodb2 +def test_transact_write_items_delete_with_successful_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item without an email address + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # ConditionExpression will pass - no email address has been specified yet + dynamodb.transact_write_items( + TransactItems=[ + { + "Delete": { + "Key": {"id": {"S": "foo"},}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + } + ] + ) + # Assert the item is deleted + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(0) + + +@mock_dynamodb2 +def test_transact_write_items_delete_with_failed_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with an email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to delete the item, but only if it has no email address + # Since the email address is present, the ConditionExpression should fail + with pytest.raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Delete": { + "Key": {"id": {"S": "foo"},}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + } + ] + ) + # Assert the exception is correct + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + # Assert the original item is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_update(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item + dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) + # Update the item + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ExpressionAttributeNames": {"#e": "email_address"}, + "ExpressionAttributeValues": {":v": {"S": "test@moto.com"}}, + } + } + ] + ) + # Assert the item is updated + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}})
+ + +@mock_dynamodb2 +def test_transact_write_items_update_with_failed_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with an email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to set a new email address, but only if none exists yet + # Since the email address is present, the ConditionExpression should fail + with pytest.raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + "ExpressionAttributeValues": {":v": {"S": "update@moto.com"}}, + } + } + ] + ) + # Assert the exception is correct + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + # Assert the original item is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_dynamodb_max_1mb_limit(): + ddb = boto3.resource("dynamodb", region_name="eu-west-1") + + table_name = "populated-mock-table" + table = ddb.create_table( + TableName=table_name, + KeySchema=[ + {"AttributeName": "partition_key", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "partition_key", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Populate the table + items = [ + { + "partition_key": "partition_key_val", # size=30 + "sort_key": "sort_key_value____" + str(i), # size=30 + } + for i in range(10000, 29999) + ] + with table.batch_writer() as batch: + for item in items: + batch.put_item(Item=item) + + response = table.query( + KeyConditionExpression=Key("partition_key").eq("partition_key_val") + ) + # We shouldn't get everything back - the total result set is well over 1MB + len(items).should.be.greater_than(response["Count"]) + response["LastEvaluatedKey"].shouldnt.be(None)
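The 1MB response cap asserted above is why real callers page through query results. A minimal sketch of the usual LastEvaluatedKey loop, assuming a table resource like the one created in this test (the helper name is illustrative, not part of this PR):

from boto3.dynamodb.conditions import Key

def query_all_items(table, partition_key_value):
    # Keep querying until DynamoDB stops returning a LastEvaluatedKey,
    # feeding each page's key back in as ExclusiveStartKey.
    items = []
    kwargs = {"KeyConditionExpression": Key("partition_key").eq(partition_key_value)}
    while True:
        response = table.query(**kwargs)
        items.extend(response["Items"])
        if "LastEvaluatedKey" not in response:
            return items
        kwargs["ExclusiveStartKey"] = response["LastEvaluatedKey"]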
+ """ + syntax_error_template = ( + 'Invalid UpdateExpression: Syntax error; token: "{token}", near: "{near}"' + ) + expected_syntax_error = syntax_error_template.format(token=token, near=near) + assert client_error.response["Error"]["Code"] == "ValidationException" + assert expected_syntax_error == client_error.response["Error"]["Message"] + + +@mock_dynamodb2 +def test_update_expression_with_numeric_literal_instead_of_value(): + """ + DynamoDB requires literals to be passed in as values. If they are put literally in the expression a token error will + be raised + """ + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = myNum + 1", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "1", "+ 1") + + +@mock_dynamodb2 +def test_update_expression_with_multiple_set_clauses_must_be_comma_separated(): + """ + An UpdateExpression can have multiple set clauses but if they are passed in without the separating comma. + """ + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = myNum Mystr2 myNum2", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "Mystr2", "myNum Mystr2 myNum2") + + +@mock_dynamodb2 +def test_list_tables_exclusive_start_table_name_empty(): + client = boto3.client("dynamodb", region_name="us-east-1") + + resp = client.list_tables(Limit=1, ExclusiveStartTableName="whatever") + + len(resp["TableNames"]).should.equal(0) + + +def assert_correct_client_error( + client_error, code, message_template, message_values=None, braces=None +): + """ + Assert whether a client_error is as expected. Allow for a list of values to be passed into the message + + Args: + client_error(ClientError): The ClientError exception that was raised + code(str): The code for the error (e.g. ValidationException) + message_template(str): Error message template. if message_values is not None then this template has a {values} + as placeholder. For example: + 'Value provided in ExpressionAttributeValues unused in expressions: keys: {values}' + message_values(list of str|None): The values that are passed in the error message + braces(list of str|None): List of length 2 with opening and closing brace for the values. 
+ + +def assert_correct_client_error( + client_error, code, message_template, message_values=None, braces=None +): + """ + Assert whether a client_error is as expected. Allows for a list of values to be passed into the message. + + Args: + client_error(ClientError): The ClientError exception that was raised + code(str): The code for the error (e.g. ValidationException) + message_template(str): Error message template. If message_values is not None then this template has a {values} + placeholder. For example: + 'Value provided in ExpressionAttributeValues unused in expressions: keys: {values}' + message_values(list of str|None): The values that are passed in the error message + braces(list of str|None): List of length 2 with the opening and closing brace for the values. By default the + values are surrounded by curly brackets. + """ + braces = braces or ["{", "}"] + assert client_error.response["Error"]["Code"] == code + if message_values is not None: + values_string = "{open_brace}(?P<values>.*){close_brace}".format( + open_brace=braces[0], close_brace=braces[1] + ) + re_msg = re.compile(message_template.format(values=values_string)) + match_result = re_msg.match(client_error.response["Error"]["Message"]) + assert match_result is not None + values_string = match_result.groupdict()["values"] + values = [key for key in values_string.split(", ")] + assert len(message_values) == len(values) + for value in message_values: + assert value in values + else: + assert client_error.response["Error"]["Message"] == message_template + + +def create_simple_table_and_return_client(): + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"},], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "MyStr": {"S": "1"},}, + ) + return dynamodb + + +# https://github.com/spulec/moto/issues/2806 +# https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html +# #DDB-UpdateItem-request-UpdateExpression +@mock_dynamodb2 +def test_update_item_with_attribute_in_right_hand_side_and_operation(): + dynamodb = create_simple_table_and_return_client() + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = myNum+:val", + ExpressionAttributeValues={":val": {"N": "3"}}, + ) + + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myNum"]["N"] == "4" + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = myNum - :val", + ExpressionAttributeValues={":val": {"N": "1"}}, + ) + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myNum"]["N"] == "3" + + +@mock_dynamodb2 +def test_non_existing_attribute_should_raise_exception(): + """ + Check that the correct error message is raised if an attribute is referenced that does not exist for the item. + """ + dynamodb = create_simple_table_and_return_client() + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = no_attr + MyStr", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + )
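A common way to sidestep the error asserted above is to wrap the possibly-missing attribute in if_not_exists with a default. A minimal sketch against the same moto-test table; the :zero placeholder is an assumption, not part of these tests:

# Assumes `dynamodb` came from create_simple_table_and_return_client().
dynamodb.update_item(
    TableName="moto-test",
    Key={"id": {"S": "1"}},
    # if_not_exists substitutes :zero when no_attr is missing, so the addition succeeds.
    UpdateExpression="SET MyStr = if_not_exists(no_attr, :zero) + myNum",
    ExpressionAttributeValues={":zero": {"N": "0"}},
)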
+ """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my+Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my+Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + ) + + +@mock_dynamodb2 +def test_update_expression_with_minus_in_attribute_name(): + """ + Does error message get correctly raised if attribute contains a minus and is passed in without an AttributeName. And + lhs & rhs are not attribute IDs by themselve. + """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my-Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my-Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + ) + + +@mock_dynamodb2 +def test_update_expression_with_space_in_attribute_name(): + """ + Does error message get correctly raised if attribute contains a space and is passed in without an AttributeName. And + lhs & rhs are not attribute IDs by themselves. + """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "Num", "my Num") + + +@mock_dynamodb2 +def test_summing_up_2_strings_raises_exception(): + """ + Update set supports different DynamoDB types but some operations are not supported. For example summing up 2 strings + raises an exception. 
+ + +@mock_dynamodb2 +def test_summing_up_2_strings_raises_exception(): + """ + SET supports different DynamoDB types, but some operations are not supported. For example, summing up 2 strings + raises an exception. It results in a ClientError with code ValidationException, saying: + An operand in the update expression has an incorrect data type + """ + dynamodb = create_simple_table_and_return_client() + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = MyStr + MyStr", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "An operand in the update expression has an incorrect data type", + ) + + +# https://github.com/spulec/moto/issues/2806 +@mock_dynamodb2 +def test_update_item_with_attribute_in_right_hand_side(): + """ + After tokenization and building the expression, make sure referenced attributes are replaced with their current value. + """ + dynamodb = create_simple_table_and_return_client() + + # Make sure there are 2 values + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myVal1": {"S": "Value1"}, "myVal2": {"S": "Value2"}}, + ) + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myVal1 = myVal2", + ) + + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myVal1"]["S"] == result["Item"]["myVal2"]["S"] == "Value2" + + +@mock_dynamodb2 +def test_multiple_updates(): + dynamodb = create_simple_table_and_return_client() + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "path": {"N": "6"}}, + ) + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = #p + :val, newAttr = myNum", + ExpressionAttributeValues={":val": {"N": "1"}}, + ExpressionAttributeNames={"#p": "path"}, + ) + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}})["Item"] + expected_result = { + "myNum": {"N": "7"}, + "newAttr": {"N": "1"}, + "path": {"N": "6"}, + "id": {"S": "1"}, + } + assert result == expected_result + + +@mock_dynamodb2 +def test_update_item_atomic_counter(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-3") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, + Item={"t_id": {"S": "item1"}, "n_i": {"N": "5"}, "n_f": {"N": "5.3"}}, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set n_i = n_i + :inc1, n_f = n_f + :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "0.05"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + updated_item["n_i"]["N"].should.equal("6.2") + updated_item["n_f"]["N"].should.equal("5.35") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_return_values(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-3") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item(TableName=table, Item={"t_id": {"S": "item1"}, "v": {"N": "5"}}) + + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, +
ReturnValues="UPDATED_OLD", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("5") + + # second update + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, + ReturnValues="UPDATED_OLD", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("6") + + # third update + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, + ReturnValues="UPDATED_NEW", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("8") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_from_zero(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add n_i :inc1, n_f :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["n_i"]["N"] == "1.2" + assert updated_item["n_f"]["N"] == "-0.5" + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"SS": ["hello"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["SS"] == ["hello"] + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_number_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"NS": ["3"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["NS"] == ["3"] + + +@mock_dynamodb2 +def test_transact_write_items_fails_with_transaction_canceled_exception(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", 
**table_schema + ) + # Insert one item + dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) + # Update two items, the one that exists and another that doesn't + with pytest.raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #k = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#k": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + { + "Update": { + "Key": {"id": {"S": "doesnotexist"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#e": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + ] + ) + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]" + ) + + +@mock_dynamodb2 +def test_gsi_projection_type_keys_only(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-K1", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "someAttribute": "lore ipsum", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-pk"), + IndexName="GSI-K1", + )["Items"] + items.should.have.length_of(1) + # Item should only include GSI Keys and Table Keys, as per the ProjectionType + items[0].should.equal( + { + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "partitionKey": "pk-1", + } + ) + + +@mock_dynamodb2 +def test_gsi_projection_type_include(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-INC", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": { + "ProjectionType": "INCLUDE", + "NonKeyAttributes": ["projectedAttribute"], + }, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "projectedAttribute": "lore ipsum", + "nonProjectedAttribute": "dolor sit amet", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", 
**table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-pk"), + IndexName="GSI-INC", + )["Items"] + items.should.have.length_of(1) + # Item should only include the keys and the additionally projected attributes, as per the ProjectionType + items[0].should.equal( + { + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "partitionKey": "pk-1", + "projectedAttribute": "lore ipsum", + } + ) + + +@mock_dynamodb2 +def test_lsi_projection_type_keys_only(): + table_schema = { + "KeySchema": [ + {"AttributeName": "partitionKey", "KeyType": "HASH"}, + {"AttributeName": "sortKey", "KeyType": "RANGE"}, + ], + "LocalSecondaryIndexes": [ + { + "IndexName": "LSI", + "KeySchema": [ + {"AttributeName": "partitionKey", "KeyType": "HASH"}, + {"AttributeName": "lsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "sortKey", "AttributeType": "S"}, + {"AttributeName": "lsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "sortKey": "sk-1", + "lsiK1SortKey": "lsi-sk", + "someAttribute": "lore ipsum", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("partitionKey").eq("pk-1"), IndexName="LSI", + )["Items"] + items.should.have.length_of(1) + # Item should only include LSI keys and table keys, as per the ProjectionType + items[0].should.equal( + {"partitionKey": "pk-1", "sortKey": "sk-1", "lsiK1SortKey": "lsi-sk"} + ) + + +@mock_dynamodb2 +def test_set_attribute_is_dropped_if_empty_after_update_expression(): + table_name, item_key, set_item = "test-table", "test-id", "test-data" + client = boto3.client("dynamodb", region_name="us-east-1") + client.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "customer", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "customer", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + client.update_item( + TableName=table_name, + Key={"customer": {"S": item_key}}, + UpdateExpression="ADD orders :order", + ExpressionAttributeValues={":order": {"SS": [set_item]}}, + ) + resp = client.scan(TableName=table_name, ProjectionExpression="customer, orders") + item = resp["Items"][0] + item.should.have.key("customer") + item.should.have.key("orders") + + client.update_item( + TableName=table_name, + Key={"customer": {"S": item_key}}, + UpdateExpression="DELETE orders :order", + ExpressionAttributeValues={":order": {"SS": [set_item]}}, + ) + resp = client.scan(TableName=table_name, ProjectionExpression="customer, orders") + item = resp["Items"][0] + item.should.have.key("customer") + item.should_not.have.key("orders") + + +@mock_dynamodb2 +def test_transact_get_items_should_return_empty_map_for_non_existent_item(): + client = boto3.client("dynamodb", region_name="us-west-2") + table_name = "test-table" + key_schema = [{"AttributeName": "id", "KeyType": "HASH"}] + attribute_definitions = [{"AttributeName": "id", "AttributeType": "S"}] + client.create_table( + TableName=table_name, + KeySchema=key_schema, + AttributeDefinitions=attribute_definitions, +
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + item = {"id": {"S": "1"}} + client.put_item(TableName=table_name, Item=item) + items = client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"}}, "TableName": table_name}}, + {"Get": {"Key": {"id": {"S": "2"}}, "TableName": table_name}}, + ] + ).get("Responses", []) + items.should.have.length_of(2) + items[0].should.equal({"Item": item}) + items[1].should.equal({}) diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py new file mode 100644 index 000000000..577a5bae0 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -0,0 +1,466 @@ +import pytest + +from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.models import Item, DynamoType +from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator + + +def test_execution_of_if_not_exists_not_existing_value(table): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_existing_attribute_should_return_attribute( + table, +): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "B"}, "b": {"S": "B"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_existing_attribute_should_return_value(table): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", 
+ range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}, "a": {"N": "3"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_value( + table, +): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_sum_operation(table): + update_expression = "SET a = a + b" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "7"}, "b": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_remove(table): + update_expression = "Remove a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_remove_in_map(table): + update_expression = "Remove itemmap.itemlist[1].foo11" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"}, "foo11": {"S": "bar2"}}}, + ] + } + } + }, + }, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": 
"foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"},}}, + ] + } + } + }, + }, + ) + assert expected_item == item + + +def test_execution_of_remove_in_list(table): + update_expression = "Remove itemmap.itemlist[1]" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"}, "foo11": {"S": "bar2"}}}, + ] + } + } + }, + }, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [{"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}},] + } + } + }, + }, + ) + assert expected_item == item + + +def test_execution_of_delete_element_from_set(table): + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value3"]},}, + ) + assert expected_item == item + + +def test_execution_of_add_number(table): + update_expression = "add s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"N": "10"}}, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "15"}}, + ) + assert expected_item == item + + +def test_execution_of_add_set_to_a_number(table): + update_expression = "add s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["s1"]}}, + item=item, + table=table, + ).validate() + 
UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "15"}}, + ) + assert expected_item == item + assert False + except IncorrectDataType: + assert True + + +def test_execution_of_add_to_a_set(table): + update_expression = "ADD s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value2", "value3", "value5"]}, + }, + ) + assert expected_item == item + + +@pytest.mark.parametrize( + "expression_attribute_values,unexpected_data_type", + [ + ({":value": {"S": "10"}}, "STRING",), + ({":value": {"N": "10"}}, "NUMBER",), + ({":value": {"B": "10"}}, "BINARY",), + ({":value": {"BOOL": True}}, "BOOLEAN",), + ({":value": {"NULL": True}}, "NULL",), + ({":value": {"M": {"el0": {"S": "10"}}}}, "MAP",), + ({":value": {"L": []}}, "LIST",), + ], +) +def test_execution_of__delete_element_from_set_invalid_value( + expression_attribute_values, unexpected_data_type, table +): + """A delete statement must use a value of type SS in order to delete elements from a set.""" + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=expression_attribute_values, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + assert False, "Must raise exception" + except IncorrectOperandType as e: + assert e.operator_or_function == "operator: DELETE" + assert e.operand_type == unexpected_data_type + + +def test_execution_of_delete_element_from_a_string_attribute(table): + """A delete statement must use a value of type SS in order to delete elements from a set.""" + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"S": "5"},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2"]}}, + item=item, + table=table, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + assert False, "Must raise exception" + except IncorrectDataType: + assert True diff --git a/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py new file 
mode 100644 index 000000000..ddfb81d1a --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py @@ -0,0 +1,271 @@ +from moto.dynamodb2.exceptions import ( + InvalidTokenException, + InvalidExpressionAttributeNameKey, +) +from moto.dynamodb2.parsing.tokens import ExpressionTokenizer, Token + + +def test_expression_tokenizer_single_set_action(): + set_action = "SET attrName = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_leading_space(): + set_action = "Set attrName = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "Set"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_attribute_name_leading_space(): + set_action = "SET #a = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_NAME, "#a"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_trailing_space(): + set_action = "SET attrName = :attrValue " + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + Token(Token.WHITESPACE, " "), + ] + + +def test_expression_tokenizer_single_set_action_multi_spaces(): + set_action = "SET attrName = :attrValue " + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + Token(Token.WHITESPACE, " "), + ] + + +def test_expression_tokenizer_single_set_action_with_numbers_in_identifiers(): + set_action = "SET attrName3 = :attr3Value" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName3"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attr3Value"), + ] + + +def test_expression_tokenizer_single_set_action_with_underscore_in_identifier(): + set_action = "SET attr_Name = :attr_Value" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attr_Name"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attr_Value"), + ] + + +def 
test_expression_tokenizer_leading_underscore_in_attribute_name_expression(): + """Leading underscore is not allowed for an attribute name""" + set_action = "SET attrName = _idid" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "_" + assert te.near == "= _idid" + + +def test_expression_tokenizer_leading_underscore_in_attribute_value_expression(): + """Leading underscore is allowed in an attribute value""" + set_action = "SET attrName = :_attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":_attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_nested_attribute(): + set_action = "SET attrName.elem = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "elem"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_list_index_with_sub_attribute(): + set_action = "SET itemmap.itemlist[1].foos=:Item" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "itemmap"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "itemlist"), + Token(Token.OPEN_SQUARE_BRACKET, "["), + Token(Token.NUMBER, "1"), + Token(Token.CLOSE_SQUARE_BRACKET, "]"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "foos"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_VALUE, ":Item"), + ] + + +def test_expression_tokenizer_list_index_surrounded_with_whitespace(): + set_action = "SET itemlist[ 1 ]=:Item" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "itemlist"), + Token(Token.OPEN_SQUARE_BRACKET, "["), + Token(Token.WHITESPACE, " "), + Token(Token.NUMBER, "1"), + Token(Token.WHITESPACE, " "), + Token(Token.CLOSE_SQUARE_BRACKET, "]"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_VALUE, ":Item"), + ] + + +def test_expression_tokenizer_single_set_action_attribute_name_invalid_key(): + """ + ExpressionAttributeNames contains invalid key: Syntax error; key: "#va#l2" + """ + set_action = "SET #va#l2 = 3" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidExpressionAttributeNameKey as e: + assert e.key == "#va#l2" + + +def test_expression_tokenizer_single_set_action_attribute_name_invalid_key_double_hash(): + """ + ExpressionAttributeNames contains invalid key: Syntax error; key: "#va#l" + """ + set_action = "SET #va#l = 3" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidExpressionAttributeNameKey as e: + assert e.key == "#va#l" + + +def test_expression_tokenizer_single_set_action_attribute_name_valid_key(): + set_action = "SET attr=#val2" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + 
Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attr"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_NAME, "#val2"), + ] + + +def test_expression_tokenizer_single_set_action_attribute_name_leading_number(): + set_action = "SET attr=#0" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attr"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_NAME, "#0"), + ] + + +def test_expression_tokenizer_just_a_pipe(): + set_action = "|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "|" + + +def test_expression_tokenizer_just_a_pipe_with_leading_white_spaces(): + set_action = " |" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == " |" + + +def test_expression_tokenizer_just_a_pipe_for_set_expression(): + set_action = "SET|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "SET|" + + +def test_expression_tokenizer_just_an_attribute_and_a_pipe_for_set_expression(): + set_action = "SET a|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "a|" diff --git a/tests/test_dynamodb2/test_dynamodb_expressions.py b/tests/test_dynamodb2/test_dynamodb_expressions.py new file mode 100644 index 000000000..2c82d8bc4 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_expressions.py @@ -0,0 +1,405 @@ +from moto.dynamodb2.exceptions import InvalidTokenException +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.reserved_keywords import ReservedKeywords + + +def test_get_reserved_keywords(): + reserved_keywords = ReservedKeywords.get_reserved_keywords() + assert "SET" in reserved_keywords + assert "DELETE" in reserved_keywords + assert "ADD" in reserved_keywords + # REMOVE is not part of the list of reserved keywords. 
+ assert "REMOVE" not in reserved_keywords + + +def test_update_expression_numeric_literal_in_expression(): + set_action = "SET attrName = 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "3" + assert te.near == "= 3" + + +def test_expression_tokenizer_multi_number_numeric_literal_in_expression(): + set_action = "SET attrName = 34" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "34" + assert te.near == "= 34" + + +def test_expression_tokenizer_numeric_literal_unclosed_square_bracket(): + set_action = "SET MyStr[ 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == "3" + + +def test_expression_tokenizer_wrong_closing_bracket_with_space(): + set_action = "SET MyStr[3 )" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "3 )" + + +def test_expression_tokenizer_wrong_closing_bracket(): + set_action = "SET MyStr[3)" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "3)" + + +def test_expression_tokenizer_only_numeric_literal_for_set(): + set_action = "SET 2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "2" + assert te.near == "SET 2" + + +def test_expression_tokenizer_only_numeric_literal(): + set_action = "2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "2" + assert te.near == "2" + + +def test_expression_tokenizer_set_closing_round_bracket(): + set_action = "SET )" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "SET )" + + +def test_expression_tokenizer_set_closing_followed_by_numeric_literal(): + set_action = "SET ) 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "SET ) 3" + + +def test_expression_tokenizer_numeric_literal_unclosed_square_bracket_trailing_space(): + set_action = "SET MyStr[ 3 " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == "3 " + + +def test_expression_tokenizer_unbalanced_round_brackets_only_opening(): + set_action = "SET MyStr = (:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_opening_trailing_space(): + set_action = "SET MyStr = (:_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":_val " + + +def 
test_expression_tokenizer_unbalanced_square_brackets_only_opening(): + set_action = "SET MyStr = [:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "= [:_val" + + +def test_expression_tokenizer_unbalanced_square_brackets_only_opening_trailing_spaces(): + set_action = "SET MyStr = [:_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "= [:_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_multiple_opening(): + set_action = "SET MyStr = (:_val + (:val2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":val2" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_closing(): + set_action = "SET MyStr = ):_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "= ):_val" + + +def test_expression_tokenizer_unbalanced_square_brackets_only_closing(): + set_action = "SET MyStr = ]:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "]" + assert te.near == "= ]:_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_closing_followed_by_other_parts(): + set_action = "SET MyStr = ):_val + :val2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "= ):_val" + + +def test_update_expression_starts_with_keyword_reset_followed_by_identifier(): + update_expression = "RESET NonExistent" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET NonExistent" + + +def test_update_expression_starts_with_keyword_reset_followed_by_identifier_and_value(): + update_expression = "RESET NonExistent value" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET NonExistent" + + +def test_update_expression_starts_with_leading_spaces_and_keyword_reset_followed_by_identifier_and_value(): + update_expression = " RESET NonExistent value" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == " RESET NonExistent" + + +def test_update_expression_with_only_keyword_reset(): + update_expression = "RESET" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET" + + +def test_update_nested_expression_with_selector_just_should_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a[0].b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + 
assert te.near == "= 5" + + +def test_update_nested_expression_with_selector_and_spaces_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a [ 2 ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_update_nested_expression_with_double_selector_and_spaces_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a [2][ 3 ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_update_nested_expression_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a . b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_nested_selectors_in_update_expression_should_fail_at_nesting(): + update_expression = "SET a [ [2] ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "[ [2" + + +def test_update_expression_number_in_selector_cannot_be_split(): + update_expression = "SET a [2 1]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "1" + assert te.near == "2 1]" + + +def test_update_expression_cannot_have_successive_attributes(): + update_expression = "SET #a a = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "a" + assert te.near == "#a a =" + + +def test_update_expression_path_with_both_attribute_and_attribute_name_should_only_fail_at_numeric_value(): + update_expression = "SET #a.a = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_expression_tokenizer_2_same_operators_back_to_back(): + set_action = "SET MyStr = NoExist + + :_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "+" + assert te.near == "+ + :_val" + + +def test_expression_tokenizer_2_different_operators_back_to_back(): + set_action = "SET MyStr = NoExist + - :_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "-" + assert te.near == "+ - :_val" + + +def test_update_expression_remove_does_not_allow_operations(): + remove_action = "REMOVE NoExist + " + try: + UpdateExpressionParser.make(remove_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "+" + assert te.near == "NoExist + " + + +def test_update_expression_add_does_not_allow_attribute_after_path(): + """val is not an attribute value here, since attribute values must start with a colon (:).""" + add_expr = "ADD attr val foobar" + try: + UpdateExpressionParser.make(add_expr) + assert False, "Exception not raised correctly" + 
except InvalidTokenException as te: + assert te.token == "val" + assert te.near == "attr val foobar" + + +def test_update_expression_add_does_not_allow_attribute_foobar_after_value(): + add_expr = "ADD attr :val foobar" + try: + UpdateExpressionParser.make(add_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "foobar" + assert te.near == ":val foobar" + + +def test_update_expression_delete_does_not_allow_attribute_after_path(): + """val is not an attribute value here, since attribute values must start with a colon (:).""" + delete_expr = "DELETE attr val" + try: + UpdateExpressionParser.make(delete_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "val" + assert te.near == "attr val" + + +def test_update_expression_delete_does_not_allow_attribute_foobar_after_value(): + delete_expr = "DELETE attr :val foobar" + try: + UpdateExpressionParser.make(delete_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "foobar" + assert te.near == ":val foobar" + + +def test_update_expression_parsing_is_not_keyword_aware(): + """path and VALUE are reserved keywords, yet parsing is not keyword-aware: the token error is raised for the numeric literal 1.""" + set_expr = "SET path = VALUE 1" + try: + UpdateExpressionParser.make(set_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "1" + assert te.near == "VALUE 1" + + +def test_expression_if_not_exists_is_not_valid_in_remove_statement(): + remove_action = "REMOVE if_not_exists(a,b)" + try: + UpdateExpressionParser.make(remove_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "(" + assert te.near == "if_not_exists(a" diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 7c7770874..1c8c12110 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -8,6 +8,8 @@ from boto3.dynamodb.conditions import Key from botocore.exceptions import ClientError import sure # noqa from freezegun import freeze_time +import pytest + from moto import mock_dynamodb2, mock_dynamodb2_deprecated from boto.exception import JSONResponseError from tests.helpers import requires_boto_gte @@ -574,6 +576,7 @@ "ReadCapacityUnits": 6, "WriteCapacityUnits": 1, }, + "IndexStatus": "ACTIVE", } ] ) @@ -928,6 +931,83 @@ boto3 """ +@mock_dynamodb2 +def test_boto3_create_table_with_gsi(): + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + + table = dynamodb.create_table( + TableName="users", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": 
{ + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + }, + } + ] + ) + + table = dynamodb.create_table( + TableName="users2", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + "ProvisionedThroughput": { + "ReadCapacityUnits": 3, + "WriteCapacityUnits": 5, + }, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": { + "ReadCapacityUnits": 3, + "WriteCapacityUnits": 5, + }, + } + ] + ) + + @mock_dynamodb2 def test_boto3_conditions(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") @@ -1253,14 +1333,31 @@ def test_update_item_with_expression(): item_key = {"forum_name": "the-key", "subject": "123"} - table.update_item(Key=item_key, UpdateExpression="SET field=2") + table.update_item( + Key=item_key, + UpdateExpression="SET field = :field_value", + ExpressionAttributeValues={":field_value": 2}, + ) dict(table.get_item(Key=item_key)["Item"]).should.equal( - {"field": "2", "forum_name": "the-key", "subject": "123"} + {"field": Decimal("2"), "forum_name": "the-key", "subject": "123"} ) - table.update_item(Key=item_key, UpdateExpression="SET field = 3") + table.update_item( + Key=item_key, + UpdateExpression="SET field = :field_value", + ExpressionAttributeValues={":field_value": 3}, + ) dict(table.get_item(Key=item_key)["Item"]).should.equal( - {"field": "3", "forum_name": "the-key", "subject": "123"} + {"field": Decimal("3"), "forum_name": "the-key", "subject": "123"} + ) + + +def assert_failure_due_to_key_not_in_schema(func, **kwargs): + with pytest.raises(ClientError) as ex: + func(**kwargs) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( + "The provided key element does not match the schema" ) @@ -1287,17 +1384,16 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {"item4"}}, ) current_item["str_set"] = current_item["str_set"].union({"item4"}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set - # Should just create the set in the background table.update_item( Key=item_key, UpdateExpression="ADD non_existing_str_set :v", ExpressionAttributeValues={":v": {"item4"}}, ) current_item["non_existing_str_set"] = {"item4"} - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a num value to a num set table.update_item( @@ -1306,7 +1402,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {6}}, ) current_item["num_set"] = current_item["num_set"].union({6}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a value to a number value table.update_item( @@ -1315,7 
+1411,7 @@ ExpressionAttributeValues={":v": 20}, ) current_item["num_val"] = current_item["num_val"] + 20 - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number value to a string set, should raise Client Error table.update_item.when.called_with( @@ -1323,7 +1419,7 @@ UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": 20}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number set to the string set, should raise a ClientError table.update_item.when.called_with( @@ -1331,7 +1427,7 @@ UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": {20}}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to update with a bad expression table.update_item.when.called_with( @@ -1369,10 +1465,10 @@ def test_update_item_add_with_nested_sets(): current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union( {"item4"} ) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set - # Should just create the set in the background + # Should create the set in the background table.update_item( Key=item_key, UpdateExpression="ADD #ns.#ne :v", @@ -1380,7 +1476,7 @@ ExpressionAttributeValues={":v": {"new_item"}}, ) current_item["nested"]["non_existing_str_set"] = {"new_item"} - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item @mock_dynamodb2 diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 08d7724f8..b5cc01c84 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -443,23 +443,40 @@ def test_update_item_nested_remove(): dict(returned_item).should.equal({"username": "steve", "Meta": {}}) -@mock_dynamodb2_deprecated +@mock_dynamodb2 def test_update_item_double_nested_remove(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create("messages", schema=[HashKey("username")]) + conn = boto3.client("dynamodb", region_name="us-east-1") + conn.create_table( + TableName="messages", + KeySchema=[{"AttributeName": "username", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "username", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) - data = {"username": "steve", "Meta": {"Name": {"First": "Steve", "Last": "Urkel"}}} - table.put_item(data=data) + item = { + "username": {"S": "steve"}, + "Meta": { + "M": {"Name": {"M": {"First": {"S": "Steve"}, "Last": {"S": "Urkel"}}}} + }, + } + conn.put_item(TableName="messages", Item=item) key_map = {"username": {"S": "steve"}} # Then remove the Meta.FullName field - conn.update_item("messages", key_map, update_expression="REMOVE Meta.Name.First") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal( - {"username": "steve", "Meta": 
{"Name": {"Last": "Urkel"}}} + conn.update_item( + TableName="messages", + Key=key_map, + UpdateExpression="REMOVE Meta.#N.#F", + ExpressionAttributeNames={"#N": "Name", "#F": "First"}, ) + returned_item = conn.get_item(TableName="messages", Key=key_map) + expected_item = { + "username": {"S": "steve"}, + "Meta": {"M": {"Name": {"M": {"Last": {"S": "Urkel"}}}}}, + } + dict(returned_item["Item"]).should.equal(expected_item) + @mock_dynamodb2_deprecated def test_update_item_set(): @@ -471,7 +488,10 @@ def test_update_item_set(): key_map = {"username": {"S": "steve"}} conn.update_item( - "messages", key_map, update_expression="SET foo=bar, blah=baz REMOVE SentBy" + "messages", + key_map, + update_expression="SET foo=:bar, blah=:baz REMOVE SentBy", + expression_attribute_values={":bar": {"S": "bar"}, ":baz": {"S": "baz"}}, ) returned_item = table.get_item(username="steve") @@ -616,8 +636,9 @@ def test_boto3_update_item_conditions_fail(): table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"Value": "bar"}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -627,8 +648,9 @@ def test_boto3_update_item_conditions_fail_because_expect_not_exists(): table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"Exists": False}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -638,8 +660,9 @@ def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_ table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"ComparisonOperator": "NULL"}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -649,8 +672,9 @@ def test_boto3_update_item_conditions_pass(): table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"foo": {"Value": "bar"}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -662,8 +686,9 @@ def test_boto3_update_item_conditions_pass_because_expect_not_exists(): table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"whatever": {"Exists": False}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -675,8 +700,9 @@ def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_ table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"whatever": {"ComparisonOperator": "NULL"}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -688,8 +714,9 
@@ def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_n table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"foo": {"ComparisonOperator": "NOT_NULL"}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py b/tests/test_dynamodb2/test_dynamodb_validation.py new file mode 100644 index 000000000..c966efc14 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -0,0 +1,513 @@ +import pytest + +from moto.dynamodb2.exceptions import ( + AttributeIsReservedKeyword, + ExpressionAttributeValueNotDefined, + AttributeDoesNotExist, + ExpressionAttributeNameNotDefined, + IncorrectOperandType, + InvalidUpdateExpressionInvalidDocumentPath, + EmptyKeyAttributeException, +) +from moto.dynamodb2.models import Item, DynamoType +from moto.dynamodb2.parsing.ast_nodes import ( + NodeDepthLeftTypeFetcher, + UpdateExpressionSetAction, + DDBTypedValue, +) +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator + + +def test_validation_of_empty_string_key_val(table): + with pytest.raises(EmptyKeyAttributeException): + update_expression = "set forum_name=:NewName" + update_expression_values = {":NewName": {"S": ""}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "forum_name"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"forum_name": {"S": "hello"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + + +def test_validation_of_update_expression_with_keyword(table): + try: + update_expression = "SET myNum = path + :val" + update_expression_values = {":val": {"N": "3"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + assert False, "No exception raised" + except AttributeIsReservedKeyword as e: + assert e.keyword == "path" + + +@pytest.mark.parametrize( + "update_expression", ["SET a = #b + :val2", "SET a = :val2 + #b",] +) +def test_validation_of_a_set_statement_with_incorrect_passed_value( + update_expression, table +): + """ + Running both operand orders shows that expression attribute values are substituted before attribute names are resolved. 
+ + An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression: + An expression attribute value used in expression is not defined; attribute value: :val2 + """ + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + try: + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names={"#b": "ok"}, + expression_attribute_values={":val": {"N": "3"}}, + item=item, + table=table, + ).validate() + assert False, "No exception raised" + except ExpressionAttributeValueNotDefined as e: + assert e.attribute_value == ":val2" + + +def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item( + table, +): + """ + When an update expression references an attribute that does not exist in the item, it must raise the appropriate exception. + + An error occurred (ValidationException) when calling the UpdateItem operation: + The provided expression refers to an attribute that does not exist in the item + """ + try: + update_expression = "SET a = nonexistent" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + assert False, "No exception raised" + except AttributeDoesNotExist: + assert True + + +@pytest.mark.parametrize("update_expression", ["SET a = #c", "SET a = #c + #d",]) +def test_validation_of_update_expression_with_attribute_name_that_is_not_defined( + update_expression, table, +): + """ + When an update expression uses an expression attribute name that was not provided, it must raise an exception. 
+ + An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression: + An expression attribute name used in the document path is not defined; attribute name: #c + """ + try: + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names={"#b": "ok"}, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + assert False, "No exception raised" + except ExpressionAttributeNameNotDefined as e: + assert e.not_defined_attribute_name == "#c" + + +def test_validation_of_if_not_exists_not_existing_invalid_replace_value(table): + try: + update_expression = "SET a = if_not_exists(b, a.c)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + assert False, "No exception raised" + except AttributeDoesNotExist: + assert True + + +def get_first_node_of_type(ast, node_type): + return next(NodeDepthLeftTypeFetcher(node_type, ast)) + + +def get_set_action_value(ast): + """ + Helper that takes an AST and gets the first UpdateExpressionSetAction and retrieves the value of that action. + This should only be called on validated expressions. + Args: + ast(Node): + + Returns: + DynamoType: The DynamoType object representing the Dynamo value. 
+ """ + set_action = get_first_node_of_type(ast, UpdateExpressionSetAction) + typed_value = set_action.children[1] + assert isinstance(typed_value, DDBTypedValue) + dynamo_value = typed_value.children[0] + assert isinstance(dynamo_value, DynamoType) + return dynamo_value + + +def test_validation_of_if_not_exists_not_existing_value(table): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"S": "A"}) + + +def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute( + table, +): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"S": "B"}) + + +def test_validation_of_if_not_exists_with_existing_attribute_should_return_value(table): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "3"}) + + +def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value( + table, +): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "4"}) + + +def test_validation_of_sum_operation(table): + update_expression = "SET a = a + b" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + 
expression_attribute_values=None, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "7"}) + + +def test_validation_homogeneous_list_append_function(table): + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"L": [{"S": "i3"}, {"S": "i4"}]}}, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType( + {"L": [{"S": "i1"}, {"S": "i2"}, {"S": "i3"}, {"S": "i4"}]} + ) + + +def test_validation_heterogeneous_list_append_function(table): + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"L": [{"N": "3"}]}}, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"L": [{"S": "i1"}, {"S": "i2"}, {"N": "3"}]}) + + +def test_validation_list_append_function_with_non_list_arg(table): + """ + Must error out with: + Invalid UpdateExpression: Incorrect operand type for operator or function; + operator or function: list_append, operand type: S + """ + try: + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"S": "N"}}, + item=item, + table=table, + ).validate() + assert False, "Must raise exception" + except IncorrectOperandType as e: + assert e.operand_type == "S" + assert e.operator_or_function == "list_append" + + +def test_sum_with_incompatible_types(table): + """ + Must error out with: + Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S + """ + try: + update_expression = "SET ri = :val + :val2" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}}, + item=item, + table=table, + ).validate() + assert False, "Must raise exception" + except IncorrectOperandType as e: + assert e.operand_type == "S" + assert e.operator_or_function == "+" + + +def test_validation_of_subtraction_operation(table): + update_expression = "SET ri = :val - :val2" + update_expression_ast = 
UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":val": {"N": "1"}, ":val2": {"N": "3"}}, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "-2"}) + + +def test_cannot_index_into_a_string(table): + """ + Must error out with: + The document path provided in the update expression is invalid for update + """ + try: + update_expression = "set itemstr[1]=:Item" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":Item": {"S": "string_update"}}, + item=item, + table=table, + ).validate() + assert False, "Must raise exception" + except InvalidUpdateExpressionInvalidDocumentPath: + assert True + + +def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute( + table, +): + """If validation passes without raising, this test is satisfied.""" + update_expression = "set d=a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "a": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "3"}) + + +def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute( + table, +): + try: + update_expression = "set d.e=a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "a": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + table=table, + ).validate() + assert False, "Must raise exception" + except InvalidUpdateExpressionInvalidDocumentPath: + assert True diff --git a/tests/test_dynamodbstreams/__init__.py b/tests/test_dynamodbstreams/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_dynamodbstreams/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank.
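A note for readers working through the new test modules above: together they exercise moto's update-expression machinery as a three-stage pipeline of tokenize/parse (UpdateExpressionParser, built on ExpressionTokenizer), validate against a concrete item (UpdateExpressionValidator), and execute (UpdateExpressionExecutor). A minimal sketch of how the stages chain together follows; the import path for UpdateExpressionExecutor and the shape of the `table` argument (the pytest fixture these tests accept) are assumptions on my part, since the executor test module's import block is not shown in this diff.

    from moto.dynamodb2.models import Item, DynamoType
    from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
    from moto.dynamodb2.parsing.validators import UpdateExpressionValidator
    # Assumed location for the executor class used in the tests above.
    from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor

    def apply_update_expression(table, item, expression, names=None, values=None):
        # Stage 1: tokenize and parse into an AST. Malformed input raises
        # InvalidTokenException / InvalidExpressionAttributeNameKey here.
        ast = UpdateExpressionParser.make(expression)
        # Stage 2: validate the AST against the concrete item, substituting
        # expression attribute names and values. This is where the
        # ExpressionAttributeValueNotDefined, AttributeIsReservedKeyword,
        # IncorrectOperandType, ... errors tested above originate.
        validated_ast = UpdateExpressionValidator(
            ast,
            expression_attribute_names=names,
            expression_attribute_values=values,
            item=item,
            table=table,
        ).validate()
        # Stage 3: execute the validated AST, mutating the item in place.
        UpdateExpressionExecutor(validated_ast, item, names).execute()
        return item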
diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index 8fad0ff23..70efc5289 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals, print_function -from nose.tools import assert_raises +import pytest import boto3 from moto import mock_dynamodb2, mock_dynamodbstreams @@ -134,6 +134,7 @@ class TestCore: "id": {"S": "entry1"}, "first_col": {"S": "bar"}, "second_col": {"S": "baz"}, + "a": {"L": [{"M": {"b": {"S": "bar1"}}}]}, }, ) conn.delete_item(TableName="test-streams", Key={"id": {"S": "entry1"}}) @@ -154,7 +155,7 @@ class TestCore: assert len(resp["Records"]) == 3 assert resp["Records"][0]["eventName"] == "INSERT" assert resp["Records"][1]["eventName"] == "MODIFY" - assert resp["Records"][2]["eventName"] == "DELETE" + assert resp["Records"][2]["eventName"] == "REMOVE" sequence_number_modify = resp["Records"][1]["dynamodb"]["SequenceNumber"] @@ -174,7 +175,7 @@ class TestCore: resp = conn.get_records(ShardIterator=iterator_id) assert len(resp["Records"]) == 2 assert resp["Records"][0]["eventName"] == "MODIFY" - assert resp["Records"][1]["eventName"] == "DELETE" + assert resp["Records"][1]["eventName"] == "REMOVE" # check that if we get the shard iterator AFTER_SEQUENCE_NUMBER will get the DELETE event resp = conn.get_shard_iterator( @@ -186,7 +187,7 @@ class TestCore: iterator_id = resp["ShardIterator"] resp = conn.get_records(ShardIterator=iterator_id) assert len(resp["Records"]) == 1 - assert resp["Records"][0]["eventName"] == "DELETE" + assert resp["Records"][0]["eventName"] == "REMOVE" class TestEdges: @@ -223,7 +224,7 @@ class TestEdges: assert "LatestStreamLabel" in resp["TableDescription"] # now try to enable it again - with assert_raises(conn.exceptions.ResourceInUseException): + with pytest.raises(conn.exceptions.ResourceInUseException): resp = conn.update_table( TableName="test-streams", StreamSpecification={ diff --git a/tests/test_ec2/test_account_attributes.py b/tests/test_ec2/test_account_attributes.py index a3135f22e..41c71def5 100644 --- a/tests/test_ec2/test_account_attributes.py +++ b/tests/test_ec2/test_account_attributes.py @@ -1,37 +1,37 @@ -from __future__ import unicode_literals -import boto3 -from moto import mock_ec2 -import sure # noqa - - -@mock_ec2 -def test_describe_account_attributes(): - conn = boto3.client("ec2", region_name="us-east-1") - response = conn.describe_account_attributes() - expected_attribute_values = [ - { - "AttributeValues": [{"AttributeValue": "5"}], - "AttributeName": "vpc-max-security-groups-per-interface", - }, - { - "AttributeValues": [{"AttributeValue": "20"}], - "AttributeName": "max-instances", - }, - { - "AttributeValues": [{"AttributeValue": "EC2"}, {"AttributeValue": "VPC"}], - "AttributeName": "supported-platforms", - }, - { - "AttributeValues": [{"AttributeValue": "none"}], - "AttributeName": "default-vpc", - }, - { - "AttributeValues": [{"AttributeValue": "5"}], - "AttributeName": "max-elastic-ips", - }, - { - "AttributeValues": [{"AttributeValue": "5"}], - "AttributeName": "vpc-max-elastic-ips", - }, - ] - response["AccountAttributes"].should.equal(expected_attribute_values) +from __future__ import unicode_literals +import boto3 +from moto import mock_ec2 +import sure # noqa + + +@mock_ec2 +def test_describe_account_attributes(): + conn = boto3.client("ec2", region_name="us-east-1") + response = conn.describe_account_attributes() + 
expected_attribute_values = [ + { + "AttributeValues": [{"AttributeValue": "5"}], + "AttributeName": "vpc-max-security-groups-per-interface", + }, + { + "AttributeValues": [{"AttributeValue": "20"}], + "AttributeName": "max-instances", + }, + { + "AttributeValues": [{"AttributeValue": "EC2"}, {"AttributeValue": "VPC"}], + "AttributeName": "supported-platforms", + }, + { + "AttributeValues": [{"AttributeValue": "none"}], + "AttributeName": "default-vpc", + }, + { + "AttributeValues": [{"AttributeValue": "5"}], + "AttributeName": "max-elastic-ips", + }, + { + "AttributeValues": [{"AttributeValue": "5"}], + "AttributeName": "vpc-max-elastic-ips", + }, + ] + response["AccountAttributes"].should.equal(expected_attribute_values) diff --git a/tests/test_ec2/test_amazon_dev_pay.py b/tests/test_ec2/test_amazon_dev_pay.py index 38e1eb751..1dd9cc74e 100644 --- a/tests/test_ec2/test_amazon_dev_pay.py +++ b/tests/test_ec2/test_amazon_dev_pay.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_amazon_dev_pay(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_amazon_dev_pay(): + pass diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index f65352c7c..db1e263b3 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -6,12 +6,13 @@ import boto3 from boto.exception import EC2ResponseError from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 from moto.ec2.models import AMIS, OWNER_ID +from moto.core import ACCOUNT_ID from tests.helpers import requires_boto_gte @@ -26,13 +27,13 @@ def test_ami_create_and_delete(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: image_id = conn.create_image( instance.id, "test-ami", "this is a test ami", dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set" ) @@ -75,22 +76,22 @@ def test_ami_create_and_delete(): root_mapping.should_not.be.none # Deregister - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = conn.deregister_image(image_id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set" ) success = conn.deregister_image(image_id) success.should.be.true - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.deregister_image(image_id) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - 
cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.14.0") @@ -111,7 +112,7 @@ def test_ami_copy(): # Boto returns a 'CopyImage' object with an image_id attribute here. Use # the image_id to fetch the full info. - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: copy_image_ref = conn.copy_image( source_image.region.name, source_image.id, @@ -119,9 +120,9 @@ def test_ami_copy(): "this is a test copy ami", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set" ) @@ -151,28 +152,28 @@ def test_ami_copy(): ) # Copy from non-existent source ID. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.copy_image( source_image.region.name, "ami-abcd1234", "test-copy-ami", "this is a test copy ami", ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Copy from non-existent source region. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: invalid_region = ( "us-east-1" if (source_image.region.name != "us-east-1") else "us-west-1" ) conn.copy_image( invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami" ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -207,11 +208,11 @@ def test_ami_tagging(): conn.create_image(instance.id, "test-ami", "this is a test ami") image = conn.get_all_images()[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: image.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -232,11 +233,11 @@ def test_ami_create_from_missing_instance(): conn = boto.connect_ec2("the_key", "the_secret") args = ["i-abcdefg", "test-ami", "this is a test ami"] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_image(*args) - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -251,6 +252,19 @@ def test_ami_pulls_attributes_from_instance(): 
image.kernel_id.should.equal("test-kernel") +@mock_ec2_deprecated +def test_ami_uses_account_id_if_valid_access_key_is_supplied(): + access_key = "AKIAXXXXXXXXXXXXXXXX" + conn = boto.connect_ec2(access_key, "the_secret") + reservation = conn.run_instances("ami-1234abcd") + instance = reservation.instances[0] + instance.modify_attribute("kernel", "test-kernel") + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + images = conn.get_all_images(owners=["self"]) + [(ami.id, ami.owner_id) for ami in images].should.equal([(image_id, ACCOUNT_ID)]) + + @mock_ec2_deprecated def test_ami_filters(): conn = boto.connect_ec2("the_key", "the_secret") @@ -339,22 +353,22 @@ def test_ami_filtering_via_tag(): def test_getting_missing_ami(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_image("ami-missing") - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_getting_malformed_ami(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_image("foo-missing") - cm.exception.code.should.equal("InvalidAMIID.Malformed") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.Malformed") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -385,11 +399,11 @@ def test_ami_attribute_group_permissions(): } # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -602,9 +616,9 @@ def test_ami_describe_executable_users_and_filter(): @mock_ec2_deprecated def test_ami_attribute_user_and_group_permissions(): """ - Boto supports adding/removing both users and groups at the same time. - Just spot-check this -- input variations, idempotency, etc are validated - via user-specific and group-specific tests above. + Boto supports adding/removing both users and groups at the same time. + Just spot-check this -- input variations, idempotency, etc are validated + via user-specific and group-specific tests above. 
""" conn = boto.connect_ec2("the_key", "the_secret") reservation = conn.run_instances("ami-1234abcd") @@ -664,86 +678,86 @@ def test_ami_attribute_error_cases(): image = conn.get_image(image_id) # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", groups="everyone" ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that isn't an integer. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", user_ids="12345678901A", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that is > length 12. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", user_ids="1234567890123", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that is < length 12. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", user_ids="12345678901", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with one invalid user ID among other valid IDs, ensure no # partial changes. 
-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.modify_image_attribute(
             image.id,
             attribute="launchPermission",
             operation="add",
             user_ids=["123456789011", "foo", "123456789022"],
         )
-    cm.exception.code.should.equal("InvalidAMIAttributeItemValue")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidAMIAttributeItemValue")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     attributes = conn.get_image_attribute(image.id, attribute="launchPermission")
     attributes.attrs.should.have.length_of(0)
 
     # Error: Add with invalid image ID
-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.modify_image_attribute(
             "ami-abcd1234", attribute="launchPermission", operation="add", groups="all"
         )
-    cm.exception.code.should.equal("InvalidAMIID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidAMIID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     # Error: Remove with invalid image ID
-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.modify_image_attribute(
             "ami-abcd1234",
             attribute="launchPermission",
             operation="remove",
             groups="all",
         )
-    cm.exception.code.should.equal("InvalidAMIID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidAMIID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2
@@ -751,14 +765,24 @@ def test_ami_describe_non_existent():
     ec2 = boto3.resource("ec2", region_name="us-west-1")
     # Valid pattern but non-existent id
     img = ec2.Image("ami-abcd1234")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         img.load()
     # Invalid ami pattern
     img = ec2.Image("not_an_ami_id")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         img.load()
 
 
+@mock_ec2
+def test_ami_registration():
+    ec2 = boto3.client("ec2", region_name="us-east-1")
+    image_id = ec2.register_image(Name="test-register-image").get("ImageId", "")
+    images = ec2.describe_images(ImageIds=[image_id]).get("Images", [])
+    assert images[0]["Name"] == "test-register-image", "No image was registered."
+    assert images[0]["RootDeviceName"] == "/dev/sda1", "Wrong root device name."
+    assert images[0]["State"] == "available", "State should be available."
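# A minimal usage sketch of the RegisterImage mock exercised by the test above,
# assuming moto accepts the standard RegisterImage parameters shown here
# (Description, Architecture, RootDeviceName); names and values are illustrative.
import boto3
from moto import mock_ec2


@mock_ec2
def register_image_example():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    # Register an AMI against the mocked backend and read it back.
    image_id = ec2.register_image(
        Name="example-image",
        Description="illustrative registration",
        Architecture="x86_64",
        RootDeviceName="/dev/sda1",
    )["ImageId"]
    return ec2.describe_images(ImageIds=[image_id])["Images"][0]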
+
+
 @mock_ec2
 def test_ami_filter_wildcard():
     ec2_resource = boto3.resource("ec2", region_name="us-west-1")
@@ -773,7 +807,7 @@ def test_ami_filter_wildcard():
     instance.create_image(Name="not-matching-image")
 
     my_images = ec2_client.describe_images(
-        Owners=["111122223333"], Filters=[{"Name": "name", "Values": ["test*"]}]
+        Owners=[ACCOUNT_ID], Filters=[{"Name": "name", "Values": ["test*"]}]
     )["Images"]
     my_images.should.have.length_of(1)
 
@@ -829,7 +863,11 @@ def test_ami_snapshots_have_correct_owner():
             ]
         existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, [])
         owner_id_to_snapshot_ids[owner_id] = existing_snapshot_ids + snapshot_ids
-
+        # Assert the volume type reported in each image's block device mapping.
+        assert (
+            image.get("BlockDeviceMappings", [{}])[0].get("Ebs", {}).get("VolumeType")
+            == "standard"
+        )
     for owner_id in owner_id_to_snapshot_ids:
         snapshots_rseponse = ec2_client.describe_snapshots(
             SnapshotIds=owner_id_to_snapshot_ids[owner_id]
diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py
index d5355f3b1..830d4c2bf 100644
--- a/tests/test_ec2/test_availability_zones_and_regions.py
+++ b/tests/test_ec2/test_availability_zones_and_regions.py
@@ -40,6 +40,15 @@ def test_boto3_describe_regions():
     resp = ec2.describe_regions(RegionNames=[test_region])
     resp["Regions"].should.have.length_of(1)
     resp["Regions"][0].should.have.key("RegionName").which.should.equal(test_region)
+    resp["Regions"][0].should.have.key("OptInStatus").which.should.equal(
+        "opt-in-not-required"
+    )
+
+    test_region = "ap-east-1"
+    resp = ec2.describe_regions(RegionNames=[test_region])
+    resp["Regions"].should.have.length_of(1)
+    resp["Regions"][0].should.have.key("RegionName").which.should.equal(test_region)
+    resp["Regions"][0].should.have.key("OptInStatus").which.should.equal("not-opted-in")
 
 
 @mock_ec2
@@ -52,3 +61,15 @@ def test_boto3_availability_zones():
     resp = conn.describe_availability_zones()
     for rec in resp["AvailabilityZones"]:
         rec["ZoneName"].should.contain(region)
+
+
+@mock_ec2
+def test_boto3_zoneId_in_availability_zones():
+    conn = boto3.client("ec2", "us-east-1")
+    resp = conn.describe_availability_zones()
+    for rec in resp["AvailabilityZones"]:
+        rec.get("ZoneId").should.contain("use1")
+    conn = boto3.client("ec2", "us-west-1")
+    resp = conn.describe_availability_zones()
+    for rec in resp["AvailabilityZones"]:
+        rec.get("ZoneId").should.contain("usw1")
diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py
index a676a2b5d..8d94a9a94 100644
--- a/tests/test_ec2/test_customer_gateways.py
+++ b/tests/test_ec2/test_customer_gateways.py
@@ -1,8 +1,7 @@
 from __future__ import unicode_literals
 import boto
 import sure  # noqa
-from nose.tools import assert_raises
-from nose.tools import assert_false
+import pytest
 from boto.exception import EC2ResponseError
 from moto import mock_ec2_deprecated
 
@@ -45,5 +44,5 @@ def test_delete_customer_gateways():
 @mock_ec2_deprecated
 def test_delete_customer_gateways_bad_id():
     conn = boto.connect_vpc("the_key", "the_secret")
-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.delete_customer_gateway("cgw-0123abcd")
diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py
index 4aaceaa07..85bc7f244 100644
--- a/tests/test_ec2/test_dhcp_options.py
+++ b/tests/test_ec2/test_dhcp_options.py
@@ -1,8 +1,7 @@
 from __future__ import unicode_literals
 
-# Ensure 'assert_raises' context manager support for Python 2.6
-import 
tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto3 import boto @@ -33,11 +32,11 @@ def test_dhcp_options_associate_invalid_dhcp_id(): conn = boto.connect_vpc("the_key", "the_secret") vpc = conn.create_vpc("10.0.0.0/16") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options("foo", vpc.id) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -46,11 +45,11 @@ def test_dhcp_options_associate_invalid_vpc_id(): conn = boto.connect_vpc("the_key", "the_secret") dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options(dhcp_options.id, "foo") - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -64,19 +63,19 @@ def test_dhcp_options_delete_with_vpc(): rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) rval.should.be.equal(True) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options(dhcp_options_id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none vpc.delete() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_options_id]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -100,17 +99,17 @@ def test_create_dhcp_options_invalid_options(): conn = boto.connect_vpc("the_key", "the_secret") servers = ["f", "f", "f", "f", "f"] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(ntp_servers=servers) - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(netbios_node_type="0") - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -131,11 +130,11 @@ def test_describe_dhcp_options_invalid_id(): """get error on invalid dhcp_option_id lookup""" conn = 
boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options(["1"]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -149,11 +148,11 @@ def test_delete_dhcp_options(): conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_option.id]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -162,11 +161,11 @@ def test_delete_dhcp_options_invalid_id(): conn.create_dhcp_options() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("dopt-abcd1234") - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -175,11 +174,11 @@ def test_delete_dhcp_options_malformed_id(): conn.create_dhcp_options() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("foo-abcd1234") - cm.exception.code.should.equal("InvalidDhcpOptionsId.Malformed") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionsId.Malformed") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated diff --git a/tests/test_ec2/test_ec2_cloudformation.py b/tests/test_ec2/test_ec2_cloudformation.py new file mode 100644 index 000000000..6fa27140b --- /dev/null +++ b/tests/test_ec2/test_ec2_cloudformation.py @@ -0,0 +1,103 @@ +from moto import mock_cloudformation_deprecated, mock_ec2_deprecated +from moto import mock_cloudformation, mock_ec2 +from tests.test_cloudformation.fixtures import vpc_eni +import boto +import boto.ec2 +import boto.cloudformation +import boto.vpc +import boto3 +import json +import sure # noqa + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_elastic_network_interfaces_cloudformation(): + template = vpc_eni.template + template_json = json.dumps(template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eni = ec2_conn.get_all_network_interfaces()[0] + eni.private_ip_addresses.should.have.length_of(1) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eni = [ + resource + for resource in resources + if resource.resource_type == "AWS::EC2::NetworkInterface" + ][0] + cfn_eni.physical_resource_id.should.equal(eni.id) + + outputs = {output.key: output.value for output in stack.outputs} + outputs["ENIIpAddress"].should.equal(eni.private_ip_addresses[0].private_ip_address) + + +@mock_ec2 +@mock_cloudformation +def 
test_volume_size_through_cloudformation(): + ec2 = boto3.client("ec2", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + volume_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-d3adb33f", + "KeyName": "dummy", + "InstanceType": "t2.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}} + ], + "Tags": [ + {"Key": "foo", "Value": "bar"}, + {"Key": "blah", "Value": "baz"}, + ], + }, + } + }, + } + template_json = json.dumps(volume_template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + instances = ec2.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_subnet_tags_through_cloudformation(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + "Tags": [ + {"Key": "foo", "Value": "bar"}, + {"Key": "blah", "Value": "baz"}, + ], + }, + } + }, + } + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack("test_stack", template_body=template_json) + + subnet = vpc_conn.get_all_subnets(filters={"cidrBlock": "10.0.0.0/24"})[0] + subnet.tags["foo"].should.equal("bar") + subnet.tags["blah"].should.equal("baz") diff --git a/tests/test_ec2/test_ec2_core.py b/tests/test_ec2/test_ec2_core.py index baffc4882..78b780d97 100644 --- a/tests/test_ec2/test_ec2_core.py +++ b/tests/test_ec2/test_ec2_core.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 3c7e17ec8..d0b1dee2d 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,19 +1,17 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -from moto.ec2 import ec2_backends import boto import boto3 -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError -from freezegun import freeze_time -import sure # noqa -from moto import mock_ec2_deprecated, mock_ec2 +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest +import sure # noqa +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +from moto import mock_ec2, mock_ec2_deprecated +from moto.ec2 import ec2_backends from moto.ec2.models import OWNER_ID +from moto.kms import mock_kms @mock_ec2_deprecated @@ -31,11 +29,11 @@ def test_create_and_delete_volume(): volume = current_volume[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.delete(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + 
ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -46,21 +44,60 @@ def test_create_and_delete_volume(): my_volume.should.have.length_of(0) # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: volume.delete() - cm.exception.code.should.equal("InvalidVolume.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVolume.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_attached_volume(): + conn = boto.ec2.connect_to_region("us-east-1") + reservation = conn.run_instances("ami-1234abcd") + # create an instance + instance = reservation.instances[0] + # create a volume + volume = conn.create_volume(80, "us-east-1a") + # attach volume to instance + volume.attach(instance.id, "/dev/sdh") + + volume.update() + volume.volume_state().should.equal("in-use") + volume.attachment_state().should.equal("attached") + + volume.attach_data.instance_id.should.equal(instance.id) + + # attempt to delete volume + # assert raises VolumeInUseError + with pytest.raises(EC2ResponseError) as ex: + volume.delete() + ex.value.error_code.should.equal("VolumeInUse") + ex.value.status.should.equal(400) + ex.value.message.should.equal( + "Volume {0} is currently attached to {1}".format(volume.id, instance.id) + ) + + volume.detach() + + volume.update() + volume.volume_state().should.equal("available") + + volume.delete() + + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.ec2.connect_to_region("us-east-1") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -70,11 +107,11 @@ def test_create_encrypted_volume(): conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -95,11 +132,11 @@ def test_filter_volume_by_id(): vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) vol2.should.have.length_of(2) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
conn.get_all_volumes(volume_ids=["vol-does_not_exist"]) - cm.exception.code.should.equal("InvalidVolume.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVolume.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -220,11 +257,11 @@ def test_volume_attach_and_detach(): volume.update() volume.volume_state().should.equal("available") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.attach(instance.id, "/dev/sdh", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -236,11 +273,11 @@ def test_volume_attach_and_detach(): volume.attach_data.instance_id.should.equal(instance.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.detach(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -249,23 +286,23 @@ def test_volume_attach_and_detach(): volume.update() volume.volume_state().should.equal("available") - with assert_raises(EC2ResponseError) as cm1: + with pytest.raises(EC2ResponseError) as cm1: volume.attach("i-1234abcd", "/dev/sdh") - cm1.exception.code.should.equal("InvalidInstanceID.NotFound") - cm1.exception.status.should.equal(400) - cm1.exception.request_id.should_not.be.none + cm1.value.code.should.equal("InvalidInstanceID.NotFound") + cm1.value.status.should.equal(400) + cm1.value.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm2: + with pytest.raises(EC2ResponseError) as cm2: conn.detach_volume(volume.id, instance.id, "/dev/sdh") - cm2.exception.code.should.equal("InvalidAttachment.NotFound") - cm2.exception.status.should.equal(400) - cm2.exception.request_id.should_not.be.none + cm2.value.code.should.equal("InvalidAttachment.NotFound") + cm2.value.status.should.equal(400) + cm2.value.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm3: + with pytest.raises(EC2ResponseError) as cm3: conn.detach_volume(volume.id, "i-1234abcd", "/dev/sdh") - cm3.exception.code.should.equal("InvalidInstanceID.NotFound") - cm3.exception.status.should.equal(400) - cm3.exception.request_id.should_not.be.none + cm3.value.code.should.equal("InvalidInstanceID.NotFound") + cm3.value.status.should.equal(400) + cm3.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -273,11 +310,11 @@ def test_create_snapshot(): conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a dryrun snapshot", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + 
ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set" ) @@ -301,11 +338,11 @@ def test_create_snapshot(): conn.get_all_snapshots().should.have.length_of(num_snapshots) # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: snapshot.delete() - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -343,11 +380,11 @@ def test_filter_snapshot_by_id(): s.volume_id.should.be.within([volume2.id, volume3.id]) s.region.name.should.equal(conn.region.name) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_snapshots(snapshot_ids=["snap-does_not_exist"]) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -445,11 +482,11 @@ def test_snapshot_attribute(): # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -467,11 +504,11 @@ def test_snapshot_attribute(): ) # Remove 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -488,54 +525,211 @@ def test_snapshot_attribute(): ).should_not.throw(EC2ResponseError) # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_snapshot_attribute( snapshot.id, attribute="createVolumePermission", operation="add", groups="everyone", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
conn.modify_snapshot_attribute( "snapshot-abcd1234", attribute="createVolumePermission", operation="add", groups="all", ) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Remove with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_snapshot_attribute( "snapshot-abcd1234", attribute="createVolumePermission", operation="remove", groups="all", ) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none - # Error: Add or remove with user ID instead of group - conn.modify_snapshot_attribute.when.called_with( - snapshot.id, - attribute="createVolumePermission", - operation="add", - user_ids=["user"], - ).should.throw(NotImplementedError) - conn.modify_snapshot_attribute.when.called_with( - snapshot.id, - attribute="createVolumePermission", - operation="remove", - user_ids=["user"], - ).should.throw(NotImplementedError) + +@mock_ec2 +def test_modify_snapshot_attribute(): + import copy + + ec2_client = boto3.client("ec2", region_name="us-east-1") + response = ec2_client.create_volume(Size=80, AvailabilityZone="us-east-1a") + volume = boto3.resource("ec2", region_name="us-east-1").Volume(response["VolumeId"]) + snapshot = volume.create_snapshot() + + # Baseline + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert not attributes[ + "CreateVolumePermissions" + ], "Snapshot should have no permissions." + + ADD_GROUP_ARGS = { + "SnapshotId": snapshot.id, + "Attribute": "createVolumePermission", + "OperationType": "add", + "GroupNames": ["all"], + } + + REMOVE_GROUP_ARGS = { + "SnapshotId": snapshot.id, + "Attribute": "createVolumePermission", + "OperationType": "remove", + "GroupNames": ["all"], + } + + # Add 'all' group and confirm + with pytest.raises(ClientError) as cm: + ec2_client.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"DryRun": True})) + + cm.value.response["Error"]["Code"].should.equal("DryRunOperation") + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + ec2_client.modify_snapshot_attribute(**ADD_GROUP_ARGS) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert attributes["CreateVolumePermissions"] == [ + {"Group": "all"} + ], "This snapshot should have public group permissions." + + # Add is idempotent + ec2_client.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS + ).should_not.throw(ClientError) + assert attributes["CreateVolumePermissions"] == [ + {"Group": "all"} + ], "This snapshot should have public group permissions." 
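    # Sketch: to check add-idempotency against live state rather than the
    # previously fetched dict, one could refresh before asserting, e.g.:
    #
    #     attributes = ec2_client.describe_snapshot_attribute(
    #         SnapshotId=snapshot.id, Attribute="createVolumePermission"
    #     )
    #     assert attributes["CreateVolumePermissions"] == [{"Group": "all"}]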
+
+    # Remove 'all' group and confirm
+    with pytest.raises(ClientError) as cm:
+        ec2_client.modify_snapshot_attribute(
+            **dict(REMOVE_GROUP_ARGS, **{"DryRun": True})
+        )
+    cm.value.response["Error"]["Code"].should.equal("DryRunOperation")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+
+    ec2_client.modify_snapshot_attribute(**REMOVE_GROUP_ARGS)
+
+    attributes = ec2_client.describe_snapshot_attribute(
+        SnapshotId=snapshot.id, Attribute="createVolumePermission"
+    )
+    assert not attributes[
+        "CreateVolumePermissions"
+    ], "This snapshot should have no permissions."
+
+    # Remove is idempotent
+    ec2_client.modify_snapshot_attribute.when.called_with(
+        **REMOVE_GROUP_ARGS
+    ).should_not.throw(ClientError)
+    assert not attributes[
+        "CreateVolumePermissions"
+    ], "This snapshot should have no permissions."
+
+    # Error: Add with group != 'all'
+    with pytest.raises(ClientError) as cm:
+        ec2_client.modify_snapshot_attribute(
+            SnapshotId=snapshot.id,
+            Attribute="createVolumePermission",
+            OperationType="add",
+            GroupNames=["everyone"],
+        )
+    cm.value.response["Error"]["Code"].should.equal("InvalidAMIAttributeItemValue")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+
+    # Error: Add with invalid snapshot ID
+    with pytest.raises(ClientError) as cm:
+        ec2_client.modify_snapshot_attribute(
+            SnapshotId="snapshot-abcd1234",
+            Attribute="createVolumePermission",
+            OperationType="add",
+            GroupNames=["all"],
+        )
+    cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+
+    # Error: Remove with invalid snapshot ID
+    with pytest.raises(ClientError) as cm:
+        ec2_client.modify_snapshot_attribute(
+            SnapshotId="snapshot-abcd1234",
+            Attribute="createVolumePermission",
+            OperationType="remove",
+            GroupNames=["all"],
+        )
+    cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+
+    # Test adding a user ID.
+    ec2_client.modify_snapshot_attribute(
+        SnapshotId=snapshot.id,
+        Attribute="createVolumePermission",
+        OperationType="add",
+        UserIds=["1234567891"],
+    )
+
+    attributes = ec2_client.describe_snapshot_attribute(
+        SnapshotId=snapshot.id, Attribute="createVolumePermission"
+    )
+    assert len(attributes["CreateVolumePermissions"]) == 1
+
+    # Test adding the same user ID again along with an additional one.
+    ec2_client.modify_snapshot_attribute(
+        SnapshotId=snapshot.id,
+        Attribute="createVolumePermission",
+        OperationType="add",
+        UserIds=["1234567891", "2345678912"],
+    )
+
+    attributes = ec2_client.describe_snapshot_attribute(
+        SnapshotId=snapshot.id, Attribute="createVolumePermission"
+    )
+    assert len(attributes["CreateVolumePermissions"]) == 2
+
+    # Test removing both user IDs.
+    ec2_client.modify_snapshot_attribute(
+        SnapshotId=snapshot.id,
+        Attribute="createVolumePermission",
+        OperationType="remove",
+        UserIds=["1234567891", "2345678912"],
+    )
+
+    attributes = ec2_client.describe_snapshot_attribute(
+        SnapshotId=snapshot.id, Attribute="createVolumePermission"
+    )
+    assert len(attributes["CreateVolumePermissions"]) == 0
+
+    # Idempotency when removing users.
+ ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="remove", + UserIds=["1234567891"], + ) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert len(attributes["CreateVolumePermissions"]) == 0 @mock_ec2_deprecated @@ -544,11 +738,11 @@ def test_create_volume_from_snapshot(): volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot("a test snapshot") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a test snapshot", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set" ) @@ -590,13 +784,13 @@ def test_modify_attribute_blockDeviceMapping(): instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute( "blockDeviceMapping", {"/dev/sda1": True}, dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -613,11 +807,11 @@ def test_volume_tag_escaping(): vol = conn.create_volume(10, "us-east-1a") snapshot = conn.create_snapshot(vol.id, "Desc") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot.add_tags({"key": ""}, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] @@ -638,22 +832,26 @@ def test_volume_property_hidden_when_no_tags_exist(): volume_response.get("Tags").should.equal(None) -@freeze_time @mock_ec2 def test_copy_snapshot(): ec2_client = boto3.client("ec2", region_name="eu-west-1") dest_ec2_client = boto3.client("ec2", region_name="eu-west-2") volume_response = ec2_client.create_volume(AvailabilityZone="eu-west-1a", Size=10) + tag_spec = [ + {"ResourceType": "snapshot", "Tags": [{"Key": "key", "Value": "value"}]} + ] create_snapshot_response = ec2_client.create_snapshot( - VolumeId=volume_response["VolumeId"] + VolumeId=volume_response["VolumeId"], TagSpecifications=tag_spec ) copy_snapshot_response = dest_ec2_client.copy_snapshot( SourceSnapshotId=create_snapshot_response["SnapshotId"], SourceRegion="eu-west-1", + TagSpecifications=tag_spec, ) + copy_snapshot_response["Tags"].should.equal(tag_spec[0]["Tags"]) ec2 = boto3.resource("ec2", region_name="eu-west-1") dest_ec2 = boto3.resource("ec2", region_name="eu-west-2") @@ -679,25 +877,25 @@ def test_copy_snapshot(): 
getattr(source, attrib).should.equal(getattr(dest, attrib)) # Copy from non-existent source ID. - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: create_snapshot_error = ec2_client.create_snapshot(VolumeId="vol-abcd1234") - cm.exception.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") - cm.exception.response["Error"]["Message"].should.equal( - "The volume 'vol-abcd1234' does not exist." - ) - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") + cm.value.response["Error"]["Message"].should.equal( + "The volume 'vol-abcd1234' does not exist." + ) + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Copy from non-existent source region. - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: copy_snapshot_response = dest_ec2_client.copy_snapshot( SourceSnapshotId=create_snapshot_response["SnapshotId"], SourceRegion="eu-west-2", ) - cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") - cm.exception.response["Error"]["Message"].should.be.none - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.value.response["Error"]["Message"].should.be.none + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_ec2 @@ -716,3 +914,65 @@ def test_search_for_many_snapshots(): snapshots_response = ec2_client.describe_snapshots(SnapshotIds=snapshot_ids) assert len(snapshots_response["Snapshots"]) == len(snapshot_ids) + + +@mock_ec2 +def test_create_unencrypted_volume_with_kms_key_fails(): + resource = boto3.resource("ec2", region_name="us-east-1") + with pytest.raises(ClientError) as ex: + resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=False, KmsKeyId="key", Size=10 + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterDependency") + ex.value.response["Error"]["Message"].should.contain("KmsKeyId") + + +@mock_kms +@mock_ec2 +def test_create_encrypted_volume_without_kms_key_should_use_default_key(): + kms = boto3.client("kms", region_name="us-east-1") + # Default master key for EBS does not exist until needed. + with pytest.raises(ClientError) as ex: + kms.describe_key(KeyId="alias/aws/ebs") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + # Creating an encrypted volume should create (and use) the default key. + resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, Size=10 + ) + default_ebs_key_arn = kms.describe_key(KeyId="alias/aws/ebs")["KeyMetadata"]["Arn"] + volume.kms_key_id.should.equal(default_ebs_key_arn) + volume.encrypted.should.be.true + # Subsequent encrypted volumes should use the now-created default key. 
+ volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, Size=10 + ) + volume.kms_key_id.should.equal(default_ebs_key_arn) + volume.encrypted.should.be.true + + +@mock_ec2 +def test_create_volume_with_kms_key(): + resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, KmsKeyId="key", Size=10 + ) + volume.kms_key_id.should.equal("key") + volume.encrypted.should.be.true + + +@mock_ec2 +def test_kms_key_id_property_hidden_when_volume_not_encrypted(): + client = boto3.client("ec2", region_name="us-east-1") + resp = client.create_volume(AvailabilityZone="us-east-1a", Encrypted=False, Size=10) + resp["Encrypted"].should.be.false + resp.should_not.have.key("KmsKeyId") + resp = client.describe_volumes(VolumeIds=[resp["VolumeId"]]) + resp["Volumes"][0]["Encrypted"].should.be.false + resp["Volumes"][0].should_not.have.key("KmsKeyId") + resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=False, Size=10 + ) + volume.encrypted.should.be.false + volume.kms_key_id.should.be.none diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index 886cdff56..8edd92e65 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto import boto3 @@ -21,11 +20,11 @@ def test_eip_allocate_classic(): """Allocate/release Classic EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: standard = conn.allocate_address(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -35,11 +34,11 @@ def test_eip_allocate_classic(): standard.instance_id.should.be.none standard.domain.should.be.equal("standard") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: standard.release(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -52,11 +51,11 @@ def test_eip_allocate_vpc(): """Allocate/release VPC EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: vpc = conn.allocate_address(domain="vpc", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + 
ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -84,11 +83,11 @@ def test_eip_allocate_invalid_domain(): """Allocate EIP invalid domain""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.allocate_address(domain="bogus") - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -102,19 +101,19 @@ def test_eip_associate_classic(): eip = conn.allocate_address() eip.instance_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(public_ip=eip.public_ip) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.associate_address( instance_id=instance.id, public_ip=eip.public_ip, dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -123,11 +122,11 @@ def test_eip_associate_classic(): eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -153,11 +152,11 @@ def test_eip_associate_vpc(): eip = conn.allocate_address(domain="vpc") eip.instance_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(allocation_id=eip.allocation_id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address(instance_id=instance.id, allocation_id=eip.allocation_id) # no .update() on address ): @@ -169,11 +168,11 @@ def test_eip_associate_vpc(): eip.instance_id.should.be.equal("") eip.association_id.should.be.none - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: eip.release(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - 
ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -241,11 +240,11 @@ def test_eip_associate_network_interface(): eip = conn.allocate_address(domain="vpc") eip.network_interface_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni.id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address(network_interface_id=eni.id, allocation_id=eip.allocation_id) # no .update() on address ): @@ -276,13 +275,13 @@ def test_eip_reassociate(): conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address( instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False ) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address.when.called_with( instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True @@ -312,11 +311,11 @@ def test_eip_reassociate_nic(): conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni2.id, public_ip=eip.public_ip) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address.when.called_with( network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True @@ -336,11 +335,11 @@ def test_eip_associate_invalid_args(): eip = conn.allocate_address() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(instance_id=instance.id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none instance.terminate() @@ -350,11 +349,11 @@ def test_eip_disassociate_bogus_association(): """Disassociate bogus EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address(association_id="bogus") - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + 
cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -362,11 +361,11 @@ def test_eip_release_bogus_eip(): """Release bogus EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.release_address(allocation_id="bogus") - cm.exception.code.should.equal("InvalidAllocationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAllocationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -374,11 +373,11 @@ def test_eip_disassociate_arg_error(): """Invalid arguments disassociate address""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address() - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -386,11 +385,11 @@ def test_eip_release_arg_error(): """Invalid arguments release address""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.release_address() - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -438,11 +437,11 @@ def test_eip_describe_none(): """Error when search for bogus IP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_addresses(addresses=["256.256.256.256"]) - cm.exception.code.should.equal("InvalidAddress.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAddress.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -537,3 +536,48 @@ def test_eip_filters(): service.vpc_addresses.filter(Filters=[{"Name": "domain", "Values": ["vpc"]}]) ) len(addresses).should.equal(3) + + +@mock_ec2 +def test_eip_tags(): + service = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + # Allocate one address without tags + client.allocate_address(Domain="vpc") + # Allocate one address and add tags + alloc_tags = client.allocate_address(Domain="vpc") + with_tags = client.create_tags( + Resources=[alloc_tags["AllocationId"]], + Tags=[{"Key": "ManagedBy", "Value": "MyCode"}], + ) + addresses_with_tags = client.describe_addresses( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["MyCode"]}, + ] + ) + len(addresses_with_tags["Addresses"]).should.equal(1) + addresses_with_tags = list( + service.vpc_addresses.filter( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["MyCode"]}, + ] + ) + ) + len(addresses_with_tags).should.equal(1) + addresses_with_tags = list( + service.vpc_addresses.filter( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["SomethingOther"]}, + ] + ) + ) + 
len(addresses_with_tags).should.equal(0) + addresses = list( + service.vpc_addresses.filter(Filters=[{"Name": "domain", "Values": ["vpc"]}]) + ) + # Expected total is 2, one with and one without tags + len(addresses).should.equal(2) diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 4e502586e..a5bb019b0 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,21 +1,17 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto3 from botocore.exceptions import ClientError import boto -import boto.cloudformation import boto.ec2 from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated from tests.helpers import requires_boto_gte -from tests.test_cloudformation.fixtures import vpc_eni -import json @mock_ec2_deprecated @@ -24,11 +20,11 @@ def test_elastic_network_interfaces(): vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: eni = conn.create_network_interface(subnet.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -41,11 +37,11 @@ def test_elastic_network_interfaces(): eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.startswith("10.").should.be.true - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -54,22 +50,22 @@ def test_elastic_network_interfaces(): all_enis = conn.get_all_network_interfaces() all_enis.should.have.length_of(0) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_network_interface(eni.id) - cm.exception.error_code.should.equal("InvalidNetworkInterfaceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.error_code.should.equal("InvalidNetworkInterfaceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_elastic_network_interfaces_subnet_validation(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_network_interface("subnet-abcd1234") - 
cm.exception.error_code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.error_code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -136,13 +132,13 @@ def test_elastic_network_interfaces_modify_attribute(): eni.groups.should.have.length_of(1) eni.groups[0].id.should.equal(security_group1.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_network_interface_attribute( eni.id, "groupset", [security_group2.id], dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -231,11 +227,11 @@ def test_elastic_network_interfaces_get_by_tag_name(): SubnetId=subnet.id, PrivateIpAddress="10.0.10.5" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: eni1.create_tags(Tags=[{"Key": "Name", "Value": "eni1"}], DryRun=True) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -501,27 +497,3 @@ def test_elastic_network_interfaces_describe_network_interfaces_with_filter(): eni1.private_ip_address ) response["NetworkInterfaces"][0]["Description"].should.equal(eni1.description) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_elastic_network_interfaces_cloudformation(): - template = vpc_eni.template - template_json = json.dumps(template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eni = ec2_conn.get_all_network_interfaces()[0] - eni.private_ip_addresses.should.have.length_of(1) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eni = [ - resource - for resource in resources - if resource.resource_type == "AWS::EC2::NetworkInterface" - ][0] - cfn_eni.physical_resource_id.should.equal(eni.id) - - outputs = {output.key: output.value for output in stack.outputs} - outputs["ENIIpAddress"].should.equal(eni.private_ip_addresses[0].private_ip_address) diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py new file mode 100644 index 000000000..743466eaa --- /dev/null +++ b/tests/test_ec2/test_flow_logs.py @@ -0,0 +1,677 @@ +from __future__ import unicode_literals + +import pytest + +import boto3 + +from botocore.exceptions import ParamValidationError, ClientError +from botocore.parsers import ResponseParserError +import json +import sure # noqa +import random +import sys + +from moto import ( + settings, + mock_cloudformation, + mock_ec2, + mock_s3, + mock_logs, +) +from moto.core import ACCOUNT_ID +from 
moto.ec2.exceptions import FilterNotImplementedError + + +@mock_s3 +@mock_ec2 +def test_create_flow_logs_s3(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + DryRun=True, + ) + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(1) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + + flow_log = flow_logs[0] + + flow_log["FlowLogId"].should.equal(response[0]) + flow_log["DeliverLogsStatus"].should.equal("SUCCESS") + flow_log["FlowLogStatus"].should.equal("ACTIVE") + flow_log["ResourceId"].should.equal(vpc["VpcId"]) + flow_log["TrafficType"].should.equal("ALL") + flow_log["LogDestinationType"].should.equal("s3") + flow_log["LogDestination"].should.equal("arn:aws:s3:::" + bucket.name) + flow_log["LogFormat"].should.equal( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + ) + flow_log["MaxAggregationInterval"].should.equal(600) + + +@mock_logs +@mock_ec2 +def test_create_flow_logs_cloud_watch(): + client = boto3.client("ec2", region_name="us-west-1") + logs_client = boto3.client("logs", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + logs_client.create_log_group(logGroupName="test-group") + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="cloud-watch-logs", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + DryRun=True, + ) + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="cloud-watch-logs", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"] + response.should.have.length_of(1) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + + flow_log = flow_logs[0] + + flow_log["FlowLogId"].should.equal(response[0]) + flow_log["DeliverLogsStatus"].should.equal("SUCCESS") + flow_log["FlowLogStatus"].should.equal("ACTIVE") + 
flow_log["ResourceId"].should.equal(vpc["VpcId"]) + flow_log["TrafficType"].should.equal("ALL") + flow_log["LogDestinationType"].should.equal("cloud-watch-logs") + flow_log["LogGroupName"].should.equal("test-group") + flow_log["DeliverLogsPermissionArn"].should.equal( + "arn:aws:iam::" + ACCOUNT_ID + ":role/test-role" + ) + flow_log["LogFormat"].should.equal( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + ) + flow_log["MaxAggregationInterval"].should.equal(600) + + +@mock_s3 +@mock_ec2 +def test_create_flow_log_create(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1",}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + LogFormat="${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}", + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + flow_logs[0]["LogFormat"].should.equal( + "${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}" + ) + flow_logs[1]["LogFormat"].should.equal( + "${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}" + ) + + +@mock_s3 +@mock_ec2 +def test_delete_flow_logs(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + client.delete_flow_logs(FlowLogIds=[response[0]]) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["FlowLogId"].should.equal(response[1]) + + client.delete_flow_logs(FlowLogIds=[response[1]]) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(0) + + +@mock_s3 +@mock_ec2 +def test_delete_flow_logs_delete_many(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + 
vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + client.delete_flow_logs(FlowLogIds=response) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(0) + + +@mock_ec2 +def test_delete_flow_logs_non_existing(): + client = boto3.client("ec2", region_name="us-west-1") + + with pytest.raises(ClientError) as ex: + client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d"]) + ex.value.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "These flow log ids in the input list are not found: [TotalCount: 1] fl-1a2b3c4d" + ) + + with pytest.raises(ClientError) as ex: + client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d", "fl-2b3c4d5e"]) + ex.value.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "These flow log ids in the input list are not found: [TotalCount: 2] fl-1a2b3c4d fl-2b3c4d5e" + ) + + +@mock_ec2 +def test_create_flow_logs_unsuccessful(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::non-existing-bucket", + ) + response["FlowLogIds"].should.have.length_of(0) + response["Unsuccessful"].should.have.length_of(2) + + error1 = response["Unsuccessful"][0]["Error"] + error2 = response["Unsuccessful"][1]["Error"] + + error1["Code"].should.equal("400") + error1["Message"].should.equal( + "LogDestination: non-existing-bucket does not exist." + ) + error2["Code"].should.equal("400") + error2["Message"].should.equal( + "LogDestination: non-existing-bucket does not exist." 
+ ) + + +@mock_s3 +@mock_ec2 +def test_create_flow_logs_invalid_parameters(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + MaxAggregationInterval=10, + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "Invalid Flow Log Max Aggregation Interval" + ) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "LogDestination can't be empty if LogGroupName is not provided." + ) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogGroupName="test", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "LogDestination type must be cloud-watch-logs if LogGroupName is provided." + ) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "DeliverLogsPermissionArn can't be empty if LogDestinationType is cloud-watch-logs." + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(1) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + ) + ex.value.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "Error. There is an existing Flow Log with the same configuration and log destination." 
+ ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"] + response.should.have.length_of(1) + + with pytest.raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + ) + ex.value.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "Error. There is an existing Flow Log with the same configuration and log destination." + ) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + +@mock_s3 +@mock_ec2 +@mock_logs +def test_describe_flow_logs_filtering(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + logs_client = boto3.client("logs", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + vpc3 = client.create_vpc(CidrBlock="10.2.0.0/16")["Vpc"] + + subnet1 = client.create_subnet(VpcId=vpc1["VpcId"], CidrBlock="10.0.0.0/18")[ + "Subnet" + ] + + bucket1 = s3.create_bucket( + Bucket="test-flow-logs-1", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + logs_client.create_log_group(logGroupName="test-group") + + fl1 = client.create_flow_logs( + ResourceType="Subnet", + ResourceIds=[subnet1["SubnetId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"][0] + + fl2 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc2["VpcId"]], + TrafficType="Accept", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket1.name, + TagSpecifications=[ + {"ResourceType": "vpc-flow-log", "Tags": [{"Key": "foo", "Value": "bar"}]} + ], + )["FlowLogIds"][0] + + fl3 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc3["VpcId"]], + TrafficType="Reject", + LogGroupName="non-existing-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"][0] + + all_flow_logs = client.describe_flow_logs()["FlowLogs"] + all_flow_logs.should.have.length_of(3) + + fl_by_deliver_status = client.describe_flow_logs( + Filters=[{"Name": "deliver-log-status", "Values": ["SUCCESS"]}], + )["FlowLogs"] + fl_by_deliver_status.should.have.length_of(3) + + fl_by_s3_bucket = client.describe_flow_logs( + Filters=[{"Name": "log-destination-type", "Values": ["s3"]}], + )["FlowLogs"] + fl_by_s3_bucket.should.have.length_of(1) + fl_by_s3_bucket[0]["FlowLogId"].should.equal(fl2) + fl_by_s3_bucket[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_cloud_watch = client.describe_flow_logs( + Filters=[{"Name": "log-destination-type", "Values": ["cloud-watch-logs"]}], + )["FlowLogs"] + fl_by_cloud_watch.should.have.length_of(2) + + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], fl_by_cloud_watch)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], fl_by_cloud_watch)) + subnet1["SubnetId"].should.be.within(flow_logs_resource_ids) + 
vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + test_fl3 = next(fl for fl in fl_by_cloud_watch if fl["FlowLogId"] == fl3) + test_fl3["DeliverLogsStatus"].should.equal("FAILED") + test_fl3["DeliverLogsErrorMessage"].should.equal("Access error") + + fl_by_both = client.describe_flow_logs( + Filters=[ + {"Name": "log-destination-type", "Values": ["cloud-watch-logs", "s3"]} + ], + )["FlowLogs"] + fl_by_both.should.have.length_of(3) + + fl_by_flow_log_ids = client.describe_flow_logs( + Filters=[{"Name": "flow-log-id", "Values": [fl1, fl3]}], + )["FlowLogs"] + fl_by_flow_log_ids.should.have.length_of(2) + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], fl_by_flow_log_ids)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], fl_by_flow_log_ids)) + subnet1["SubnetId"].should.be.within(flow_logs_resource_ids) + vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + fl_by_group_name = client.describe_flow_logs( + Filters=[{"Name": "log-group-name", "Values": ["test-group"]}], + )["FlowLogs"] + fl_by_group_name.should.have.length_of(1) + fl_by_group_name[0]["FlowLogId"].should.equal(fl1) + fl_by_group_name[0]["ResourceId"].should.equal(subnet1["SubnetId"]) + + fl_by_group_name = client.describe_flow_logs( + Filters=[{"Name": "log-group-name", "Values": ["non-existing-group"]}], + )["FlowLogs"] + fl_by_group_name.should.have.length_of(1) + fl_by_group_name[0]["FlowLogId"].should.equal(fl3) + fl_by_group_name[0]["ResourceId"].should.equal(vpc3["VpcId"]) + + fl_by_resource_id = client.describe_flow_logs( + Filters=[{"Name": "resource-id", "Values": [vpc2["VpcId"]]}], + )["FlowLogs"] + fl_by_resource_id.should.have.length_of(1) + fl_by_resource_id[0]["FlowLogId"].should.equal(fl2) + fl_by_resource_id[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["ALL"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl1) + fl_by_traffic_type[0]["ResourceId"].should.equal(subnet1["SubnetId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["Reject"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl3) + fl_by_traffic_type[0]["ResourceId"].should.equal(vpc3["VpcId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["Accept"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl2) + fl_by_traffic_type[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_tag_key = client.describe_flow_logs( + Filters=[{"Name": "tag-key", "Values": ["foo"]}], + )["FlowLogs"] + fl_by_tag_key.should.have.length_of(1) + fl_by_tag_key[0]["FlowLogId"].should.equal(fl2) + fl_by_tag_key[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_tag_key = client.describe_flow_logs( + Filters=[{"Name": "tag-key", "Values": ["non-existing"]}], + )["FlowLogs"] + fl_by_tag_key.should.have.length_of(0) + + if not settings.TEST_SERVER_MODE: + client.describe_flow_logs.when.called_with( + Filters=[{"Name": "not-implemented-filter", "Values": ["foobar"]}], + ).should.throw(FilterNotImplementedError) + else: + client.describe_flow_logs.when.called_with( + Filters=[{"Name": "not-implemented-filter", "Values": ["foobar"]}], + ).should.throw(ResponseParserError) + 
+ +@mock_s3 +@mock_ec2 +def test_flow_logs_by_ids(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + vpc3 = client.create_vpc(CidrBlock="10.2.0.0/16")["Vpc"] + + fl1 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-1", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-1", + )["FlowLogIds"][0] + + fl2 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc2["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-3", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-3", + )["FlowLogIds"][0] + + fl3 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc3["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-3", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-3", + )["FlowLogIds"][0] + + flow_logs = client.describe_flow_logs(FlowLogIds=[fl1, fl3])["FlowLogs"] + flow_logs.should.have.length_of(2) + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], flow_logs)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], flow_logs)) + vpc1["VpcId"].should.be.within(flow_logs_resource_ids) + vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + client.delete_flow_logs(FlowLogIds=[fl1, fl3]) + + flow_logs = client.describe_flow_logs(FlowLogIds=[fl1, fl3])["FlowLogs"] + flow_logs.should.have.length_of(0) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["FlowLogId"].should.equal(fl2) + flow_logs[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + flow_logs = client.delete_flow_logs(FlowLogIds=[fl2]) + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(0) + + +@mock_cloudformation +@mock_ec2 +@mock_s3 +def test_flow_logs_by_cloudformation(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + cf_client = boto3.client("cloudformation", "us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + flow_log_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Template for VPC Flow Logs creation.", + "Resources": { + "TestFlowLogs": { + "Type": "AWS::EC2::FlowLog", + "Properties": { + "ResourceType": "VPC", + "ResourceId": vpc["VpcId"], + "TrafficType": "ALL", + "LogDestinationType": "s3", + "LogDestination": "arn:aws:s3:::" + bucket.name, + "MaxAggregationInterval": "60", + "Tags": [{"Key": "foo", "Value": "bar"}], + }, + } + }, + } + flow_log_template_json = json.dumps(flow_log_template) + stack_id = cf_client.create_stack( + StackName="test_stack", TemplateBody=flow_log_template_json + )["StackId"] + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["ResourceId"].should.equal(vpc["VpcId"]) + flow_logs[0]["LogDestination"].should.equal("arn:aws:s3:::" + bucket.name) + flow_logs[0]["MaxAggregationInterval"].should.equal(60) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 7b8f3bd53..c2b578929 100644 --- a/tests/test_ec2/test_general.py +++ 
b/tests/test_ec2/test_general.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto import boto3 @@ -25,11 +24,11 @@ def test_console_output(): def test_console_output_without_instance(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_console_output("i-1234abcd") - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 diff --git a/tests/test_ec2/test_iam_instance_profile_associations.py b/tests/test_ec2/test_iam_instance_profile_associations.py new file mode 100644 index 000000000..6a7dcad30 --- /dev/null +++ b/tests/test_ec2/test_iam_instance_profile_associations.py @@ -0,0 +1,345 @@ +from __future__ import unicode_literals + +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest + +import time +import json +import boto3 +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_ec2, mock_iam, mock_cloudformation + + +def quick_instance_creation(): + image_id = "ami-1234abcd" + conn_ec2 = boto3.resource("ec2", "us-east-1") + test_instance = conn_ec2.create_instances(ImageId=image_id, MinCount=1, MaxCount=1) + # We only need the instance id for these tests + return test_instance[0].id + + +def quick_instance_profile_creation(name): + conn_iam = boto3.resource("iam", "us-east-1") + test_instance_profile = conn_iam.create_instance_profile( + InstanceProfileName=name, Path="/" + ) + return test_instance_profile.arn, test_instance_profile.name + + +@mock_ec2 +@mock_iam +def test_associate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + association["IamInstanceProfileAssociation"]["InstanceId"].should.equal(instance_id) + association["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + association["IamInstanceProfileAssociation"]["State"].should.equal("associating") + + +@mock_ec2 +@mock_iam +def test_invalid_associate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + # Duplicate + with pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + ex.value.response["Error"]["Code"].should.equal("IncorrectState") + ex.value.response["Error"]["Message"].should.contain( + "There is an existing association for" + ) + + # Wrong instance profile + with
pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={"Arn": "fake", "Name": "fake"}, InstanceId=instance_id, + ) + ex.value.response["Error"]["Code"].should.equal("NoSuchEntity") + ex.value.response["Error"]["Message"].should.contain("not found") + + # Wrong instance id + with pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId="fake", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidInstanceID.NotFound") + ex.value.response["Error"]["Message"].should.contain("does not exist") + + +@mock_ec2 +@mock_iam +def test_describe(): + client = boto3.client("ec2", region_name="us-east-1") + + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["InstanceId"].should.equal( + instance_id + ) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + associations["IamInstanceProfileAssociations"][0]["State"].should.equal( + "associated" + ) + + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile1" + ) + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + next_test_associations = client.describe_iam_instance_profile_associations() + next_test_associations["IamInstanceProfileAssociations"].should.have.length_of(2) + + associations = client.describe_iam_instance_profile_associations( + AssociationIds=[ + next_test_associations["IamInstanceProfileAssociations"][0][ + "AssociationId" + ], + ] + ) + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal( + next_test_associations["IamInstanceProfileAssociations"][0][ + "IamInstanceProfile" + ]["Arn"] + ) + + associations = client.describe_iam_instance_profile_associations( + Filters=[ + { + "Name": "instance-id", + "Values": [ + next_test_associations["IamInstanceProfileAssociations"][0][ + "InstanceId" + ], + ], + }, + {"Name": "state", "Values": ["associated"]}, + ] + ) + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal( + next_test_associations["IamInstanceProfileAssociations"][0][ + "IamInstanceProfile" + ]["Arn"] + ) + + +@mock_ec2 +@mock_iam +def test_replace(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id1 = quick_instance_creation() + instance_profile_arn1, instance_profile_name1 = quick_instance_profile_creation( + "test_profile1" + ) + instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation( + "test_profile2" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn1, + "Name": instance_profile_name1, + }, + InstanceId=instance_id1, 
+ ) + + association = client.replace_iam_instance_profile_association( + IamInstanceProfile={ + "Arn": instance_profile_arn2, + "Name": instance_profile_name2, + }, + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + + association["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn2) + association["IamInstanceProfileAssociation"]["State"].should.equal("associating") + + +@mock_ec2 +@mock_iam +def test_invalid_replace(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation( + "test_profile2" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + # Wrong id + with pytest.raises(ClientError) as ex: + client.replace_iam_instance_profile_association( + IamInstanceProfile={ + "Arn": instance_profile_arn2, + "Name": instance_profile_name2, + }, + AssociationId="fake", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound") + ex.value.response["Error"]["Message"].should.contain("An invalid association-id of") + + # Wrong instance profile + with pytest.raises(ClientError) as ex: + client.replace_iam_instance_profile_association( + IamInstanceProfile={"Arn": "fake", "Name": "fake",}, + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + ex.value.response["Error"]["Code"].should.equal("NoSuchEntity") + ex.value.response["Error"]["Message"].should.contain("not found") + + +@mock_ec2 +@mock_iam +def test_disassociate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + + disassociation = client.disassociate_iam_instance_profile( + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + + disassociation["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + disassociation["IamInstanceProfileAssociation"]["State"].should.equal( + "disassociating" + ) + + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(0) + + +@mock_ec2 +@mock_iam +def test_invalid_disassociate(): + client = boto3.client("ec2", region_name="us-east-1") + + # Wrong id + with pytest.raises(ClientError) as ex: + client.disassociate_iam_instance_profile(AssociationId="fake",) + ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound") + ex.value.response["Error"]["Message"].should.contain("An invalid association-id of") + + +@mock_ec2 +@mock_cloudformation +def test_cloudformation(): + dummy_template_json = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "InstanceProfile": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": {"Path": "/", "Roles": []}, + }, + 
"Ec2Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "IamInstanceProfile": {"Ref": "InstanceProfile"}, + "KeyName": "mykey1", + "ImageId": "ami-7a11e213", + }, + }, + }, + } + + client = boto3.client("ec2", region_name="us-east-1") + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(dummy_template_json) + ) + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.contain("test_stack") + + cf_conn.delete_stack(StackName="test_stack") + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(0) diff --git a/tests/test_ec2/test_instance_types.py b/tests/test_ec2/test_instance_types.py new file mode 100644 index 000000000..1385d6113 --- /dev/null +++ b/tests/test_ec2/test_instance_types.py @@ -0,0 +1,18 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_describe_instance_types(): + client = boto3.client("ec2", "us-east-1") + instance_types = client.describe_instance_types() + + instance_types.should.have.key("InstanceTypes") + instance_types["InstanceTypes"].should_not.be.empty + instance_types["InstanceTypes"][0].should.have.key("InstanceType") + instance_types["InstanceTypes"][0].should.have.key("MemoryInfo") + instance_types["InstanceTypes"][0]["MemoryInfo"].should.have.key("SizeInMiB") diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 041bc8c85..146e3c696 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1,20 +1,18 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 from botocore.exceptions import ClientError -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import base64 -import datetime import ipaddress import six import boto import boto3 from boto.ec2.instance import Reservation, InstanceAttribute -from boto.exception import EC2ResponseError, EC2ResponseError +from boto.exception import EC2ResponseError from freezegun import freeze_time import sure # noqa @@ -22,6 +20,11 @@ from moto import mock_ec2_deprecated, mock_ec2 from tests.helpers import requires_boto_gte +if six.PY2: + decode_method = base64.decodestring +else: + decode_method = base64.decodebytes + ################ Test Readme ############### def add_servers(ami_id, count): conn = boto.connect_ec2() @@ -48,11 +51,11 @@ def test_add_servers(): def test_instance_launch_and_terminate(): conn = boto.ec2.connect_to_region("us-east-1") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: reservation = conn.run_instances("ami-1234abcd", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -71,7 +74,7 @@ def test_instance_launch_and_terminate(): 
instance.id.should.equal(instance.id) instance.state.should.equal("running") instance.launch_time.should.equal("2014-01-01T05:00:00.000Z") - instance.vpc_id.should.equal(None) + instance.vpc_id.shouldnt.equal(None) instance.placement.should.equal("us-east-1a") root_device_name = instance.root_device_name @@ -83,11 +86,11 @@ def test_instance_launch_and_terminate(): volume.attach_data.instance_id.should.equal(instance.id) volume.status.should.equal("in-use") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.terminate_instances([instance.id], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -98,6 +101,132 @@ def test_instance_launch_and_terminate(): instance.state.should.equal("terminated") +@mock_ec2 +def test_instance_terminate_discard_volumes(): + + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": {"VolumeSize": 50, "DeleteOnTermination": True}, + } + ], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert not list(ec2_resource.volumes.all()) + + +@mock_ec2 +def test_instance_terminate_keep_volumes_explicit(): + + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": {"VolumeSize": 50, "DeleteOnTermination": False}, + } + ], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert len(list(ec2_resource.volumes.all())) == 1 + + +@mock_ec2 +def test_instance_terminate_keep_volumes_implicit(): + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert len(instance_volume_ids) == 1 + volume = ec2_resource.Volume(instance_volume_ids[0]) + volume.state.should.equal("available") + + +@mock_ec2 +def test_instance_terminate_detach_volumes(): + ec2_resource = boto3.resource("ec2", "us-west-1") + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}, + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}, + ], + ) + instance = result[0] + for volume in instance.volumes.all(): + response = instance.detach_volume(VolumeId=volume.volume_id) + response["State"].should.equal("detaching") + + instance.terminate() + instance.wait_until_terminated() + + assert 
len(list(ec2_resource.volumes.all())) == 2 + + +@mock_ec2 +def test_instance_detach_volume_wrong_path(): + ec2_resource = boto3.resource("ec2", "us-west-1") + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},], + ) + instance = result[0] + for volume in instance.volumes.all(): + with pytest.raises(ClientError) as ex: + instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf") + + ex.value.response["Error"]["Code"].should.equal("InvalidAttachment.NotFound") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "The volume {0} is not attached to instance {1} as device {2}".format( + volume.volume_id, instance.instance_id, "/dev/sdf" + ) + ) + + @mock_ec2_deprecated def test_terminate_empty_instances(): conn = boto.connect_ec2("the_key", "the_secret") @@ -158,11 +287,11 @@ def test_get_instances_by_id(): instance_ids.should.equal([instance1.id, instance2.id]) # Call get_all_instances with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -407,6 +536,20 @@ def test_get_instances_filtering_by_image_id(): reservations[0]["Instances"].should.have.length_of(1) +@mock_ec2 +def test_get_instances_filtering_by_account_id(): + image_id = "ami-1234abcd" + client = boto3.client("ec2", region_name="us-east-1") + conn = boto3.resource("ec2", "us-east-1") + conn.create_instances(ImageId=image_id, MinCount=1, MaxCount=1) + + reservations = client.describe_instances( + Filters=[{"Name": "owner-id", "Values": ["123456789012"]}] + )["Reservations"] + + reservations[0]["Instances"].should.have.length_of(1) + + @mock_ec2 def test_get_instances_filtering_by_private_dns(): image_id = "ami-1234abcd" @@ -597,11 +740,11 @@ def test_instance_start_and_stop(): instance_ids = [instance.id for instance in instances] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: stopped_instances = conn.stop_instances(instance_ids, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -610,11 +753,11 @@ def test_instance_start_and_stop(): for instance in stopped_instances: instance.state.should.equal("stopping") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: started_instances = conn.start_instances([instances[0].id], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the StartInstance operation: 
Request would have succeeded, but DryRun flag is set" ) @@ -628,11 +771,11 @@ def test_instance_reboot(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.reboot(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -646,11 +789,11 @@ def test_instance_attribute_instance_type(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("instanceType", "m1.small", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set" ) @@ -674,11 +817,11 @@ def test_modify_instance_attribute_security_groups(): "test security group 2", "this is a test security group 2" ).id - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set" ) @@ -697,11 +840,11 @@ def test_instance_attribute_user_data(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("userData", "this is my user data", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set" ) @@ -727,11 +870,11 @@ def test_instance_attribute_source_dest_check(): # Set to false (note: Boto converts bool to string, eg 'false') - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("sourceDestCheck", False, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set" ) @@ -765,7 +908,7 @@ def 
test_user_data_with_run_instance(): instance_attribute = instance.get_attribute("userData") instance_attribute.should.be.a(InstanceAttribute) retrieved_user_data = instance_attribute.get("userData").encode("utf-8") - decoded_user_data = base64.decodestring(retrieved_user_data) + decoded_user_data = decode_method(retrieved_user_data) decoded_user_data.should.equal(b"some user data") @@ -773,11 +916,11 @@ def test_user_data_with_run_instance(): def test_run_instance_with_security_group_name(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: group = conn.create_security_group("group1", "some description", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set" ) @@ -1050,11 +1193,11 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) # Attach - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -1077,11 +1220,11 @@ def test_instance_with_nic_attach_detach(): ) # Detach - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -1096,11 +1239,11 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) # Detach with invalid attachment ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_network_interface("eni-attach-1234abcd") - cm.exception.code.should.equal("InvalidAttachmentID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAttachmentID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -1125,6 +1268,111 @@ def test_run_instance_with_keypair(): instance.key_name.should.equal("keypair_name") +@mock_ec2 +def test_run_instance_with_block_device_mappings(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2", 
"Ebs": {"VolumeSize": 50}}], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_ebs(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}], + } + with pytest.raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "The request must contain the parameter ebs" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_size(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}} + ], + } + with pytest.raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + "The request must contain the parameter size or snapshotId" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_from_snapshot(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_resource = boto3.resource("ec2", region_name="us-east-1") + volume_details = { + "AvailabilityZone": "1a", + "Size": 30, + } + + volume = ec2_resource.create_volume(**volume_details) + snapshot = volume.create_snapshot() + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}} + ], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + + volumes["Volumes"][0]["Size"].should.equal(30) + volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id) + + @mock_ec2_deprecated def test_describe_instance_status_no_instances(): conn = boto.connect_ec2("the_key", "the_secret") @@ -1144,7 +1392,7 @@ def test_describe_instance_status_with_instances(): @mock_ec2_deprecated -def test_describe_instance_status_with_instance_filter(): +def test_describe_instance_status_with_instance_filter_deprecated(): conn = boto.connect_ec2("the_key", "the_secret") # We want to filter based on this one @@ -1159,11 +1407,95 @@ def test_describe_instance_status_with_instance_filter(): all_status[0].id.should.equal(instance.id) # Call get_all_instance_status with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) - 
cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none + + +@mock_ec2 +def test_describe_instance_credit_specifications(): + conn = boto3.client("ec2", region_name="us-west-1") + + # We want to filter based on this one + reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1) + result = conn.describe_instance_credit_specifications( + InstanceIds=[reservation["Instances"][0]["InstanceId"]] + ) + assert ( + result["InstanceCreditSpecifications"][0]["InstanceId"] + == reservation["Instances"][0]["InstanceId"] + ) + + +@mock_ec2 +def test_describe_instance_status_with_instance_filter(): + conn = boto3.client("ec2", region_name="us-west-1") + + # We want to filter based on this one + reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=3, MaxCount=3) + instance1 = reservation["Instances"][0] + instance2 = reservation["Instances"][1] + instance3 = reservation["Instances"][2] + conn.stop_instances(InstanceIds=[instance1["InstanceId"]]) + stopped_instance_ids = [instance1["InstanceId"]] + running_instance_ids = sorted([instance2["InstanceId"], instance3["InstanceId"]]) + all_instance_ids = sorted(stopped_instance_ids + running_instance_ids) + + # Filter instance using the state name + state_name_filter = { + "running_and_stopped": [ + {"Name": "instance-state-name", "Values": ["running", "stopped"]} + ], + "running": [{"Name": "instance-state-name", "Values": ["running"]}], + "stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}], + } + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(all_instance_ids) + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["running"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(running_instance_ids) + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(stopped_instance_ids) + + # Filter instance using the state code + state_code_filter = { + "running_and_stopped": [ + {"Name": "instance-state-code", "Values": ["16", "80"]} + ], + "running": [{"Name": "instance-state-code", "Values": ["16"]}], + "stopped": [{"Name": "instance-state-code", "Values": ["80"]}], + } + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(all_instance_ids) + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["running"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(running_instance_ids) + + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["stopped"] 
+ )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] + sorted(found_instance_ids).should.equal(stopped_instance_ids) @requires_boto_gte("2.32.0") @@ -1202,13 +1534,13 @@ def test_get_instance_by_security_group(): security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_instance_attribute( instance.id, "groupSet", [security_group.id], dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set" ) @@ -1226,14 +1558,14 @@ def test_modify_delete_on_termination(): result = ec2_client.create_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1) instance = result[0] instance.load() - instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False) + instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True) instance.modify_attribute( BlockDeviceMappings=[ - {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}} + {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}} ] ) instance.load() - instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True) + instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False) @mock_ec2 @@ -1250,6 +1582,12 @@ def test_create_instance_ebs_optimized(): instance.load() instance.ebs_optimized.should.be(False) + instance = ec2_resource.create_instances( + ImageId="ami-12345678", MaxCount=1, MinCount=1, + )[0] + instance.load() + instance.ebs_optimized.should.be(False) + @mock_ec2 def test_run_multiple_instances_in_same_command(): @@ -1320,13 +1658,13 @@ def test_describe_instance_attribute(): ] for invalid_instance_attribute in invalid_instance_attributes: - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instance_attribute( InstanceId=instance_id, Attribute=invalid_instance_attribute ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameterValue") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) message = "Value ({invalid_instance_attribute}) for parameter attribute is invalid. 
Unknown attribute.".format( invalid_instance_attribute=invalid_instance_attribute ) - ex.exception.response["Error"]["Message"].should.equal(message) + ex.value.response["Error"]["Message"].should.equal(message) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 5941643cf..49cc6e38c 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -1,17 +1,18 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import re import boto +import boto3 + from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 VPC_CIDR = "10.0.0.0/16" @@ -26,11 +27,11 @@ def test_igw_create(): conn.get_all_internet_gateways().should.have.length_of(0) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: igw = conn.create_internet_gateway(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -49,11 +50,11 @@ def test_igw_attach(): igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -69,11 +70,11 @@ def test_igw_attach_bad_vpc(): conn = boto.connect_vpc("the_key", "the_secret") igw = conn.create_internet_gateway() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -85,11 +86,11 @@ def test_igw_attach_twice(): vpc2 = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -100,11 +101,11 @@ def test_igw_detach(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as ex: + with 
pytest.raises(EC2ResponseError) as ex: conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -122,11 +123,11 @@ def test_igw_detach_wrong_vpc(): vpc2 = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -137,11 +138,11 @@ def test_igw_detach_invalid_vpc(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -151,11 +152,11 @@ def test_igw_detach_unattached(): igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc.id) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -167,11 +168,11 @@ def test_igw_delete(): igw = conn.create_internet_gateway() conn.get_all_internet_gateways().should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_internet_gateway(igw.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -187,11 +188,11 @@ def test_igw_delete_attached(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_internet_gateway(igw.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -207,11 +208,11 @@ def test_igw_desribe(): def test_igw_describe_bad_id(): """ internet gateway fail to fetch by bad id """ conn = 
boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_internet_gateways([BAD_IGW]) - cm.exception.code.should.equal("InvalidInternetGatewayID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInternetGatewayID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -269,3 +270,19 @@ def test_igw_filter_by_attachment_state(): result = conn.get_all_internet_gateways(filters={"attachment.state": "available"}) result.should.have.length_of(1) result[0].id.should.equal(igw1.id) + + +@mock_ec2 +def test_create_internet_gateway_with_tags(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + + igw = ec2.create_internet_gateway( + TagSpecifications=[ + { + "ResourceType": "internet-gateway", + "Tags": [{"Key": "test", "Value": "TestRouteTable"}], + } + ], + ) + igw.tags.should.have.length_of(1) + igw.tags.should.equal([{"Key": "test", "Value": "TestRouteTable"}]) diff --git a/tests/test_ec2/test_ip_addresses.py b/tests/test_ec2/test_ip_addresses.py index a8e927b00..60cf1cfc6 100644 --- a/tests/test_ec2/test_ip_addresses.py +++ b/tests/test_ec2/test_ip_addresses.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_ip_addresses(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_ip_addresses(): + pass diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index d632c2478..dcca8b116 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto import sure # noqa @@ -16,7 +15,7 @@ from .helpers import rsa_check_private_key RSA_PUBLIC_KEY_OPENSSH = b"""\ ssh-rsa \ AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\ -6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\8kweyMQrhrt6HaKGgromRiz37LQx\ +6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\\8kweyMQrhrt6HaKGgromRiz37LQx\ 4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\ JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\ A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\ @@ -56,22 +55,22 @@ def test_key_pairs_empty(): def test_key_pairs_invalid_id(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_key_pairs("foo") - cm.exception.code.should.equal("InvalidKeyPair.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_key_pairs_create(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_key_pair("foo", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + 
ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -110,11 +109,11 @@ def test_key_pairs_create_exist(): conn.create_key_pair("foo") assert len(conn.get_all_key_pairs()) == 1 - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") - cm.exception.code.should.equal("InvalidKeyPair.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -130,11 +129,11 @@ def test_key_pairs_delete_exist(): conn = boto.connect_ec2("the_key", "the_secret") conn.create_key_pair("foo") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: r = conn.delete_key_pair("foo", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -147,11 +146,11 @@ def test_key_pairs_delete_exist(): def test_key_pairs_import(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", RSA_PUBLIC_KEY_OPENSSH, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -176,34 +175,34 @@ def test_key_pairs_import_exist(): assert kp.name == "foo" assert len(conn.get_all_key_pairs()) == 1 - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") - cm.exception.code.should.equal("InvalidKeyPair.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_key_pairs_invalid(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"") - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + ex.value.message.should.equal("Key is not in valid OpenSSH public key format") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"garbage") - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not 
in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + ex.value.message.should.equal("Key is not in valid OpenSSH public key format") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", DSA_PUBLIC_KEY_OPENSSH) - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + ex.value.message.should.equal("Key is not in valid OpenSSH public key format") @mock_ec2_deprecated diff --git a/tests/test_ec2/test_launch_templates.py b/tests/test_ec2/test_launch_templates.py index 4c37818d1..41896be96 100644 --- a/tests/test_ec2/test_launch_templates.py +++ b/tests/test_ec2/test_launch_templates.py @@ -1,7 +1,7 @@ import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.client import ClientError from moto import mock_ec2 @@ -30,7 +30,7 @@ def test_launch_template_create(): lt["DefaultVersionNumber"].should.equal(1) lt["LatestVersionNumber"].should.equal(1) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: cli.create_launch_template( LaunchTemplateName="test-template", LaunchTemplateData={ @@ -43,7 +43,7 @@ def test_launch_template_create(): }, ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidLaunchTemplateName.AlreadyExistsException) when calling the CreateLaunchTemplate operation: Launch template name already in use." ) diff --git a/tests/test_ec2/test_monitoring.py b/tests/test_ec2/test_monitoring.py index 03be93adf..95bd36e6a 100644 --- a/tests/test_ec2/test_monitoring.py +++ b/tests/test_ec2/test_monitoring.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_monitoring(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_monitoring(): + pass diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index fb62f7178..c2a790ed7 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import boto import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError from moto import mock_ec2_deprecated, mock_ec2 @@ -261,7 +261,7 @@ def test_duplicate_network_acl_entry(): RuleNumber=rule_number, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: default_network_acl.create_entry( CidrBlock="10.0.0.0/0", Egress=egress, @@ -269,9 +269,61 @@ def test_duplicate_network_acl_entry(): RuleAction="deny", RuleNumber=rule_number, ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " "operation: The network acl entry identified by {} already exists.".format( rule_number ) ) + + +@mock_ec2 +def test_describe_network_acls(): + conn = boto3.client("ec2", region_name="us-west-2") + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + + network_acl = conn.create_network_acl(VpcId=vpc_id) + + network_acl_id = 
network_acl["NetworkAcl"]["NetworkAclId"] + + resp = conn.describe_network_acls(NetworkAclIds=[network_acl_id]) + result = resp["NetworkAcls"] + + result.should.have.length_of(1) + result[0]["NetworkAclId"].should.equal(network_acl_id) + + resp2 = conn.describe_network_acls()["NetworkAcls"] + resp2.should.have.length_of(3) + + with pytest.raises(ClientError) as ex: + conn.describe_network_acls(NetworkAclIds=["1"]) + + str(ex.value).should.equal( + "An error occurred (InvalidRouteTableID.NotFound) when calling the " + "DescribeNetworkAcls operation: The routeTable ID '1' does not exist" + ) + + +@mock_ec2 +def test_create_network_acl_with_tags(): + conn = boto3.client("ec2", region_name="us-west-2") + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + + network_acl = conn.create_network_acl( + VpcId=vpc_id, + TagSpecifications=[ + { + "ResourceType": "network-acl", + "Tags": [{"Key": "test", "Value": "TestTags"}], + } + ], + ) + + (len(network_acl.get("NetworkAcl").get("Tags"))).should.equal(1) + network_acl.get("NetworkAcl").get("Tags").should.equal( + [{"Key": "test", "Value": "TestTags"}] + ) diff --git a/tests/test_ec2/test_placement_groups.py b/tests/test_ec2/test_placement_groups.py index c7494228a..bc389488b 100644 --- a/tests/test_ec2/test_placement_groups.py +++ b/tests/test_ec2/test_placement_groups.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_placement_groups(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_placement_groups(): + pass diff --git a/tests/test_ec2/test_reserved_instances.py b/tests/test_ec2/test_reserved_instances.py index 437d3a95b..47456bc03 100644 --- a/tests/test_ec2/test_reserved_instances.py +++ b/tests/test_ec2/test_reserved_instances.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_reserved_instances(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_reserved_instances(): + pass diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 347464691..889515962 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto import boto3 @@ -61,22 +60,22 @@ def test_route_tables_additional(): local_route.state.should.equal("active") local_route.destination_cidr_block.should.equal(vpc.cidr_block) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc(vpc.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.delete_route_table(route_table.id) all_route_tables = conn.get_all_route_tables(filters={"vpc-id": vpc.id}) all_route_tables.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
conn.delete_route_table("rtb-1234abcd") - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -197,11 +196,11 @@ def test_route_table_associations(): association_id_idempotent.should.equal(association_id) # Error: Attempt delete associated route table. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_route_table(route_table.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Disassociate conn.disassociate_route_table(association_id) @@ -211,33 +210,33 @@ def test_route_table_associations(): route_table.associations.should.have.length_of(0) # Error: Disassociate with invalid association ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_route_table(association_id) - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Associate with invalid subnet ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table(route_table.id, "subnet-1234abcd") - cm.exception.code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Associate with invalid route table ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table("rtb-1234abcd", subnet.id) - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.16.0") @mock_ec2_deprecated def test_route_table_replace_route_table_association(): """ - Note: Boto has deprecated replace_route_table_association (which returns status) - and now uses replace_route_table_association_with_assoc (which returns association ID). + Note: Boto has deprecated replace_route_table_association (which returns status) + and now uses replace_route_table_association_with_assoc (which returns association ID). 
""" conn = boto.connect_vpc("the_key", "the_secret") vpc = conn.create_vpc("10.0.0.0/16") @@ -293,20 +292,20 @@ def test_route_table_replace_route_table_association(): association_id_idempotent.should.equal(association_id2) # Error: Replace association with invalid association ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route_table_association_with_assoc( "rtbassoc-1234abcd", route_table1.id ) - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Replace association with invalid route table ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route_table_association_with_assoc(association_id2, "rtb-1234abcd") - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -389,11 +388,11 @@ def test_routes_additional(): ] new_routes.should.have.length_of(0) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_route(main_route_table.id, ROUTE_CIDR) - cm.exception.code.should.equal("InvalidRoute.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRoute.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -442,11 +441,11 @@ def test_routes_replace(): target_route.state.should.equal("active") target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route("rtb-1234abcd", ROUTE_CIDR, gateway_id=igw.id) - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.19.0") @@ -462,7 +461,7 @@ def test_routes_not_supported(): # Create conn.create_route.when.called_with( main_route_table.id, ROUTE_CIDR, interface_id="eni-1234abcd" - ).should.throw(NotImplementedError) + ).should.throw("InvalidNetworkInterfaceID.NotFound") # Replace igw = conn.create_internet_gateway() @@ -571,17 +570,54 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): internet_gateway.reload() destination_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: route = route_table.create_route( DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateRoute " "operation: Value ({}) for parameter destinationCidrBlock is invalid. 
This is not a valid CIDR block.".format( destination_cidr_block ) ) + route_table.create_route( + DestinationIpv6CidrBlock="2001:db8::/125", GatewayId=internet_gateway.id + ) + new_routes = [ + route + for route in route_table.routes + if route.destination_cidr_block != vpc.cidr_block + ] + new_routes.should.have.length_of(1) + new_routes[0].route_table_id.shouldnt.be.equal(None) + + +@mock_ec2 +def test_create_route_with_network_interface_id(): + ec2 = boto3.resource("ec2", region_name="us-west-2") + ec2_client = boto3.client("ec2", region_name="us-west-2") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-2a" + ) + + route_table = ec2_client.create_route_table(VpcId=vpc.id) + + route_table_id = route_table["RouteTable"]["RouteTableId"] + + eni1 = ec2_client.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress="10.0.10.5" + ) + + route = ec2_client.create_route( + NetworkInterfaceId=eni1["NetworkInterface"]["NetworkInterfaceId"], + RouteTableId=route_table_id, + ) + + route["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + @mock_ec2 def test_describe_route_tables_with_nat_gateway(): @@ -618,3 +654,82 @@ nat_gw_routes[0]["DestinationCidrBlock"].should.equal("0.0.0.0/0") nat_gw_routes[0]["NatGatewayId"].should.equal(nat_gw_id) nat_gw_routes[0]["State"].should.equal("active") + + +@mock_ec2 +def test_create_vpc_end_point(): + + ec2 = boto3.client("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc["Vpc"]["VpcId"], CidrBlock="10.0.0.0/24") + + route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"]) + + # test without any end point type specified + vpc_end_point = ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + ) + + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].should.equal( + route_table["RouteTable"]["RouteTableId"] + ) + vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) + vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0) + + # test with end point type specified as gateway + vpc_end_point = ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + VpcEndpointType="gateway", + ) + + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].should.equal( + route_table["RouteTable"]["RouteTableId"] + ) + vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) + vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0) + + # test with end point type as interface + vpc_end_point = ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + SubnetIds=[subnet["Subnet"]["SubnetId"]], + VpcEndpointType="interface", + ) + + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["SubnetIds"][0].should.equal( + subnet["Subnet"]["SubnetId"] + ) + vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) + len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0) + +
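
The test_create_vpc_end_point additions above distinguish gateway endpoints (routed through route tables, with no DNS entries) from interface endpoints (attached to subnets, with DNS entries). A condensed sketch of the two call shapes against the mocked client, folding the diff's cases into one illustrative test:

import boto3
from moto import mock_ec2


@mock_ec2
def test_vpc_endpoint_types():
    ec2 = boto3.client("ec2", region_name="us-west-1")
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.0.0/24")["Subnet"]["SubnetId"]
    rtb_id = ec2.create_route_table(VpcId=vpc_id)["RouteTable"]["RouteTableId"]

    # Gateway endpoints hang off route tables and expose no DNS entries
    gateway = ec2.create_vpc_endpoint(
        VpcId=vpc_id,
        ServiceName="com.amazonaws.us-east-1.s3",
        RouteTableIds=[rtb_id],
        VpcEndpointType="gateway",
    )
    assert gateway["VpcEndpoint"]["DnsEntries"] == []

    # Interface endpoints attach to subnets and do carry DNS entries
    interface = ec2.create_vpc_endpoint(
        VpcId=vpc_id,
        ServiceName="com.amazonaws.us-east-1.s3",
        SubnetIds=[subnet_id],
        VpcEndpointType="interface",
    )
    assert len(interface["VpcEndpoint"]["DnsEntries"]) > 0

+@mock_ec2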
+def test_create_route_tables_with_tags(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + + route_table = ec2.create_route_table( + VpcId=vpc.id, + TagSpecifications=[ + { + "ResourceType": "route-table", + "Tags": [{"Key": "test", "Value": "TestRouteTable"}], + } + ], + ) + + route_table.tags.should.have.length_of(1) diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index bb9c8f52a..7ce9f3c5c 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals import copy +import json -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import boto3 import boto @@ -19,13 +19,13 @@ from moto import mock_ec2, mock_ec2_deprecated def test_create_and_describe_security_group(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: security_group = conn.create_security_group( "test security group", "this is a test security group", dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set" ) @@ -37,13 +37,13 @@ def test_create_and_describe_security_group(): security_group.description.should.equal("this is a test security group") # Trying to create another group with the same name should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group( "test security group", "this is a test security group" ) - cm.exception.code.should.equal("InvalidGroup.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none all_groups = conn.get_all_security_groups() # The default group gets created automatically @@ -56,11 +56,11 @@ def test_create_and_describe_security_group(): def test_create_security_group_without_description_raises_error(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group("test security group", "") - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -86,13 +86,13 @@ def test_create_and_describe_vpc_security_group(): # Trying to create another group with the same name in the same VPC should # throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group( "test security group", "this is a test security group", vpc_id ) - cm.exception.code.should.equal("InvalidGroup.Duplicate") - cm.exception.status.should.equal(400) - 
cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none all_groups = conn.get_all_security_groups(filters={"vpc_id": [vpc_id]}) @@ -123,6 +123,19 @@ def test_create_two_security_groups_with_same_name_in_different_vpc(): set(group_names).should.equal(set(["default", "test security group"])) +@mock_ec2 +def test_create_two_security_groups_in_vpc_with_ipv6_enabled(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16", AmazonProvidedIpv6CidrBlock=True) + + security_group = ec2.create_security_group( + GroupName="sg01", Description="Test security group sg01", VpcId=vpc.id + ) + + # The security group must have two default egress rules (one for ipv4 and another for ipv6) + security_group.ip_permissions_egress.should.have.length_of(2) + + @mock_ec2_deprecated def test_deleting_security_groups(): conn = boto.connect_ec2("the_key", "the_secret") @@ -132,18 +145,18 @@ def test_deleting_security_groups(): conn.get_all_security_groups().should.have.length_of(4) # Deleting a group that doesn't exist should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_security_group("foobar") - cm.exception.code.should.equal("InvalidGroup.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Delete by name - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_security_group("test2", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set" ) @@ -170,7 +183,7 @@ def test_authorize_ip_range_and_revoke(): conn = boto.connect_ec2("the_key", "the_secret") security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = security_group.authorize( ip_protocol="tcp", from_port="22", @@ -178,9 +191,9 @@ cidr_ip="123.123.123.123/32", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set" ) @@ -194,19 +207,19 @@ def test_authorize_ip_range_and_revoke(): security_group.rules[0].grants[0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.revoke( ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32", ) - cm.exception.code.should.equal("InvalidPermission.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none +
cm.value.code.should.equal("InvalidPermission.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Actually revoke - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: security_group.revoke( ip_protocol="tcp", from_port="22", @@ -214,9 +227,9 @@ def test_authorize_ip_range_and_revoke(): cidr_ip="123.123.123.123/32", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set" ) @@ -232,7 +245,7 @@ def test_authorize_ip_range_and_revoke(): "testegress", "testegress", vpc_id="vpc-3432589" ) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = conn.authorize_security_group_egress( egress_security_group.id, "tcp", @@ -241,9 +254,9 @@ def test_authorize_ip_range_and_revoke(): cidr_ip="123.123.123.123/32", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set" ) @@ -259,9 +272,11 @@ def test_authorize_ip_range_and_revoke(): # There are two egress rules associated with the security group: # the default outbound rule and the new one int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal( - "123.123.123.123/32" - ) + actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip + # Deal with Python2 dict->unicode, instead of dict->string + if type(actual_cidr) == "unicode": + actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"')) + actual_cidr.should.equal("123.123.123.123/32") # Wrong Cidr should throw error egress_security_group.revoke.when.called_with( @@ -269,7 +284,7 @@ def test_authorize_ip_range_and_revoke(): ).should.throw(EC2ResponseError) # Actually revoke - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.revoke_security_group_egress( egress_security_group.id, "tcp", @@ -278,9 +293,9 @@ def test_authorize_ip_range_and_revoke(): cidr_ip="123.123.123.123/32", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set" ) @@ -319,13 +334,13 @@ def test_authorize_other_group_and_revoke(): security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) # Wrong source group should throw error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.revoke( ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group ) - 
cm.exception.code.should.equal("InvalidPermission.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidPermission.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Actually revoke security_group.revoke( @@ -424,11 +439,11 @@ def test_get_all_security_groups(): resp.should.have.length_of(1) resp[0].id.should.equal(sg1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_security_groups(groupnames=["does_not_exist"]) - cm.exception.code.should.equal("InvalidGroup.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none resp.should.have.length_of(1) resp[0].id.should.equal(sg1.id) @@ -453,13 +468,13 @@ def test_get_all_security_groups(): def test_authorize_bad_cidr_throws_invalid_parameter_value(): conn = boto.connect_ec2("the_key", "the_secret") security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.authorize( ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123" ) - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -469,11 +484,11 @@ def test_security_group_tagging(): sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: sg.add_tag("Test", "Tag", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -518,13 +533,13 @@ def test_sec_group_rule_limit(): other_sg = ec2_conn.create_security_group("test_2", "test_other") # INGRESS - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["{0}.0.0.0/0".format(i) for i in range(110)], ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") sg.rules.should.be.empty # authorize a rule targeting a different sec group (because this count too) @@ -540,17 +555,17 @@ def test_sec_group_rule_limit(): ) success.should.be.true # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"] ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # EGRESS # authorize a rule targeting a different sec group (because this count too) @@ -565,17 +580,17 @@ def test_sec_group_rule_limit(): group_id=sg.id, ip_protocol="-1", cidr_ip="{0}.0.0.0/0".format(i) ) # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", cidr_ip="101.0.0.0/0" ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") @mock_ec2_deprecated @@ -589,13 +604,13 @@ def test_sec_group_rule_limit_vpc(): other_sg = ec2_conn.create_security_group("test_2", "test", vpc_id=vpc.id) # INGRESS - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["{0}.0.0.0/0".format(i) for i in range(110)], ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") sg.rules.should.be.empty # authorize a rule targeting a different sec group (because this count too) @@ -611,17 +626,17 @@ def test_sec_group_rule_limit_vpc(): ) # verify that we cannot authorize past the limit for a CIDR IP success.should.be.true - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"] ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # EGRESS # authorize a rule targeting a different sec group (because this count too) @@ -636,17 +651,17 @@ def test_sec_group_rule_limit_vpc(): group_id=sg.id, ip_protocol="-1", cidr_ip="{0}.0.0.0/0".format(i) ) # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", cidr_ip="50.0.0.0/0" ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with 
assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id ) - cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") + cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") """ @@ -673,25 +688,87 @@ def test_add_same_rule_twice_throws_error(): ] sg.authorize_ingress(IpPermissions=ip_permissions) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sg.authorize_ingress(IpPermissions=ip_permissions) +@mock_ec2 +def test_description_in_ip_permissions(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + conn = boto3.client("ec2", region_name="us-east-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + sg = conn.create_security_group( + GroupName="sg1", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32", "Description": "testDescription"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["Description"] + == "testDescription" + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + sg = conn.create_security_group( + GroupName="sg2", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0].get( + "Description" + ) + is None + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + @mock_ec2 def test_security_group_tagging_boto3(): conn = boto3.client("ec2", region_name="us-east-1") sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_tags( Resources=[sg["GroupId"]], Tags=[{"Key": "Test", "Value": "Tag"}], DryRun=True, ) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -733,7 +810,9 @@ def test_authorize_and_revoke_in_bulk(): sg03 = ec2.create_security_group( GroupName="sg03", Description="Test security group sg03" ) - + sg04 = ec2.create_security_group( + GroupName="sg04", Description="Test security group sg04" + ) ip_permissions = [ { "IpProtocol": "tcp", @@ -758,13 +837,31 @@ def test_authorize_and_revoke_in_bulk(): "UserIdGroupPairs": [{"GroupName": "sg03", "UserId": sg03.owner_id}], "IpRanges": [], }, + { + "IpProtocol": "tcp", + "FromPort": 27015, + "ToPort": 27015, 
+ "UserIdGroupPairs": [{"GroupName": "sg04", "UserId": sg04.owner_id}], + "IpRanges": [ + {"CidrIp": "10.10.10.0/24", "Description": "Some Description"} + ], + }, + { + "IpProtocol": "tcp", + "FromPort": 27016, + "ToPort": 27016, + "UserIdGroupPairs": [{"GroupId": sg04.id, "UserId": sg04.owner_id}], + "IpRanges": [{"CidrIp": "10.10.10.0/24"}], + }, ] expected_ip_permissions = copy.deepcopy(ip_permissions) expected_ip_permissions[1]["UserIdGroupPairs"][0]["GroupName"] = "sg02" expected_ip_permissions[2]["UserIdGroupPairs"][0]["GroupId"] = sg03.id + expected_ip_permissions[3]["UserIdGroupPairs"][0]["GroupId"] = sg04.id + expected_ip_permissions[4]["UserIdGroupPairs"][0]["GroupName"] = "sg04" sg01.authorize_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.have.length_of(3) + sg01.ip_permissions.should.have.length_of(5) for ip_permission in expected_ip_permissions: sg01.ip_permissions.should.contain(ip_permission) @@ -774,7 +871,7 @@ def test_authorize_and_revoke_in_bulk(): sg01.ip_permissions.shouldnt.contain(ip_permission) sg01.authorize_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(4) + sg01.ip_permissions_egress.should.have.length_of(6) for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.should.contain(ip_permission) @@ -828,11 +925,11 @@ def test_get_all_security_groups_filter_with_same_vpc_id(): ) security_groups.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_security_groups(group_ids=["does_not_exist"]) - cm.exception.code.should.equal("InvalidGroup.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -853,11 +950,10 @@ def test_revoke_security_group_egress(): sg.revoke_egress( IpPermissions=[ { - "FromPort": 0, "IpProtocol": "-1", - "IpRanges": [{"CidrIp": "0.0.0.0/0"},], - "ToPort": 123, - }, + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], + "UserIdGroupPairs": [], + } ] ) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index cfc95bb82..bb4ccac3b 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from nose.tools import assert_raises +import pytest import datetime import boto @@ -9,8 +9,8 @@ from botocore.exceptions import ClientError import pytz import sure # noqa -from moto import mock_ec2, mock_ec2_deprecated -from moto.backends import get_model +from moto import mock_ec2, mock_ec2_deprecated, settings +from moto.ec2.models import ec2_backends from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -31,7 +31,7 @@ def test_request_spot_instances(): start = iso_8601_datetime_with_milliseconds(start_dt) end = iso_8601_datetime_with_milliseconds(end_dt) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: request = conn.request_spot_instances( SpotPrice="0.5", InstanceCount=1, @@ -54,9 +54,9 @@ def test_request_spot_instances(): }, DryRun=True, ) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + 
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -155,11 +155,11 @@ def test_cancel_spot_instance_request(): requests = conn.get_all_spot_instance_requests() requests.should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -184,13 +184,14 @@ def test_request_spot_instances_fulfilled(): request.state.should.equal("open") - get_model("SpotInstanceRequest", "us-east-1")[0].state = "active" + if not settings.TEST_SERVER_MODE: + ec2_backends["us-east-1"].spot_instance_requests[request.id].state = "active" - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] - request.state.should.equal("active") + request.state.should.equal("active") @mock_ec2_deprecated @@ -247,10 +248,11 @@ def test_request_spot_instances_setting_instance_id(): conn = boto.ec2.connect_to_region("us-east-1") request = conn.request_spot_instances(price=0.5, image_id="ami-abcd1234") - req = get_model("SpotInstanceRequest", "us-east-1")[0] - req.state = "active" - req.instance_id = "i-12345678" + if not settings.TEST_SERVER_MODE: + req = ec2_backends["us-east-1"].spot_instance_requests[request[0].id] + req.state = "active" + req.instance_id = "i-12345678" - request = conn.get_all_spot_instance_requests()[0] - assert request.state == "active" - assert request.instance_id == "i-12345678" + request = conn.get_all_spot_instance_requests()[0] + assert request.state == "active" + assert request.instance_id == "i-12345678" diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 7bb57aab4..76e525990 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,19 +1,17 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -import boto.vpc -from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError, ClientError -import json -import sure # noqa import random -from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated +import boto +import boto3 +import boto.vpc + +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest +import sure # noqa +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError, ParamValidationError +from moto import mock_ec2, mock_ec2_deprecated @mock_ec2_deprecated @@ -31,22 +29,22 @@ def test_subnets(): all_subnets = conn.get_all_subnets() all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
conn.delete_subnet(subnet.id) - cm.exception.code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_subnet_create_vpc_validation(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -75,6 +73,18 @@ def test_subnet_should_have_proper_availability_zone_set(): subnetA.availability_zone.should.equal("us-west-1b") +@mock_ec2 +def test_availability_zone_in_create_subnet(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="172.31.0.0/16") + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZoneId="use1-az6" + ) + subnet.availability_zone_id.should.equal("use1-az6") + + @mock_ec2 def test_default_subnet(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -191,7 +201,7 @@ def test_modify_subnet_attribute_validation(): VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a" ) - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): client.modify_subnet_attribute( SubnetId=subnet.id, MapPublicIpOnLaunch={"Value": "invalid"} ) @@ -217,11 +227,11 @@ def test_subnet_get_by_id(): subnetA.id.should.be.within(subnets_by_id) subnetB1.id.should.be.within(subnets_by_id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_subnets(subnet_ids=["subnet-does_not_exist"]) - cm.exception.code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -299,38 +309,6 @@ def test_get_subnets_filtering(): ).should.throw(NotImplementedError) -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_subnet_tags_through_cloudformation(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - "Tags": [ - {"Key": "foo", "Value": "bar"}, - {"Key": "blah", "Value": "baz"}, - ], - }, - } - }, - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack("test_stack", template_body=template_json) - - subnet = vpc_conn.get_all_subnets(filters={"cidrBlock": "10.0.0.0/24"})[0] - subnet.tags["foo"].should.equal("bar") - subnet.tags["blah"].should.equal("baz") - - @mock_ec2 def test_create_subnet_response_fields(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -348,7 +326,7 @@ def test_create_subnet_response_fields(): subnet.should.have.key("State") subnet.should.have.key("SubnetId") 
subnet.should.have.key("VpcId") - subnet.shouldnt.have.key("Tags") + subnet.should.have.key("Tags") subnet.should.have.key("DefaultForAz").which.should.equal(False) subnet.should.have.key("MapPublicIpOnLaunch").which.should.equal(False) subnet.should.have.key("OwnerId") @@ -407,13 +385,13 @@ def test_create_subnet_with_invalid_availability_zone(): vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") subnet_availability_zone = "asfasfas" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = client.create_subnet( VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone=subnet_availability_zone, ) - assert str(ex.exception).startswith( + assert str(ex.value).startswith( "An error occurred (InvalidParameterValue) when calling the CreateSubnet " "operation: Value ({}) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: ".format( subnet_availability_zone @@ -430,9 +408,27 @@ def test_create_subnet_with_invalid_cidr_range(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "10.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) + ) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range_multiple_vpc_cidr_blocks(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.meta.client.associate_vpc_cidr_block(CidrBlock="10.1.0.0/16", VpcId=vpc.id) + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = "10.2.0.0/20" + with pytest.raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.value).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) ) @@ -447,9 +443,9 @@ def test_create_subnet_with_invalid_cidr_block_parameter(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateSubnet " "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format( subnet_cidr_block @@ -457,6 +453,46 @@ def test_create_subnet_with_invalid_cidr_block_parameter(): ) +@mock_ec2 +def test_create_subnets_with_multiple_vpc_cidr_blocks(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.meta.client.associate_vpc_cidr_block(CidrBlock="10.1.0.0/16", VpcId=vpc.id) + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block_primary = "10.0.0.0/24" + subnet_primary = ec2.create_subnet( + VpcId=vpc.id, CidrBlock=subnet_cidr_block_primary + ) + + subnet_cidr_block_secondary = "10.1.0.0/24" + subnet_secondary = ec2.create_subnet( + VpcId=vpc.id, CidrBlock=subnet_cidr_block_secondary + ) + + subnets = client.describe_subnets( + SubnetIds=[subnet_primary.id, subnet_secondary.id] + )["Subnets"] + subnets.should.have.length_of(2) + + for subnet in subnets: + subnet.should.have.key("AvailabilityZone") + subnet.should.have.key("AvailabilityZoneId") + subnet.should.have.key("AvailableIpAddressCount") + subnet.should.have.key("CidrBlock") + subnet.should.have.key("State") + subnet.should.have.key("SubnetId") + subnet.should.have.key("VpcId") + subnet.shouldnt.have.key("Tags") + subnet.should.have.key("DefaultForAz").which.should.equal(False) + subnet.should.have.key("MapPublicIpOnLaunch").which.should.equal(False) + subnet.should.have.key("OwnerId") + subnet.should.have.key("AssignIpv6AddressOnCreation").which.should.equal(False) + + @mock_ec2 def test_create_subnets_with_overlapping_cidr_blocks(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -466,10 +502,10 @@ def test_create_subnets_with_overlapping_cidr_blocks(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "10.0.0.0/24" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " "operation: The CIDR '{}' conflicts with another subnet".format( subnet_cidr_block @@ -477,6 +513,23 @@ def test_create_subnets_with_overlapping_cidr_blocks(): ) +@mock_ec2 +def test_create_subnet_with_tags(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="172.31.0.0/16") + + subnet = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock="172.31.48.0/20", + AvailabilityZoneId="use1-az6", + TagSpecifications=[ + {"ResourceType": "subnet", "Tags": [{"Key": "name", "Value": "some-vpc"}]} + ], + ) + + assert subnet.tags == [{"Key": "name", "Value": "some-vpc"}] + + @mock_ec2 def test_available_ip_addresses_in_subnet(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -599,3 +652,84 @@ def validate_subnet_details_after_creating_eni( for eni in enis_created: client.delete_network_interface(NetworkInterfaceId=eni["NetworkInterfaceId"]) client.delete_subnet(SubnetId=subnet["SubnetId"]) + + +@mock_ec2 +def test_run_instances_should_attach_to_default_subnet(): + # https://github.com/spulec/moto/issues/2877 + ec2 = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") + # run_instances + instances = client.run_instances(MinCount=1, MaxCount=1, SecurityGroups=["sg01"],) + # Assert subnet is created 
appropriately
+    subnets = client.describe_subnets()["Subnets"]
+    default_subnet_ids = [subnet["SubnetId"] for subnet in subnets]
+    # The instance must have landed in one of the region's default subnets
+    # (the mock creates one default subnet per availability zone)
+    assert (
+        instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"]
+        in default_subnet_ids
+    )
+    # A default /20 subnet starts out with 4091 free addresses; the new
+    # instance consumes exactly one of them
+    assert any(subnet["AvailableIpAddressCount"] == 4090 for subnet in subnets)
+
+
+@mock_ec2
+def test_describe_subnets_by_vpc_id():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    client = boto3.client("ec2", region_name="us-west-1")
+
+    vpc1 = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc1.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a"
+    )
+    vpc2 = ec2.create_vpc(CidrBlock="172.31.0.0/16")
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc2.id, CidrBlock="172.31.48.0/20", AvailabilityZone="us-west-1b"
+    )
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc1.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(1)
+    subnets[0]["SubnetId"].should.equal(subnet1.id)
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(1)
+    subnets[0]["SubnetId"].should.equal(subnet2.id)
+
+    # Specify multiple VPCs in Filter.
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc1.id, vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(2)
+
+    # Specify mismatched SubnetIds/Filters.
+    subnets = client.describe_subnets(
+        SubnetIds=[subnet1.id], Filters=[{"Name": "vpc-id", "Values": [vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(0)
+
+
+@mock_ec2
+def test_describe_subnets_by_state():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    client = boto3.client("ec2", region_name="us-west-1")
+
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a"
+    )
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "state", "Values": ["available"]}]
+    ).get("Subnets", [])
+    for subnet in subnets:
+        subnet["State"].should.equal("available")
diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py
index 29d2cb1e3..82a23c91c 100644
--- a/tests/test_ec2/test_tags.py
+++ b/tests/test_ec2/test_tags.py
@@ -1,5 +1,5 @@
 from __future__ import unicode_literals
-from nose.tools import assert_raises
+import pytest

 import itertools

@@ -11,7 +11,7 @@ from boto.ec2.instance import Reservation
 import sure  # noqa

 from moto import mock_ec2_deprecated, mock_ec2
-from nose.tools import assert_raises
+import pytest

 @mock_ec2_deprecated
@@ -20,11 +20,11 @@ def test_add_tag():
     reservation = conn.run_instances("ami-1234abcd")
     instance = reservation.instances[0]

-    with assert_raises(EC2ResponseError) as ex:
+    with pytest.raises(EC2ResponseError) as ex:
         instance.add_tag("a key", "some value", dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
     )

@@ -51,11 +51,11 @@ def test_remove_tag():
     tag.name.should.equal("a key")
     tag.value.should.equal("some value")

-    with
assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.remove_tag("a key", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -106,11 +106,11 @@ def test_create_tags(): "blank key": "", } - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_tags(instance.id, tag_dict, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -131,18 +131,18 @@ def test_tag_limit_exceeded(): for i in range(51): tag_dict["{0:02d}".format(i + 1)] = "" - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal("TagLimitExceeded") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("TagLimitExceeded") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none instance.add_tag("a key", "a value") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal("TagLimitExceeded") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("TagLimitExceeded") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none tags = conn.get_all_tags() tag = tags[0] @@ -157,27 +157,27 @@ def test_invalid_parameter_tag_null(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: instance.add_tag("a key", None) - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated def test_invalid_id(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags("ami-blah", {"key": "tag"}) - cm.exception.code.should.equal("InvalidID") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidID") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags("blah-blah", {"key": "tag"}) - cm.exception.code.should.equal("InvalidID") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidID") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -287,13 +287,13 
@@ def test_get_all_tags_value_filter(): tags = conn.get_all_tags(filters={"value": "*some*value*"}) tags.should.have.length_of(3) - tags = conn.get_all_tags(filters={"value": "*value\*"}) + tags = conn.get_all_tags(filters={"value": r"*value\*"}) tags.should.have.length_of(1) - tags = conn.get_all_tags(filters={"value": "*value\*\*"}) + tags = conn.get_all_tags(filters={"value": r"*value\*\*"}) tags.should.have.length_of(1) - tags = conn.get_all_tags(filters={"value": "*value\*\?"}) + tags = conn.get_all_tags(filters={"value": r"*value\*\?"}) tags.should.have.length_of(1) @@ -449,10 +449,10 @@ def test_create_tag_empty_resource(): # create ec2 client in us-west-1 client = boto3.client("ec2", region_name="us-west-1") # create tag with empty resource - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_tags(Resources=[], Tags=[{"Key": "Value"}]) - ex.exception.response["Error"]["Code"].should.equal("MissingParameter") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["Error"]["Message"].should.equal( "The request must contain the parameter resourceIdSet" ) @@ -462,9 +462,42 @@ def test_delete_tag_empty_resource(): # create ec2 client in us-west-1 client = boto3.client("ec2", region_name="us-west-1") # delete tag with empty resource - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_tags(Resources=[], Tags=[{"Key": "Value"}]) - ex.exception.response["Error"]["Code"].should.equal("MissingParameter") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["Error"]["Message"].should.equal( "The request must contain the parameter resourceIdSet" ) + + +@mock_ec2 +def test_retrieve_resource_with_multiple_tags(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + blue, green = ec2.create_instances(ImageId="ANY_ID", MinCount=2, MaxCount=2) + ec2.create_tags( + Resources=[blue.instance_id], + Tags=[ + {"Key": "environment", "Value": "blue"}, + {"Key": "application", "Value": "api"}, + ], + ) + ec2.create_tags( + Resources=[green.instance_id], + Tags=[ + {"Key": "environment", "Value": "green"}, + {"Key": "application", "Value": "api"}, + ], + ) + green_instances = list(ec2.instances.filter(Filters=(get_filter("green")))) + green_instances.should.equal([green]) + blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue")))) + blue_instances.should.equal([blue]) + + +def get_filter(color): + return [ + {"Name": "tag-key", "Values": ["application"]}, + {"Name": "tag-value", "Values": ["api"]}, + {"Name": "tag-key", "Values": ["environment"]}, + {"Name": "tag-value", "Values": [color]}, + ] diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index f778ac3e5..23139c08e 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -1,96 +1,229 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_virtual_private_gateways(): - conn = boto.connect_vpc("the_key", "the_secret") - - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - vpn_gateway.should_not.be.none - vpn_gateway.id.should.match(r"vgw-\w+") - vpn_gateway.type.should.equal("ipsec.1") - vpn_gateway.state.should.equal("available") - 
vpn_gateway.availability_zone.should.equal("us-east-1a") - - -@mock_ec2_deprecated -def test_describe_vpn_gateway(): - conn = boto.connect_vpc("the_key", "the_secret") - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - - vgws = conn.get_all_vpn_gateways() - vgws.should.have.length_of(1) - - gateway = vgws[0] - gateway.id.should.match(r"vgw-\w+") - gateway.id.should.equal(vpn_gateway.id) - vpn_gateway.type.should.equal("ipsec.1") - vpn_gateway.state.should.equal("available") - vpn_gateway.availability_zone.should.equal("us-east-1a") - - -@mock_ec2_deprecated -def test_vpn_gateway_vpc_attachment(): - conn = boto.connect_vpc("the_key", "the_secret") - vpc = conn.create_vpc("10.0.0.0/16") - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - - conn.attach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(1) - attachments[0].vpc_id.should.equal(vpc.id) - attachments[0].state.should.equal("attached") - - -@mock_ec2_deprecated -def test_delete_vpn_gateway(): - conn = boto.connect_vpc("the_key", "the_secret") - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - - conn.delete_vpn_gateway(vpn_gateway.id) - vgws = conn.get_all_vpn_gateways() - vgws.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_vpn_gateway_tagging(): - conn = boto.connect_vpc("the_key", "the_secret") - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - vpn_gateway.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the subnet - vpn_gateway = conn.get_all_vpn_gateways()[0] - vpn_gateway.tags.should.have.length_of(1) - vpn_gateway.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_detach_vpn_gateway(): - - conn = boto.connect_vpc("the_key", "the_secret") - vpc = conn.create_vpc("10.0.0.0/16") - vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") - - conn.attach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(1) - attachments[0].vpc_id.should.equal(vpc.id) - attachments[0].state.should.equal("attached") - - conn.detach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(0) +from __future__ import unicode_literals +import boto +import boto3 +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 + + +@mock_ec2_deprecated +def test_virtual_private_gateways(): + conn = boto.connect_vpc("the_key", "the_secret") + + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + vpn_gateway.should_not.be.none + vpn_gateway.id.should.match(r"vgw-\w+") + vpn_gateway.type.should.equal("ipsec.1") + vpn_gateway.state.should.equal("available") + vpn_gateway.availability_zone.should.equal("us-east-1a") + + +@mock_ec2_deprecated +def test_describe_vpn_gateway(): + conn = boto.connect_vpc("the_key", "the_secret") + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + + vgws = conn.get_all_vpn_gateways() + vgws.should.have.length_of(1) + + gateway = vgws[0] + gateway.id.should.match(r"vgw-\w+") + gateway.id.should.equal(vpn_gateway.id) + vpn_gateway.type.should.equal("ipsec.1") + vpn_gateway.state.should.equal("available") + 
vpn_gateway.availability_zone.should.equal("us-east-1a") + + +@mock_ec2 +def test_describe_vpn_connections_attachment_vpc_id_filter(): + """ describe_vpn_gateways attachment.vpc-id filter """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id) + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + gateways["VpnGateways"][0]["VpcAttachments"].should.contain( + {"State": "attached", "VpcId": vpc_id} + ) + + +@mock_ec2 +def test_describe_vpn_connections_state_filter_attached(): + """ describe_vpn_gateways attachment.state filter - match attached """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id) + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "attachment.state", "Values": ["attached"]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + gateways["VpnGateways"][0]["VpcAttachments"].should.contain( + {"State": "attached", "VpcId": vpc_id} + ) + + +@mock_ec2 +def test_describe_vpn_connections_state_filter_deatched(): + """ describe_vpn_gateways attachment.state filter - don't match detatched """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id) + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "attachment.state", "Values": ["detached"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + +@mock_ec2 +def test_describe_vpn_connections_id_filter_match(): + """ describe_vpn_gateways vpn-gateway-id filter - match correct id """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "vpn-gateway-id", "Values": [gateway_id]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + + +@mock_ec2 +def test_describe_vpn_connections_id_filter_miss(): + """ describe_vpn_gateways vpn-gateway-id filter - don't match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "vpn-gateway-id", "Values": ["unknown_gateway_id"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + +@mock_ec2 +def test_describe_vpn_connections_type_filter_match(): + """ describe_vpn_gateways type filter - match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + gateway = 
ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "type", "Values": ["ipsec.1"]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + + +@mock_ec2 +def test_describe_vpn_connections_type_filter_miss(): + """ describe_vpn_gateways type filter - don't match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "type", "Values": ["unknown_type"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpn_gateway_vpc_attachment(): + conn = boto.connect_vpc("the_key", "the_secret") + vpc = conn.create_vpc("10.0.0.0/16") + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + + conn.attach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(1) + attachments[0].vpc_id.should.equal(vpc.id) + attachments[0].state.should.equal("attached") + + +@mock_ec2_deprecated +def test_delete_vpn_gateway(): + conn = boto.connect_vpc("the_key", "the_secret") + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + + conn.delete_vpn_gateway(vpn_gateway.id) + vgws = conn.get_all_vpn_gateways() + vgws.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpn_gateway_tagging(): + conn = boto.connect_vpc("the_key", "the_secret") + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + vpn_gateway.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the subnet + vpn_gateway = conn.get_all_vpn_gateways()[0] + vpn_gateway.tags.should.have.length_of(1) + vpn_gateway.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_detach_vpn_gateway(): + + conn = boto.connect_vpc("the_key", "the_secret") + vpc = conn.create_vpc("10.0.0.0/16") + vpn_gateway = conn.create_vpn_gateway("ipsec.1", "us-east-1a") + + conn.attach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(1) + attachments[0].vpc_id.should.equal(vpc.id) + attachments[0].state.should.equal("attached") + + conn.detach_vpn_gateway(vpn_gateway_id=vpn_gateway.id, vpc_id=vpc.id) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(0) diff --git a/tests/test_ec2/test_vm_export.py b/tests/test_ec2/test_vm_export.py index f8b24f6d4..08215d067 100644 --- a/tests/test_ec2/test_vm_export.py +++ b/tests/test_ec2/test_vm_export.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_vm_export(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_vm_export(): + pass diff --git a/tests/test_ec2/test_vm_import.py b/tests/test_ec2/test_vm_import.py index 66c7561a7..0ebfaaa0c 100644 --- a/tests/test_ec2/test_vm_import.py +++ b/tests/test_ec2/test_vm_import.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto 
import mock_ec2
-
-
-@mock_ec2
-def test_vm_import():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_vm_import():
+    pass
diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py
index fc1646961..2ffe89fca 100644
--- a/tests/test_ec2/test_vpc_peering.py
+++ b/tests/test_ec2/test_vpc_peering.py
@@ -1,8 +1,7 @@
 from __future__ import unicode_literals

-# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises
-from nose.tools import assert_raises
+import pytest

 from moto.ec2.exceptions import EC2ClientError
 from botocore.exceptions import ClientError
@@ -49,11 +48,11 @@ def test_vpc_peering_connections_accept():
     vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id)
     vpc_pcx._status.code.should.equal("active")

-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.reject_vpc_peering_connection(vpc_pcx.id)
-    cm.exception.code.should.equal("InvalidStateTransition")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidStateTransition")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none

     all_vpc_pcxs = conn.get_all_vpc_peering_connections()
     all_vpc_pcxs.should.have.length_of(1)
@@ -69,11 +68,11 @@ def test_vpc_peering_connections_reject():
     verdict = conn.reject_vpc_peering_connection(vpc_pcx.id)
     verdict.should.equal(True)

-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.accept_vpc_peering_connection(vpc_pcx.id)
-    cm.exception.code.should.equal("InvalidStateTransition")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidStateTransition")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none

     all_vpc_pcxs = conn.get_all_vpc_peering_connections()
     all_vpc_pcxs.should.have.length_of(1)
@@ -93,11 +92,11 @@ def test_vpc_peering_connections_delete():
     all_vpc_pcxs.should.have.length_of(1)
     all_vpc_pcxs[0]._status.code.should.equal("deleted")

-    with assert_raises(EC2ResponseError) as cm:
+    with pytest.raises(EC2ResponseError) as cm:
         conn.delete_vpc_peering_connection("pcx-1234abcd")
-    cm.exception.code.should.equal("InvalidVpcPeeringConnectionId.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidVpcPeeringConnectionId.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none

 @mock_ec2
@@ -129,11 +128,11 @@ def test_vpc_peering_connections_cross_region_fail():
     ec2_apn1 = boto3.resource("ec2", region_name="ap-northeast-1")
     vpc_apn1 = ec2_apn1.create_vpc(CidrBlock="10.20.0.0/16")
     # create peering wrong region with no vpc
-    with assert_raises(ClientError) as cm:
+    with pytest.raises(ClientError) as cm:
         ec2_usw1.create_vpc_peering_connection(
             VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion="ap-northeast-2"
         )
-    cm.exception.response["Error"]["Code"].should.equal("InvalidVpcID.NotFound")
+    cm.value.response["Error"]["Code"].should.equal("InvalidVpcID.NotFound")

 @mock_ec2
@@ -160,8 +159,26 @@ def test_vpc_peering_connections_cross_region_accept():
         VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
     )
     acp_pcx_apn1["VpcPeeringConnection"]["Status"]["Code"].should.equal("active")
+
acp_pcx_apn1["VpcPeeringConnection"]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + acp_pcx_apn1["VpcPeeringConnection"]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) des_pcx_apn1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") + des_pcx_apn1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + des_pcx_apn1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) des_pcx_usw1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") + des_pcx_usw1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + des_pcx_usw1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) @mock_ec2 @@ -235,15 +252,15 @@ def test_vpc_peering_connections_cross_region_accept_wrong_region(): # accept wrong peering from us-west-1 which will raise error ec2_apn1 = boto3.client("ec2", region_name="ap-northeast-1") ec2_usw1 = boto3.client("ec2", region_name="us-west-1") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: ec2_usw1.accept_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) - cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted") + cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( "Incorrect region ({0}) specified for this request.VPC " "peering connection {1} must be " "accepted in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1") ) - cm.exception.response["Error"]["Message"].should.equal(exp_msg) + cm.value.response["Error"]["Message"].should.equal(exp_msg) @mock_ec2 @@ -260,12 +277,12 @@ def test_vpc_peering_connections_cross_region_reject_wrong_region(): # reject wrong peering from us-west-1 which will raise error ec2_apn1 = boto3.client("ec2", region_name="ap-northeast-1") ec2_usw1 = boto3.client("ec2", region_name="us-west-1") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: ec2_usw1.reject_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) - cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted") + cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( "Incorrect region ({0}) specified for this request.VPC " "peering connection {1} must be accepted or " "rejected in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1") ) - cm.exception.response["Error"]["Message"].should.equal(exp_msg) + cm.value.response["Error"]["Message"].should.equal(exp_msg) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 1bc3ddd98..5344098ba 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest from moto.ec2.exceptions import EC2ClientError from botocore.exceptions import ClientError @@ -31,11 +30,11 @@ def test_vpcs(): all_vpcs = conn.get_all_vpcs() all_vpcs.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc("vpc-1234abcd") - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + 
cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -114,11 +113,11 @@ def test_vpc_get_by_id(): vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_vpcs(vpc_ids=["vpc-does_not_exist"]) - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -402,11 +401,11 @@ def test_associate_vpc_ipv4_cidr_block(): ) # Check error on adding 6th association. - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.associate_vpc_cidr_block( VpcId=vpc.id, CidrBlock="10.10.50.0/22" ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format( vpc.id @@ -447,11 +446,11 @@ def test_disassociate_vpc_ipv4_cidr_block(): ) # Error attempting to delete a non-existent CIDR_BLOCK association - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.disassociate_vpc_cidr_block( AssociationId="vpc-cidr-assoc-BORING123" ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " "'vpc-cidr-assoc-BORING123' does not exist" @@ -469,11 +468,11 @@ def test_disassociate_vpc_ipv4_cidr_block(): {}, )["AssociationId"] - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.disassociate_vpc_cidr_block( AssociationId=vpc_base_cidr_assoc_id ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " "The vpc CIDR block with association ID {} may not be disassociated. It is the primary " "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id) @@ -549,11 +548,11 @@ def test_vpc_associate_ipv6_cidr_block(): ipv6_cidr_block_association_set["AssociationId"].should.contain("vpc-cidr-assoc") # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.associate_vpc_cidr_block( VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format( vpc.id @@ -657,9 +656,9 @@ def test_create_vpc_with_invalid_cidr_block_parameter(): ec2 = boto3.resource("ec2", region_name="us-west-1") vpc_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateVpc " "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format( vpc_cidr_block @@ -672,14 +671,27 @@ def test_create_vpc_with_invalid_cidr_range(): ec2 = boto3.resource("ec2", region_name="us-west-1") vpc_cidr_block = "10.1.0.0/29" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidVpc.Range) when calling the CreateVpc " "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block) ) +@mock_ec2 +def test_create_vpc_with_tags(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + # Create VPC + vpc = ec2.create_vpc( + CidrBlock="10.0.0.0/16", + TagSpecifications=[ + {"ResourceType": "vpc", "Tags": [{"Key": "name", "Value": "some-vpc"}]} + ], + ) + assert vpc.tags == [{"Key": "name", "Value": "some-vpc"}] + + @mock_ec2 def test_enable_vpc_classic_link(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -825,3 +837,34 @@ def test_describe_classic_link_dns_support_multiple(): assert response.get("Vpcs").sort(key=lambda x: x["VpcId"]) == expected.sort( key=lambda x: x["VpcId"] ) + + +@mock_ec2 +def test_describe_vpc_end_point_services(): + ec2 = boto3.client("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + + route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"]) + + ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + VpcEndpointType="gateway", + ) + + vpc_end_point_services = ec2.describe_vpc_endpoint_services() + + assert vpc_end_point_services.get("ServiceDetails").should.be.true + assert vpc_end_point_services.get("ServiceNames").should.be.true + assert vpc_end_point_services.get("ServiceNames") == ["com.amazonaws.us-east-1.s3"] + assert ( + vpc_end_point_services.get("ServiceDetails")[0] + .get("ServiceType", [])[0] + .get("ServiceType") + == "gateway" + ) + assert vpc_end_point_services.get("ServiceDetails")[0].get("AvailabilityZones") == [ + "us-west-1a", + "us-west-1b", + ] diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 24396d3d1..ca8897417 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,10 +1,11 @@ from __future__ import unicode_literals + import boto -from nose.tools import assert_raises +import boto3 +import pytest import sure # noqa from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated @mock_ec2_deprecated @@ -34,7 +35,7 @@ def test_delete_vpn_connections(): @mock_ec2_deprecated def test_delete_vpn_connections_bad_id(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.delete_vpn_connection("vpn-0123abcd") @@ -51,3 +52,24 @@ def test_describe_vpn_connections(): list_of_vpn_connections.should.have.length_of(2) list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) list_of_vpn_connections.should.have.length_of(1) + + +@mock_ec2 +def test_create_vpn_connection_with_vpn_gateway(): + client = boto3.client("ec2", region_name="us-east-1") + + vpn_gateway = client.create_vpn_gateway(Type="ipsec.1").get("VpnGateway", {}) + customer_gateway = client.create_customer_gateway( + Type="ipsec.1", PublicIp="205.251.242.54", BgpAsn=65534, + ).get("CustomerGateway", {}) + vpn_connection = client.create_vpn_connection( + 
Type="ipsec.1", + VpnGatewayId=vpn_gateway["VpnGatewayId"], + CustomerGatewayId=customer_gateway["CustomerGatewayId"], + ).get("VpnConnection", {}) + + vpn_connection["Type"].should.equal("ipsec.1") + vpn_connection["VpnGatewayId"].should.equal(vpn_gateway["VpnGatewayId"]) + vpn_connection["CustomerGatewayId"].should.equal( + customer_gateway["CustomerGatewayId"] + ) diff --git a/tests/test_ec2/test_windows.py b/tests/test_ec2/test_windows.py index 364ac2f8a..ae2f7b29a 100644 --- a/tests/test_ec2/test_windows.py +++ b/tests/test_ec2/test_windows.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_windows(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_windows(): + pass diff --git a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py similarity index 92% rename from tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py rename to tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py index eb685d80a..3f676af96 100644 --- a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py +++ b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py @@ -1,6 +1,6 @@ import boto3 -from moto import mock_ec2_instance_connect +from moto import mock_ec2instanceconnect pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV5+voluw2zmzqpqCAqtsyoP01TQ8Ydx1eS1yD6wUsHcPqMIqpo57YxiC8XPwrdeKQ6GG6MC3bHsgXoPypGP0LyixbiuLTU31DnnqorcHt4bWs6rQa7dK2pCCflz2fhYRt5ZjqSNsAKivIbqkH66JozN0SySIka3kEV79GdB0BicioKeEJlCwM9vvxafyzjWf/z8E0lh4ni3vkLpIVJ0t5l+Qd9QMJrT6Is0SCQPVagTYZoi8+fWDoGsBa8vyRwDjEzBl28ZplKh9tSyDkRIYszWTpmK8qHiqjLYZBfAxXjGJbEYL1iig4ZxvbYzKEiKSBi1ZMW9iWjHfZDZuxXAmB @@ -8,7 +8,7 @@ example """ -@mock_ec2_instance_connect +@mock_ec2instanceconnect def test_send_ssh_public_key(): client = boto3.client("ec2-instance-connect", region_name="us-east-1") fake_request_id = "example-2a47-4c91-9700-e37e85162cb6" diff --git a/tests/test_ecr/__init__.py b/tests/test_ecr/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_ecr/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 82a2c7521..e44307bee 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -15,7 +15,7 @@ from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr -from nose import SkipTest +from unittest import SkipTest def _create_image_digest(contents=None): @@ -318,6 +318,9 @@ def test_list_images(): type(response["imageIds"]).should.be(list) len(response["imageIds"]).should.be(3) + for image in response["imageIds"]: + image["imageDigest"].should.contain("sha") + image_tags = ["latest", "v1", "v2"] set( [ @@ -331,6 +334,7 @@ def test_list_images(): type(response["imageIds"]).should.be(list) len(response["imageIds"]).should.be(1) response["imageIds"][0]["imageTag"].should.equal("oldest") + response["imageIds"][0]["imageDigest"].should.contain("sha") @mock_ecr @@ -538,7 +542,7 @@ def test_describe_image_that_doesnt_exist(): repositoryName="test_repository", imageIds=[{"imageTag": "testtag"}], registryId="123", - ).should.throw(ClientError, error_msg1) + ).should.throw(client.exceptions.ImageNotFoundException, error_msg1) error_msg2 = re.compile( r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", diff --git a/tests/test_ecs/__init__.py b/tests/test_ecs/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_ecs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 69c920192..8b6b27987 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1,8 +1,6 @@ from __future__ import unicode_literals from datetime import datetime -from copy import deepcopy - from botocore.exceptions import ClientError import boto3 import sure # noqa @@ -10,10 +8,9 @@ import json from moto.ec2 import utils as ec2_utils from uuid import UUID -from moto import mock_cloudformation, mock_elbv2 from moto import mock_ecs from moto import mock_ec2 -from nose.tools import assert_raises +import pytest @mock_ecs @@ -254,6 +251,7 @@ def test_describe_task_definition(): "logConfiguration": {"logDriver": "json-file"}, } ], + tags=[{"key": "Name", "value": "test_ecs_task"}], ) _ = client.register_task_definition( family="test_ecs_task", @@ -297,6 +295,11 @@ def test_describe_task_definition(): "arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2" ) + response = client.describe_task_definition( + taskDefinition="test_ecs_task:1", include=["TAGS"] + ) + response["tags"].should.equal([{"key": "Name", "value": "test_ecs_task"}]) + @mock_ecs def test_deregister_task_definition(): @@ -512,6 +515,7 @@ def test_describe_services(): serviceName="test_ecs_service1", taskDefinition="test_ecs_task", desiredCount=2, + tags=[{"key": "Name", "value": "test_ecs_service1"}], ) _ = client.create_service( cluster="test_ecs_cluster", @@ -554,6 +558,18 @@ def test_describe_services(): datetime.now() - response["services"][0]["deployments"][0]["updatedAt"].replace(tzinfo=None) ).seconds.should.be.within(0, 10) + response = client.describe_services( + cluster="test_ecs_cluster", + services=[ + "test_ecs_service1", + "arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2", + ], + include=["TAGS"], + ) + response["services"][0]["tags"].should.equal( + [{"key": "Name", "value": "test_ecs_service1"}] + ) + 
response["services"][1]["tags"].should.equal([]) @mock_ecs @@ -660,6 +676,15 @@ def test_update_service(): response["service"]["desiredCount"].should.equal(0) response["service"]["schedulingStrategy"].should.equal("REPLICA") + # Verify we can pass the ARNs of the cluster and service + response = client.update_service( + cluster=response["service"]["clusterArn"], + service=response["service"]["serviceArn"], + taskDefinition="test_ecs_task", + desiredCount=1, + ) + response["service"]["desiredCount"].should.equal(1) + @mock_ecs def test_update_missing_service(): @@ -835,7 +860,7 @@ def test_deregister_container_instance(): containerInstances=[container_instance_id], startedBy="moto", ) - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: ecs_client.deregister_container_instance( cluster=test_cluster_name, containerInstance=container_instance_id ).should.have.raised(Exception) @@ -925,8 +950,9 @@ def test_describe_container_instances(): for instance in response["containerInstances"]: instance.keys().should.contain("runningTasksCount") instance.keys().should.contain("pendingTasksCount") + instance["registeredAt"].should.be.a("datetime.datetime") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: ecs_client.describe_container_instances( cluster=test_cluster_name, containerInstances=[] ) @@ -1122,6 +1148,71 @@ def test_run_task(): response["tasks"][0]["stoppedReason"].should.equal("") +@mock_ec2 +@mock_ecs +def test_run_task_default_cluster(): + client = boto3.client("ecs", region_name="us-east-1") + ec2 = boto3.resource("ec2", region_name="us-east-1") + + test_cluster_name = "default" + + _ = client.create_cluster(clusterName=test_cluster_name) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", MinCount=1, MaxCount=1 + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + response = client.run_task( + launchType="FARGATE", + overrides={}, + taskDefinition="test_ecs_task", + count=2, + startedBy="moto", + ) + len(response["tasks"]).should.equal(2) + response["tasks"][0]["taskArn"].should.contain( + "arn:aws:ecs:us-east-1:012345678910:task/" + ) + response["tasks"][0]["clusterArn"].should.equal( + "arn:aws:ecs:us-east-1:012345678910:cluster/default" + ) + response["tasks"][0]["taskDefinitionArn"].should.equal( + "arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1" + ) + response["tasks"][0]["containerInstanceArn"].should.contain( + "arn:aws:ecs:us-east-1:012345678910:container-instance/" + ) + response["tasks"][0]["overrides"].should.equal({}) + response["tasks"][0]["lastStatus"].should.equal("RUNNING") + response["tasks"][0]["desiredStatus"].should.equal("RUNNING") + response["tasks"][0]["startedBy"].should.equal("moto") + response["tasks"][0]["stoppedReason"].should.equal("") + + @mock_ec2 @mock_ecs def test_start_task(): @@ -1556,120 +1647,6 @@ def test_resource_reservation_and_release_memory_reservation(): 
container_instance_description["runningTasksCount"].should.equal(0) -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - } - }, - } - template_json = json.dumps(template) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(0) - - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation_no_name(): - # cloudformation should create a cluster name for you if you do not provide it - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": {"testCluster": {"Type": "AWS::ECS::Cluster"}}, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster1"}, - } - }, - } - template2 = deepcopy(template1) - template2["Resources"]["testCluster"]["Properties"]["ClusterName"] = "testcluster2" - template1_json = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - stack_resp = cfn_conn.create_stack( - StackName="test_stack", TemplateBody=template1_json - ) - - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName=stack_resp["StackId"], TemplateBody=template2_json) - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - resp["clusterArns"][0].endswith("testcluster2").should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_task_definition_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - } - }, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - stack_name = "test_stack" - cfn_conn.create_stack(StackName=stack_name, TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_task_definitions() - len(resp["taskDefinitionArns"]).should.equal(1) - task_definition_arn = resp["taskDefinitionArns"][0] - - 
task_definition_details = cfn_conn.describe_stack_resource( - StackName=stack_name, LogicalResourceId="testTaskDefinition" - )["StackResourceDetail"] - task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn) - - @mock_ec2 @mock_ecs def test_task_definitions_unable_to_be_placed(): @@ -1784,142 +1761,6 @@ def test_task_definitions_with_port_clash(): response["tasks"][0]["stoppedReason"].should.equal("") -@mock_ecs -@mock_cloudformation -def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "Family": "testTaskDefinition1", - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - } - }, - } - template1_json = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template1_json) - - template2 = deepcopy(template1) - template2["Resources"]["testTaskDefinition"]["Properties"][ - "Family" - ] = "testTaskDefinition2" - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_task_definitions(familyPrefix="testTaskDefinition2") - len(resp["taskDefinitionArns"]).should.equal(1) - resp["taskDefinitionArns"][0].endswith("testTaskDefinition2:1").should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_service_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": "testCluster"}, - "DesiredCount": 10, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - }, - }, - }, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_services(cluster="testcluster") - len(resp["serviceArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_service_through_cloudformation_should_trigger_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": 
"testCluster"}, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - "DesiredCount": 10, - }, - }, - }, - } - template_json1 = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json1) - template2 = deepcopy(template1) - template2["Resources"]["testService"]["Properties"]["DesiredCount"] = 5 - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_services(cluster="testcluster") - len(resp["serviceArns"]).should.equal(1) - - @mock_ec2 @mock_ecs def test_attributes(): @@ -2539,3 +2380,466 @@ def test_ecs_service_untag_resource_multiple_tags(): resourceArn=response["service"]["serviceArn"] ) response["tags"].should.equal([{"key": "hello", "value": "world"}]) + + +@mock_ecs +def test_ecs_task_definition_placement_constraints(): + client = boto3.client("ecs", region_name="us-east-1") + response = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + networkMode="bridge", + tags=[ + {"key": "createdBy", "value": "moto-unittest"}, + {"key": "foo", "value": "bar"}, + ], + placementConstraints=[ + {"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"} + ], + ) + type(response["taskDefinition"]["placementConstraints"]).should.be(list) + response["taskDefinition"]["placementConstraints"].should.equal( + [{"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"}] + ) + + +@mock_ecs +def test_create_task_set(): + cluster_name = "test_ecs_cluster" + service_name = "test_ecs_service" + task_def_name = "test_ecs_task" + + client = boto3.client("ecs", region_name="us-east-1") + _ = client.create_cluster(clusterName=cluster_name) + _ = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + _ = client.create_service( + cluster=cluster_name, + serviceName=service_name, + taskDefinition=task_def_name, + desiredCount=2, + deploymentController={"type": "EXTERNAL"}, + ) + load_balancers = [ + { + "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a", + "containerName": "hello_world", + "containerPort": 8080, + }, + ] + + task_set = client.create_task_set( + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, + loadBalancers=load_balancers, + )["taskSet"] + + cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][ + "clusterArn" + ] + service_arn = client.describe_services( + cluster=cluster_name, services=[service_name] + )["services"][0]["serviceArn"] + assert task_set["clusterArn"] == cluster_arn + assert task_set["serviceArn"] == service_arn + assert task_set["taskDefinition"].endswith("{0}:1".format(task_def_name)) + assert task_set["scale"] == {"value": 100.0, "unit": "PERCENT"} + assert ( + 
task_set["loadBalancers"][0]["targetGroupArn"] + == "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a" + ) + assert task_set["loadBalancers"][0]["containerPort"] == 8080 + assert task_set["loadBalancers"][0]["containerName"] == "hello_world" + + +@mock_ecs +def test_describe_task_sets(): + cluster_name = "test_ecs_cluster" + service_name = "test_ecs_service" + task_def_name = "test_ecs_task" + + client = boto3.client("ecs", region_name="us-east-1") + _ = client.create_cluster(clusterName=cluster_name) + _ = client.register_task_definition( + family=task_def_name, + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + _ = client.create_service( + cluster=cluster_name, + serviceName=service_name, + taskDefinition=task_def_name, + desiredCount=2, + deploymentController={"type": "EXTERNAL"}, + ) + + load_balancers = [ + { + "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a", + "containerName": "hello_world", + "containerPort": 8080, + } + ] + + _ = client.create_task_set( + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, + loadBalancers=load_balancers, + ) + task_sets = client.describe_task_sets(cluster=cluster_name, service=service_name)[ + "taskSets" + ] + assert "tags" not in task_sets[0] + + task_sets = client.describe_task_sets( + cluster=cluster_name, service=service_name, include=["TAGS"], + )["taskSets"] + + cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][ + "clusterArn" + ] + + service_arn = client.describe_services( + cluster=cluster_name, services=[service_name] + )["services"][0]["serviceArn"] + + assert "tags" in task_sets[0] + assert len(task_sets) == 1 + assert task_sets[0]["taskDefinition"].endswith("{0}:1".format(task_def_name)) + assert task_sets[0]["clusterArn"] == cluster_arn + assert task_sets[0]["serviceArn"] == service_arn + assert task_sets[0]["serviceArn"].endswith(service_name) + assert task_sets[0]["scale"] == {"value": 100.0, "unit": "PERCENT"} + assert task_sets[0]["taskSetArn"].endswith(task_sets[0]["id"]) + assert ( + task_sets[0]["loadBalancers"][0]["targetGroupArn"] + == "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a" + ) + assert task_sets[0]["loadBalancers"][0]["containerPort"] == 8080 + assert task_sets[0]["loadBalancers"][0]["containerName"] == "hello_world" + + +@mock_ecs +def test_delete_task_set(): + cluster_name = "test_ecs_cluster" + service_name = "test_ecs_service" + task_def_name = "test_ecs_task" + + client = boto3.client("ecs", region_name="us-east-1") + _ = client.create_cluster(clusterName=cluster_name) + _ = client.register_task_definition( + family=task_def_name, + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + _ = client.create_service( + cluster=cluster_name, + serviceName=service_name, + taskDefinition=task_def_name, + desiredCount=2, + deploymentController={"type": "EXTERNAL"}, + ) + + 
task_set = client.create_task_set( + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + )["taskSet"] + + task_sets = client.describe_task_sets( + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + )["taskSets"] + + assert len(task_sets) == 1 + + response = client.delete_task_set( + cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], + ) + assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"] + + task_sets = client.describe_task_sets( + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + )["taskSets"] + + assert len(task_sets) == 0 + + with pytest.raises(ClientError): + _ = client.delete_task_set( + cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], + ) + + +@mock_ecs +def test_update_service_primary_task_set(): + cluster_name = "test_ecs_cluster" + service_name = "test_ecs_service" + task_def_name = "test_ecs_task" + + client = boto3.client("ecs", region_name="us-east-1") + _ = client.create_cluster(clusterName=cluster_name) + _ = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + _ = client.create_service( + cluster=cluster_name, + serviceName=service_name, + desiredCount=2, + deploymentController={"type": "EXTERNAL"}, + ) + + task_set = client.create_task_set( + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + )["taskSet"] + + service = client.describe_services(cluster=cluster_name, services=[service_name],)[ + "services" + ][0] + + _ = client.update_service_primary_task_set( + cluster=cluster_name, + service=service_name, + primaryTaskSet=task_set["taskSetArn"], + ) + + service = client.describe_services(cluster=cluster_name, services=[service_name],)[ + "services" + ][0] + assert service["taskSets"][0]["status"] == "PRIMARY" + assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"] + + another_task_set = client.create_task_set( + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + )["taskSet"] + service = client.describe_services(cluster=cluster_name, services=[service_name],)[ + "services" + ][0] + assert service["taskSets"][1]["status"] == "ACTIVE" + + _ = client.update_service_primary_task_set( + cluster=cluster_name, + service=service_name, + primaryTaskSet=another_task_set["taskSetArn"], + ) + service = client.describe_services(cluster=cluster_name, services=[service_name],)[ + "services" + ][0] + assert service["taskSets"][0]["status"] == "ACTIVE" + assert service["taskSets"][1]["status"] == "PRIMARY" + assert service["taskDefinition"] == service["taskSets"][1]["taskDefinition"] + + +@mock_ecs +def test_update_task_set(): + cluster_name = "test_ecs_cluster" + service_name = "test_ecs_service" + task_def_name = "test_ecs_task" + + client = boto3.client("ecs", region_name="us-east-1") + _ = client.create_cluster(clusterName=cluster_name) + _ = client.register_task_definition( + family=task_def_name, + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": 
"json-file"}, + } + ], + ) + _ = client.create_service( + cluster=cluster_name, + serviceName=service_name, + desiredCount=2, + deploymentController={"type": "EXTERNAL"}, + ) + + task_set = client.create_task_set( + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + )["taskSet"] + + another_task_set = client.create_task_set( + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + )["taskSet"] + assert another_task_set["scale"]["unit"] == "PERCENT" + assert another_task_set["scale"]["value"] == 100.0 + + client.update_task_set( + cluster=cluster_name, + service=service_name, + taskSet=task_set["taskSetArn"], + scale={"value": 25.0, "unit": "PERCENT"}, + ) + + updated_task_set = client.describe_task_sets( + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + )["taskSets"][0] + assert updated_task_set["scale"]["value"] == 25.0 + assert updated_task_set["scale"]["unit"] == "PERCENT" + + +@mock_ec2 +@mock_ecs +def test_list_tasks_with_filters(): + ecs = boto3.client("ecs", region_name="us-east-1") + ec2 = boto3.resource("ec2", region_name="us-east-1") + + _ = ecs.create_cluster(clusterName="test_cluster_1") + _ = ecs.create_cluster(clusterName="test_cluster_2") + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", MinCount=1, MaxCount=1 + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = ecs.register_container_instance( + cluster="test_cluster_1", instanceIdentityDocument=instance_id_document + ) + _ = ecs.register_container_instance( + cluster="test_cluster_2", instanceIdentityDocument=instance_id_document + ) + + container_instances = ecs.list_container_instances(cluster="test_cluster_1") + container_id_1 = container_instances["containerInstanceArns"][0].split("/")[-1] + container_instances = ecs.list_container_instances(cluster="test_cluster_2") + container_id_2 = container_instances["containerInstanceArns"][0].split("/")[-1] + + test_container_def = { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}], + "logConfiguration": {"logDriver": "json-file"}, + } + + _ = ecs.register_task_definition( + family="test_task_def_1", containerDefinitions=[test_container_def], + ) + + _ = ecs.register_task_definition( + family="test_task_def_2", containerDefinitions=[test_container_def], + ) + + _ = ecs.start_task( + cluster="test_cluster_1", + taskDefinition="test_task_def_1", + overrides={}, + containerInstances=[container_id_1], + startedBy="foo", + ) + + resp = ecs.start_task( + cluster="test_cluster_2", + taskDefinition="test_task_def_2", + overrides={}, + containerInstances=[container_id_2], + startedBy="foo", + ) + task_to_stop = resp["tasks"][0]["taskArn"] + + _ = ecs.start_task( + cluster="test_cluster_1", + taskDefinition="test_task_def_1", + overrides={}, + containerInstances=[container_id_1], + startedBy="bar", + ) + + len(ecs.list_tasks()["taskArns"]).should.equal(3) + + len(ecs.list_tasks(cluster="test_cluster_1")["taskArns"]).should.equal(2) + len(ecs.list_tasks(cluster="test_cluster_2")["taskArns"]).should.equal(1) + + len(ecs.list_tasks(containerInstance="bad-id")["taskArns"]).should.equal(0) + len(ecs.list_tasks(containerInstance=container_id_1)["taskArns"]).should.equal(2) + len(ecs.list_tasks(containerInstance=container_id_2)["taskArns"]).should.equal(1) + + 
len(ecs.list_tasks(family="non-existent-family")["taskArns"]).should.equal(0) + len(ecs.list_tasks(family="test_task_def_1")["taskArns"]).should.equal(2) + len(ecs.list_tasks(family="test_task_def_2")["taskArns"]).should.equal(1) + + len(ecs.list_tasks(startedBy="non-existent-entity")["taskArns"]).should.equal(0) + len(ecs.list_tasks(startedBy="foo")["taskArns"]).should.equal(2) + len(ecs.list_tasks(startedBy="bar")["taskArns"]).should.equal(1) + + len(ecs.list_tasks(desiredStatus="RUNNING")["taskArns"]).should.equal(3) + _ = ecs.stop_task(cluster="test_cluster_2", task=task_to_stop, reason="for testing") + len(ecs.list_tasks(desiredStatus="RUNNING")["taskArns"]).should.equal(2) + len(ecs.list_tasks(desiredStatus="STOPPED")["taskArns"]).should.equal(1) + + resp = ecs.list_tasks(cluster="test_cluster_1", startedBy="foo") + len(resp["taskArns"]).should.equal(1) + + resp = ecs.list_tasks(containerInstance=container_id_1, startedBy="bar") + len(resp["taskArns"]).should.equal(1) diff --git a/tests/test_ecs/test_ecs_cloudformation.py b/tests/test_ecs/test_ecs_cloudformation.py new file mode 100644 index 000000000..fcb1beec7 --- /dev/null +++ b/tests/test_ecs/test_ecs_cloudformation.py @@ -0,0 +1,274 @@ +import boto3 +import json +from copy import deepcopy +from moto import mock_cloudformation, mock_ecs +from moto.core.utils import pascal_to_camelcase, remap_nested_keys +import sure # noqa + + +@mock_ecs +@mock_cloudformation +def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "Family": "testTaskDefinition1", + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + } + }, + } + template1_json = json.dumps(template1) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template1_json) + + template2 = deepcopy(template1) + template2["Resources"]["testTaskDefinition"]["Properties"][ + "Family" + ] = "testTaskDefinition2" + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_task_definitions(familyPrefix="testTaskDefinition2") + len(resp["taskDefinitionArns"]).should.equal(1) + resp["taskDefinitionArns"][0].endswith("testTaskDefinition2:1").should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_service_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "DesiredCount": 10, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + }, + }, + }, + } + template_json = json.dumps(template) + cfn_conn = 
boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_services(cluster="testcluster") + len(resp["serviceArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_service_through_cloudformation_should_trigger_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + "DesiredCount": 10, + }, + }, + }, + } + template_json1 = json.dumps(template1) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json1) + template2 = deepcopy(template1) + template2["Resources"]["testService"]["Properties"]["DesiredCount"] = 5 + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_services(cluster="testcluster") + len(resp["serviceArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + } + }, + } + template_json = json.dumps(template) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(0) + + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": {"testCluster": {"Type": "AWS::ECS::Cluster"}}, + } + template_json = json.dumps(template) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": 
"testcluster1"}, + } + }, + } + template2 = deepcopy(template1) + template2["Resources"]["testCluster"]["Properties"]["ClusterName"] = "testcluster2" + template1_json = json.dumps(template1) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + stack_resp = cfn_conn.create_stack( + StackName="test_stack", TemplateBody=template1_json + ) + + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName=stack_resp["StackId"], TemplateBody=template2_json) + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + resp["clusterArns"][0].endswith("testcluster2").should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_task_definition_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + "PortMappings": [ + { + "ContainerPort": 123, + "HostPort": 123, + "Protocol": "tcp", + }, + ], + } + ], + "Volumes": [{"Name": "ecs-vol"}], + }, + } + }, + } + template_json = json.dumps(template) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + stack_name = "test_stack" + cfn_conn.create_stack(StackName=stack_name, TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_task_definitions() + len(resp["taskDefinitionArns"]).should.equal(1) + task_definition_arn = resp["taskDefinitionArns"][0] + + task_definition_details = cfn_conn.describe_stack_resource( + StackName=stack_name, LogicalResourceId="testTaskDefinition" + )["StackResourceDetail"] + task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn) + + task_definition = ecs_conn.describe_task_definition( + taskDefinition=task_definition_arn + ).get("taskDefinition") + expected_properties = remap_nested_keys( + template["Resources"]["testTaskDefinition"]["Properties"], pascal_to_camelcase + ) + task_definition["volumes"].should.equal(expected_properties["volumes"]) + task_definition["containerDefinitions"].should.equal( + expected_properties["containerDefinitions"] + ) diff --git a/tests/test_elasticbeanstalk/test_eb.py b/tests/test_elasticbeanstalk/test_eb.py new file mode 100644 index 000000000..42eb09be3 --- /dev/null +++ b/tests/test_elasticbeanstalk/test_eb.py @@ -0,0 +1,130 @@ +import boto3 +import sure # noqa +from botocore.exceptions import ClientError + +from moto import mock_elasticbeanstalk + + +@mock_elasticbeanstalk +def test_create_application(): + # Create Elastic Beanstalk Application + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + app = conn.create_application(ApplicationName="myapp",) + app["Application"]["ApplicationName"].should.equal("myapp") + + +@mock_elasticbeanstalk +def test_create_application_dup(): + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + conn.create_application.when.called_with(ApplicationName="myapp",).should.throw( + ClientError + ) + + +@mock_elasticbeanstalk +def test_describe_applications(): + # Create Elastic Beanstalk Application + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + + apps = 
conn.describe_applications() + len(apps["Applications"]).should.equal(1) + apps["Applications"][0]["ApplicationName"].should.equal("myapp") + + +@mock_elasticbeanstalk +def test_create_environment(): + # Create Elastic Beanstalk Environment + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + app = conn.create_application(ApplicationName="myapp",) + env = conn.create_environment(ApplicationName="myapp", EnvironmentName="myenv",) + env["EnvironmentName"].should.equal("myenv") + + +@mock_elasticbeanstalk +def test_describe_environments(): + # List Elastic Beanstalk Envs + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + conn.create_environment( + ApplicationName="myapp", EnvironmentName="myenv", + ) + + envs = conn.describe_environments() + envs = envs["Environments"] + len(envs).should.equal(1) + envs[0]["ApplicationName"].should.equal("myapp") + envs[0]["EnvironmentName"].should.equal("myenv") + + +def tags_dict_to_list(tag_dict): + tag_list = [] + for key, value in tag_dict.items(): + tag_list.append({"Key": key, "Value": value}) + return tag_list + + +def tags_list_to_dict(tag_list): + tag_dict = {} + for tag in tag_list: + tag_dict[tag["Key"]] = tag["Value"] + return tag_dict + + +@mock_elasticbeanstalk +def test_create_environment_tags(): + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + env_tags = {"initial key": "initial value"} + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + Tags=tags_dict_to_list(env_tags), + ) + + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags["ResourceArn"].should.equal(env["EnvironmentArn"]) + tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags) + + +@mock_elasticbeanstalk +def test_update_tags(): + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + env_tags = { + "initial key": "initial value", + "to remove": "delete me", + "to update": "original", + } + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + Tags=tags_dict_to_list(env_tags), + ) + + extra_env_tags = { + "to update": "new", + "extra key": "extra value", + } + conn.update_tags_for_resource( + ResourceArn=env["EnvironmentArn"], + TagsToAdd=tags_dict_to_list(extra_env_tags), + TagsToRemove=["to remove"], + ) + + total_env_tags = env_tags.copy() + total_env_tags.update(extra_env_tags) + del total_env_tags["to remove"] + + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags["ResourceArn"].should.equal(env["EnvironmentArn"]) + tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags) + + +@mock_elasticbeanstalk +def test_list_available_solution_stacks(): + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + stacks = conn.list_available_solution_stacks() + len(stacks["SolutionStacks"]).should.be.greater_than(0) + len(stacks["SolutionStacks"]).should.be.equal(len(stacks["SolutionStackDetails"])) diff --git a/tests/test_elb/__init__.py b/tests/test_elb/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_elb/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
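The new Elastic Beanstalk tests above convert between dict-shaped and list-shaped tags by hand before calling the tagging APIs. A condensed sketch of the same round trip (create a tagged environment, read the tags back via `list_tags_for_resource`); the application/environment names and the `team` tag are illustrative, not taken from the patch:

```python
import boto3
from moto import mock_elasticbeanstalk


def tags_dict_to_list(tag_dict):
    # {"k": "v"} -> [{"Key": "k", "Value": "v"}], the shape the EB API expects.
    return [{"Key": k, "Value": v} for k, v in tag_dict.items()]


@mock_elasticbeanstalk
def test_environment_tags_round_trip():
    conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
    conn.create_application(ApplicationName="myapp")
    env = conn.create_environment(
        ApplicationName="myapp",
        EnvironmentName="myenv",
        Tags=tags_dict_to_list({"team": "platform"}),
    )
    tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"])
    assert {t["Key"]: t["Value"] for t in tags["ResourceTags"]} == {"team": "platform"}
```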
diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 1583ea544..427cb740c 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -11,7 +11,7 @@ from boto.ec2.elb.attributes import ( ) from botocore.exceptions import ClientError from boto.exception import BotoServerError -from nose.tools import assert_raises +import pytest import sure # noqa from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @@ -123,7 +123,7 @@ def test_create_and_delete_boto3_support(): def test_create_load_balancer_with_no_listeners_defined(): client = boto3.client("elb", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_load_balancer( LoadBalancerName="my-lb", Listeners=[], @@ -180,12 +180,12 @@ def test_apply_security_groups_to_load_balancer(): assert balancer["SecurityGroups"] == [security_group.id] # Using a not-real security group raises an error - with assert_raises(ClientError) as error: + with pytest.raises(ClientError) as error: response = client.apply_security_groups_to_load_balancer( LoadBalancerName="my-lb", SecurityGroups=["not-really-a-security-group"] ) assert "One or more of the specified security groups do not exist." in str( - error.exception + error.value ) @@ -255,7 +255,7 @@ def test_create_and_delete_listener_boto3_support(): balancer["ListenerDescriptions"][1]["Listener"]["InstancePort"].should.equal(8443) # Creating this listener with an conflicting definition throws error - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_load_balancer_listeners( LoadBalancerName="my-lb", Listeners=[ diff --git a/tests/test_elbv2/__init__.py b/tests/test_elbv2/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_elbv2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
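Beyond the rename, `pytest.raises` exposes the full `ClientError` on `.value`, which is how the ELB and ELBv2 hunks assert on `operation_name` and the rendered message. A small sketch of that access pattern, mirroring `test_create_load_balancer_with_no_listeners_defined` above (the `AvailabilityZones` argument is an assumption added for completeness, since the original call is truncated in the hunk):

```python
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_elb


@mock_elb
def test_client_error_details_via_exc_info():
    client = boto3.client("elb", region_name="us-east-1")
    with pytest.raises(ClientError) as error:
        # A load balancer must define at least one listener; moto rejects this.
        client.create_load_balancer(
            LoadBalancerName="my-lb",
            Listeners=[],
            AvailabilityZones=["us-east-1a"],  # assumed parameter, see lead-in
        )
    # error.value is the ClientError itself; both the operation name and the
    # rendered "An error occurred (...)" message are available on it.
    assert error.value.operation_name == "CreateLoadBalancer"
    assert "An error occurred" in str(error.value)
```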
diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index af1b19f09..cb8e13e52 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,14 +1,13 @@ from __future__ import unicode_literals -import json import os import boto3 import botocore from botocore.exceptions import ClientError, ParamValidationError -from nose.tools import assert_raises +import pytest import sure # noqa -from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation +from moto import mock_elbv2, mock_ec2, mock_acm from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID @@ -97,9 +96,9 @@ def test_describe_load_balancers(): response = conn.describe_load_balancers(Names=["my-lb"]) response.get("LoadBalancers")[0].get("LoadBalancerName").should.equal("my-lb") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_load_balancers(LoadBalancerArns=["not-a/real/arn"]) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_load_balancers(Names=["nope"]) @@ -133,7 +132,7 @@ def test_add_remove_tags(): lbs.should.have.length_of(1) lb = lbs[0] - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.add_tags(ResourceArns=["missing-arn"], Tags=[{"Key": "a", "Value": "b"}]) conn.add_tags( @@ -275,7 +274,7 @@ def test_create_target_group_and_listeners(): load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn") # Can't create a target group with an invalid protocol - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name="a-target", Protocol="HTTP", @@ -390,10 +389,10 @@ def test_create_target_group_and_listeners(): # Try to delete the target group and it fails because there's a # listener referencing it - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: conn.delete_target_group(TargetGroupArn=target_group.get("TargetGroupArn")) - e.exception.operation_name.should.equal("DeleteTargetGroup") - e.exception.args.should.equal( + e.value.operation_name.should.equal("DeleteTargetGroup") + e.value.args.should.equal( ( "An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", ) @@ -478,7 +477,7 @@ def test_create_invalid_target_group(): # Fail to create target group with name which length is 33 long_name = "A" * 33 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name=long_name, Protocol="HTTP", @@ -496,7 +495,7 @@ def test_create_invalid_target_group(): invalid_names = ["-name", "name-", "-name-", "example.com", "test@test", "Na--me"] for name in invalid_names: - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name=name, Protocol="HTTP", @@ -942,7 +941,7 @@ def test_handle_listener_rules(): load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn") # Can't create a target group with an invalid protocol - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name="a-target", Protocol="HTTP", @@ -994,12 +993,17 @@ def test_handle_listener_rules(): priority = 100 host = "xxx.example.com" path_pattern = "foobar" + pathpatternconfig_pattern = "foobar2" created_rule = conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, Conditions=[ {"Field": "host-header", "Values": [host]}, 
{"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ {"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"} @@ -1017,6 +1021,10 @@ def test_handle_listener_rules(): Conditions=[ {"Field": "host-header", "Values": [host]}, {"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ {"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"} @@ -1024,13 +1032,17 @@ def test_handle_listener_rules(): ) # test for PriorityInUse - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, Conditions=[ {"Field": "host-header", "Values": [host]}, {"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ { @@ -1067,11 +1079,11 @@ def test_handle_listener_rules(): ) # test for invalid describe rule request - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules() - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules(RuleArns=[]) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules( ListenerArn=http_listener_arn, RuleArns=[first_rule["RuleArn"]] ) @@ -1079,11 +1091,16 @@ def test_handle_listener_rules(): # modify rule partially new_host = "new.example.com" new_path_pattern = "new_path" + new_pathpatternconfig_pattern = "new_path2" modified_rule = conn.modify_rule( RuleArn=first_rule["RuleArn"], Conditions=[ {"Field": "host-header", "Values": [new_host]}, {"Field": "path-pattern", "Values": [new_path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [new_pathpatternconfig_pattern]}, + }, ], )["Rules"][0] @@ -1092,6 +1109,9 @@ def test_handle_listener_rules(): modified_rule.should.equal(obtained_rule) obtained_rule["Conditions"][0]["Values"][0].should.equal(new_host) obtained_rule["Conditions"][1]["Values"][0].should.equal(new_path_pattern) + obtained_rule["Conditions"][2]["Values"][0].should.equal( + new_pathpatternconfig_pattern + ) obtained_rule["Actions"][0]["TargetGroupArn"].should.equal( target_group.get("TargetGroupArn") ) @@ -1105,7 +1125,7 @@ def test_handle_listener_rules(): } ] ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.set_rule_priorities( RulePriorities=[ {"RuleArn": first_rule["RuleArn"], "Priority": 999}, @@ -1121,7 +1141,7 @@ def test_handle_listener_rules(): # test for invalid action type safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1140,7 +1160,7 @@ def test_handle_listener_rules(): # test for invalid action type safe_priority = 2 invalid_target_group_arn = target_group.get("TargetGroupArn") + "x" - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1153,7 +1173,7 @@ def test_handle_listener_rules(): # test for invalid condition field_name safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1168,7 +1188,7 @@ def test_handle_listener_rules(): # test for emptry condition 
value safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1183,7 +1203,7 @@ def test_handle_listener_rules(): # test for multiple condition value safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1240,7 +1260,7 @@ def test_describe_invalid_target_group(): ) # Check error raises correctly - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_target_groups(Names=["invalid"]) @@ -1338,7 +1358,7 @@ def test_set_ip_address_type(): arn = response["LoadBalancers"][0]["LoadBalancerArn"] # Internal LBs cant be dualstack yet - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_ip_address_type(LoadBalancerArn=arn, IpAddressType="dualstack") # Create internet facing one @@ -1390,7 +1410,7 @@ def test_set_security_groups(): resp = client.describe_load_balancers(LoadBalancerArns=[arn]) len(resp["LoadBalancers"][0]["SecurityGroups"]).should.equal(2) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existent"]) @@ -1431,11 +1451,11 @@ def test_set_subnets(): len(resp["LoadBalancers"][0]["AvailabilityZones"]).should.equal(3) # Only 1 AZ - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_subnets(LoadBalancerArn=arn, Subnets=[subnet1.id]) # Multiple subnets in same AZ - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_subnets( LoadBalancerArn=arn, Subnets=[subnet1.id, subnet2.id, subnet2.id] ) @@ -1624,7 +1644,7 @@ def test_modify_listener_http_to_https(): listener.certificate.should.equal(yahoo_arn) # No default cert - with assert_raises(ClientError): + with pytest.raises(ClientError): client.modify_listener( ListenerArn=listener_arn, Port=443, @@ -1635,7 +1655,7 @@ def test_modify_listener_http_to_https(): ) # Bad cert - with assert_raises(ClientError): + with pytest.raises(ClientError): client.modify_listener( ListenerArn=listener_arn, Port=443, @@ -1646,82 +1666,6 @@ def test_modify_listener_http_to_https(): ) -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_create_target_groups_through_cloudformation(): - cfn_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - # test that setting a name manually as well as letting cloudformation create a name both work - # this is a special case because test groups have a name length limit of 22 characters, and must be unique - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "testGroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 80, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 90, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup3": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "MyTargetGroup", - "Port": 70, 
- "Protocol": "HTTPS", - "VpcId": {"Ref": "testVPC"}, - }, - }, - }, - } - template_json = json.dumps(template) - cfn_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_target_groups_response = elbv2_client.describe_target_groups() - target_group_dicts = describe_target_groups_response["TargetGroups"] - assert len(target_group_dicts) == 3 - - # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) - # and one named MyTargetGroup - assert ( - len( - [ - tg - for tg in target_group_dicts - if tg["TargetGroupName"] == "MyTargetGroup" - ] - ) - == 1 - ) - assert ( - len( - [ - tg - for tg in target_group_dicts - if tg["TargetGroupName"].startswith("test-stack") - ] - ) - == 2 - ) - - @mock_elbv2 @mock_ec2 def test_redirect_action_listener_rule(): @@ -1795,95 +1739,6 @@ def test_redirect_action_listener_rule(): modify_listener_actions.should.equal(expected_default_actions) -@mock_elbv2 -@mock_cloudformation -def test_redirect_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "redirect", - "RedirectConfig": { - "Port": "443", - "Protocol": "HTTPS", - "StatusCode": "HTTP_301", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - describe_load_balancers_response["LoadBalancers"].should.have.length_of(1) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "redirect", - "RedirectConfig": { - "Port": "443", - "Protocol": "HTTPS", - "StatusCode": "HTTP_301", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_cognito_action_listener_rule(): @@ -1941,97 +1796,6 @@ def test_cognito_action_listener_rule(): describe_listener_actions.should.equal(action) -@mock_elbv2 -@mock_cloudformation -def test_cognito_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - 
"Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "authenticate-cognito", - "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), - "UserPoolClientId": "abcd1234abcd", - "UserPoolDomain": "testpool", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "authenticate-cognito", - "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), - "UserPoolClientId": "abcd1234abcd", - "UserPoolDomain": "testpool", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_fixed_response_action_listener_rule(): @@ -2087,93 +1851,6 @@ def test_fixed_response_action_listener_rule(): describe_listener_actions.should.equal(action) -@mock_elbv2 -@mock_cloudformation -def test_fixed_response_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "fixed-response", - "FixedResponseConfig": { - "ContentType": "text/plain", - "MessageBody": "This page does not exist", - 
"StatusCode": "404", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "fixed-response", - "FixedResponseConfig": { - "ContentType": "text/plain", - "MessageBody": "This page does not exist", - "StatusCode": "404", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_fixed_response_action_listener_rule_validates_status_code(): @@ -2207,7 +1884,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): "MessageBody": "This page does not exist", }, } - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -2257,7 +1934,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): "MessageBody": "This page does not exist", }, } - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -2274,7 +1951,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): }, } - with assert_raises(ClientError) as invalid_status_code_exception: + with pytest.raises(ClientError) as invalid_status_code_exception: conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -2282,7 +1959,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): DefaultActions=[invalid_status_code_action], ) - invalid_status_code_exception.exception.response["Error"]["Code"].should.equal( + invalid_status_code_exception.value.response["Error"]["Code"].should.equal( "ValidationError" ) @@ -2321,13 +1998,13 @@ def test_fixed_response_action_listener_rule_validates_content_type(): "StatusCode": "200", }, } - with assert_raises(ClientError) as invalid_content_type_exception: + with pytest.raises(ClientError) as invalid_content_type_exception: conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", Port=80, DefaultActions=[invalid_content_type_action], ) - invalid_content_type_exception.exception.response["Error"]["Code"].should.equal( + invalid_content_type_exception.value.response["Error"]["Code"].should.equal( "InvalidLoadBalancerAction" ) diff --git a/tests/test_elbv2/test_elbv2_cloudformation.py b/tests/test_elbv2/test_elbv2_cloudformation.py new file mode 100644 index 000000000..cc7ba8246 --- /dev/null +++ b/tests/test_elbv2/test_elbv2_cloudformation.py @@ -0,0 +1,348 @@ +import boto3 +import json + +from moto import mock_elbv2, mock_ec2, mock_cloudformation +from moto.core import ACCOUNT_ID + + +@mock_elbv2 +@mock_cloudformation +def test_redirect_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": 
"AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "redirect", + "RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + describe_load_balancers_response["LoadBalancers"].should.have.length_of(1) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "redirect", + "RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + }, + } + ] + ) + + +@mock_elbv2 +@mock_cloudformation +def test_cognito_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "authenticate-cognito", + "AuthenticateCognitoConfig": { + "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( + ACCOUNT_ID + ), + "UserPoolClientId": "abcd1234abcd", + "UserPoolDomain": "testpool", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + 
describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "authenticate-cognito", + "AuthenticateCognitoConfig": { + "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( + ACCOUNT_ID + ), + "UserPoolClientId": "abcd1234abcd", + "UserPoolDomain": "testpool", + }, + } + ] + ) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_create_target_groups_through_cloudformation(): + cfn_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + # test that setting a name manually as well as letting cloudformation create a name both work + # this is a special case because test groups have a name length limit of 22 characters, and must be unique + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + }, + } + template_json = json.dumps(template) + cfn_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response["TargetGroups"] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert ( + len( + [ + tg + for tg in target_group_dicts + if tg["TargetGroupName"] == "MyTargetGroup" + ] + ) + == 1 + ) + assert ( + len( + [ + tg + for tg in target_group_dicts + if tg["TargetGroupName"].startswith("test-stack") + ] + ) + == 2 + ) + + +@mock_elbv2 +@mock_cloudformation +def test_fixed_response_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": 
"AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "fixed-response", + "FixedResponseConfig": { + "ContentType": "text/plain", + "MessageBody": "This page does not exist", + "StatusCode": "404", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "fixed-response", + "FixedResponseConfig": { + "ContentType": "text/plain", + "MessageBody": "This page does not exist", + "StatusCode": "404", + }, + } + ] + ) diff --git a/tests/test_emr/__init__.py b/tests/test_emr/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_emr/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 212444abf..e2aa49444 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -5,11 +5,12 @@ from copy import deepcopy from datetime import datetime import boto3 +import json import pytz import six import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_emr @@ -60,6 +61,15 @@ input_instance_groups = [ "Market": "SPOT", "Name": "task-2", "BidPrice": "0.05", + "EbsConfiguration": { + "EbsBlockDeviceConfigs": [ + { + "VolumeSpecification": {"VolumeType": "gp2", "SizeInGB": 800}, + "VolumesPerInstance": 6, + }, + ], + "EbsOptimized": True, + }, }, ] @@ -97,7 +107,15 @@ def test_describe_cluster(): args["Instances"]["EmrManagedSlaveSecurityGroup"] = "slave-security-group" args["Instances"]["KeepJobFlowAliveWhenNoSteps"] = False args["Instances"]["ServiceAccessSecurityGroup"] = "service-access-security-group" + args["KerberosAttributes"] = { + "Realm": "MY-REALM.COM", + "KdcAdminPassword": "SuperSecretPassword2", + "CrossRealmTrustPrincipalPassword": "SuperSecretPassword3", + "ADDomainJoinUser": "Bob", + "ADDomainJoinPassword": "SuperSecretPassword4", + } args["Tags"] = [{"Key": "tag1", "Value": "val1"}, {"Key": "tag2", "Value": "val2"}] + args["SecurityConfiguration"] = "my-security-configuration" cluster_id = client.run_job_flow(**args)["JobFlowId"] @@ -135,6 +153,7 @@ def test_describe_cluster(): args["Instances"]["ServiceAccessSecurityGroup"] ) cl["Id"].should.equal(cluster_id) + cl["KerberosAttributes"].should.equal(args["KerberosAttributes"]) cl["LogUri"].should.equal(args["LogUri"]) cl["MasterPublicDnsName"].should.be.a(six.string_types) cl["Name"].should.equal(args["Name"]) @@ -142,7 +161,8 @@ def test_describe_cluster(): # cl['ReleaseLabel'].should.equal('emr-5.0.0') cl.shouldnt.have.key("RequestedAmiVersion") cl["RunningAmiVersion"].should.equal("1.0.0") - # cl['SecurityConfiguration'].should.be.a(six.string_types) + cl["SecurityConfiguration"].should.be.a(six.string_types) + cl["SecurityConfiguration"].should.equal(args["SecurityConfiguration"]) cl["ServiceRole"].should.equal(args["ServiceRole"]) 
status = cl["Status"] @@ -386,13 +406,13 @@ def test_run_job_flow(): @mock_emr def test_run_job_flow_with_invalid_params(): client = boto3.client("emr", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # cannot set both AmiVersion and ReleaseLabel args = deepcopy(run_job_flow_args) args["AmiVersion"] = "2.4" args["ReleaseLabel"] = "emr-5.0.0" client.run_job_flow(**args) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_emr @@ -430,6 +450,21 @@ def test_run_job_flow_with_visible_to_all_users(): resp["Cluster"]["VisibleToAllUsers"].should.equal(expected) +def _do_assertion_ebs_configuration(x, y): + total_volumes = 0 + total_size = 0 + for ebs_block in y["EbsConfiguration"]["EbsBlockDeviceConfigs"]: + total_volumes += ebs_block["VolumesPerInstance"] + total_size += ebs_block["VolumeSpecification"]["SizeInGB"] + # Multiply by total volumes + total_size = total_size * total_volumes + comp_total_size = 0 + for ebs_block in x["EbsBlockDevices"]: + comp_total_size += ebs_block["VolumeSpecification"]["SizeInGB"] + len(x["EbsBlockDevices"]).should.equal(total_volumes) + comp_total_size.should.equal(comp_total_size) + + @mock_emr def test_run_job_flow_with_instance_groups(): input_groups = dict((g["Name"], g) for g in input_instance_groups) @@ -448,50 +483,195 @@ def test_run_job_flow_with_instance_groups(): if "BidPrice" in y: x["BidPrice"].should.equal(y["BidPrice"]) + if "EbsConfiguration" in y: + _do_assertion_ebs_configuration(x, y) + + +auto_scaling_policy = { + "Constraints": {"MinCapacity": 2, "MaxCapacity": 10}, + "Rules": [ + { + "Name": "Default-scale-out", + "Description": "Replicates the default scale-out rule in the console for YARN memory.", + "Action": { + "SimpleScalingPolicyConfiguration": { + "AdjustmentType": "CHANGE_IN_CAPACITY", + "ScalingAdjustment": 1, + "CoolDown": 300, + } + }, + "Trigger": { + "CloudWatchAlarmDefinition": { + "ComparisonOperator": "LESS_THAN", + "EvaluationPeriods": 1, + "MetricName": "YARNMemoryAvailablePercentage", + "Namespace": "AWS/ElasticMapReduce", + "Period": 300, + "Threshold": 15.0, + "Statistic": "AVERAGE", + "Unit": "PERCENT", + "Dimensions": [{"Key": "JobFlowId", "Value": "${emr.clusterId}"}], + } + }, + } + ], +} + + +@mock_emr +def test_run_job_flow_with_instance_groups_with_autoscaling(): + input_groups = dict((g["Name"], g) for g in input_instance_groups) + + input_groups["core"]["AutoScalingPolicy"] = auto_scaling_policy + input_groups["task-1"]["AutoScalingPolicy"] = auto_scaling_policy + + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["Instances"] = {"InstanceGroups": input_instance_groups} + cluster_id = client.run_job_flow(**args)["JobFlowId"] + groups = client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + for x in groups: + y = deepcopy(input_groups[x["Name"]]) + if "AutoScalingPolicy" in y: + x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") + returned_policy = deepcopy(x["AutoScalingPolicy"]) + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + y["AutoScalingPolicy"], cluster_id + ) + del returned_policy["Status"] + returned_policy.should.equal(auto_scaling_policy_with_cluster_id) + + +@mock_emr +def test_put_remove_auto_scaling_policy(): + input_groups = dict((g["Name"], g) for g in input_instance_groups) + client = boto3.client("emr", 
region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["Instances"] = {"InstanceGroups": input_instance_groups} + cluster_id = client.run_job_flow(**args)["JobFlowId"] + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + resp = client.put_auto_scaling_policy( + ClusterId=cluster_id, + InstanceGroupId=core_instance_group["Id"], + AutoScalingPolicy=auto_scaling_policy, + ) + + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id + ) + del resp["AutoScalingPolicy"]["Status"] + resp["AutoScalingPolicy"].should.equal(auto_scaling_policy_with_cluster_id) + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + ("AutoScalingPolicy" in core_instance_group).should.equal(True) + + client.remove_auto_scaling_policy( + ClusterId=cluster_id, InstanceGroupId=core_instance_group["Id"] + ) + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + ("AutoScalingPolicy" not in core_instance_group).should.equal(True) + + +def _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id +): + policy_copy = deepcopy(auto_scaling_policy) + for rule in policy_copy["Rules"]: + for dimension in rule["Trigger"]["CloudWatchAlarmDefinition"]["Dimensions"]: + dimension["Value"] = cluster_id + return policy_copy + @mock_emr def test_run_job_flow_with_custom_ami(): client = boto3.client("emr", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # CustomAmiId available in Amazon EMR 5.7.0 and later args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" args["ReleaseLabel"] = "emr-5.6.0" client.run_job_flow(**args) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal("Custom AMI is not allowed") + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal("Custom AMI is not allowed") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" args["AmiVersion"] = "3.8.1" client.run_job_flow(**args) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Custom AMI is not supported in this version of EMR" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # AMI version and release label exception raises before CustomAmi exception args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" args["ReleaseLabel"] = "emr-5.6.0" args["AmiVersion"] = "3.8.1" client.run_job_flow(**args) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( "Only one AMI version and release label may be specified." 
) args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomAmi" - args["ReleaseLabel"] = "emr-5.7.0" + args["ReleaseLabel"] = "emr-5.31.0" cluster_id = client.run_job_flow(**args)["JobFlowId"] resp = client.describe_cluster(ClusterId=cluster_id) resp["Cluster"]["CustomAmiId"].should.equal("MyEmrCustomAmi") +@mock_emr +def test_run_job_flow_with_step_concurrency(): + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["StepConcurrencyLevel"] = 2 + cluster_id = client.run_job_flow(**args)["JobFlowId"] + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["Name"].should.equal(args["Name"]) + resp["Status"]["State"].should.equal("WAITING") + resp["StepConcurrencyLevel"].should.equal(2) + + +@mock_emr +def test_modify_cluster(): + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["StepConcurrencyLevel"] = 2 + cluster_id = client.run_job_flow(**args)["JobFlowId"] + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["Name"].should.equal(args["Name"]) + resp["Status"]["State"].should.equal("WAITING") + resp["StepConcurrencyLevel"].should.equal(2) + + resp = client.modify_cluster(ClusterId=cluster_id, StepConcurrencyLevel=4) + resp["StepConcurrencyLevel"].should.equal(4) + + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["StepConcurrencyLevel"].should.equal(4) + + @mock_emr def test_set_termination_protection(): client = boto3.client("emr", region_name="us-east-1") @@ -592,8 +772,11 @@ def test_instance_groups(): jf = client.describe_job_flows(JobFlowIds=[cluster_id])["JobFlows"][0] base_instance_count = jf["Instances"]["InstanceCount"] + instance_groups_to_add = deepcopy(input_instance_groups[2:]) + instance_groups_to_add[0]["AutoScalingPolicy"] = auto_scaling_policy + instance_groups_to_add[1]["AutoScalingPolicy"] = auto_scaling_policy client.add_instance_groups( - JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:] + JobFlowId=cluster_id, InstanceGroups=instance_groups_to_add ) jf = client.describe_job_flows(JobFlowIds=[cluster_id])["JobFlows"][0] @@ -602,8 +785,8 @@ def test_instance_groups(): ) for x in jf["Instances"]["InstanceGroups"]: y = input_groups[x["Name"]] - if hasattr(y, "BidPrice"): - x["BidPrice"].should.equal("BidPrice") + if "BidPrice" in y: + x["BidPrice"].should.equal(y["BidPrice"]) x["CreationDateTime"].should.be.a("datetime.datetime") # x['EndDateTime'].should.be.a('datetime.datetime') x.should.have.key("InstanceGroupId") @@ -620,9 +803,21 @@ def test_instance_groups(): groups = client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] for x in groups: - y = input_groups[x["Name"]] - if hasattr(y, "BidPrice"): - x["BidPrice"].should.equal("BidPrice") + y = deepcopy(input_groups[x["Name"]]) + if "BidPrice" in y: + x["BidPrice"].should.equal(y["BidPrice"]) + if "AutoScalingPolicy" in y: + x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") + returned_policy = dict(x["AutoScalingPolicy"]) + del returned_policy["Status"] + policy = json.loads( + json.dumps(y["AutoScalingPolicy"]).replace( + "${emr.clusterId}", cluster_id + ) + ) + returned_policy.should.equal(policy) + if "EbsConfiguration" in y: + _do_assertion_ebs_configuration(x, y) # Configurations # EbsBlockDevices # EbsOptimized @@ -752,7 +947,9 @@ def test_steps(): # StateChangeReason x["Status"]["Timeline"]["CreationDateTime"].should.be.a("datetime.datetime") # 
x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + # Only the first step will have started - we don't know anything about when it finishes, so the second step never starts + if x["Name"] == "My wordcount example": + x["Status"]["Timeline"]["StartDateTime"].should.be.a("datetime.datetime") x = client.describe_step(ClusterId=cluster_id, StepId=x["Id"])["Step"] x["ActionOnFailure"].should.equal("TERMINATE_CLUSTER") @@ -798,3 +995,53 @@ def test_tags(): client.remove_tags(ResourceId=cluster_id, TagKeys=[t["Key"] for t in input_tags]) resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] resp["Tags"].should.equal([]) + + +@mock_emr +def test_security_configurations(): + + client = boto3.client("emr", region_name="us-east-1") + + security_configuration_name = "MySecurityConfiguration" + + security_configuration = """ +{ + "EncryptionConfiguration": { + "AtRestEncryptionConfiguration": { + "S3EncryptionConfiguration": { + "EncryptionMode": "SSE-S3" + } + }, + "EnableInTransitEncryption": false, + "EnableAtRestEncryption": true + } +} + """.strip() + + resp = client.create_security_configuration( + Name=security_configuration_name, SecurityConfiguration=security_configuration + ) + + resp["Name"].should.equal(security_configuration_name) + resp["CreationDateTime"].should.be.a("datetime.datetime") + + resp = client.describe_security_configuration(Name=security_configuration_name) + resp["Name"].should.equal(security_configuration_name) + resp["SecurityConfiguration"].should.equal(security_configuration) + resp["CreationDateTime"].should.be.a("datetime.datetime") + + client.delete_security_configuration(Name=security_configuration_name) + + with pytest.raises(ClientError) as ex: + client.describe_security_configuration(Name=security_configuration_name) + ex.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ex.value.response["Error"]["Message"].should.match( + r"Security configuration with name .* does not exist." + ) + + with pytest.raises(ClientError) as ex: + client.delete_security_configuration(Name=security_configuration_name) + ex.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ex.value.response["Error"]["Message"].should.match( + r"Security configuration with name .* does not exist." 
+ ) diff --git a/tests/test_emr/test_utils.py b/tests/test_emr/test_utils.py new file mode 100644 index 000000000..b836ebf48 --- /dev/null +++ b/tests/test_emr/test_utils.py @@ -0,0 +1,49 @@ +import pytest + +from moto.emr.utils import ReleaseLabel + + +def test_invalid_release_labels_raise_exception(): + invalid_releases = [ + "", + "0", + "1.0", + "emr-2.0", + ] + for invalid_release in invalid_releases: + with pytest.raises(ValueError): + ReleaseLabel(invalid_release) + + +def test_release_label_comparisons(): + assert str(ReleaseLabel("emr-5.1.2")) == "emr-5.1.2" + + assert ReleaseLabel("emr-5.0.0") != ReleaseLabel("emr-5.0.1") + assert ReleaseLabel("emr-5.0.0") == ReleaseLabel("emr-5.0.0") + + assert ReleaseLabel("emr-5.31.0") > ReleaseLabel("emr-5.7.0") + assert ReleaseLabel("emr-6.0.0") > ReleaseLabel("emr-5.7.0") + + assert ReleaseLabel("emr-5.7.0") < ReleaseLabel("emr-5.10.0") + assert ReleaseLabel("emr-5.10.0") < ReleaseLabel("emr-5.10.1") + + assert ReleaseLabel("emr-5.60.0") >= ReleaseLabel("emr-5.7.0") + assert ReleaseLabel("emr-6.0.0") >= ReleaseLabel("emr-6.0.0") + + assert ReleaseLabel("emr-5.7.0") <= ReleaseLabel("emr-5.17.0") + assert ReleaseLabel("emr-5.7.0") <= ReleaseLabel("emr-5.7.0") + + releases_unsorted = [ + ReleaseLabel("emr-5.60.2"), + ReleaseLabel("emr-4.0.1"), + ReleaseLabel("emr-4.0.0"), + ReleaseLabel("emr-5.7.3"), + ] + releases_sorted = [str(label) for label in sorted(releases_unsorted)] + expected = [ + "emr-4.0.0", + "emr-4.0.1", + "emr-5.7.3", + "emr-5.60.2", + ] + assert releases_sorted == expected diff --git a/tests/test_events/__init__.py b/tests/test_events/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_events/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
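As context for the new ReleaseLabel tests above: a minimal sketch of an "emr-X.Y.Z" parser that would satisfy them. This is an illustrative assumption for the reader, not the implementation shipped in moto/emr/utils.py, which may differ in detail:

import re
from functools import total_ordering


@total_ordering
class ReleaseLabelSketch(object):
    # Rejects "", "0", "1.0" and "emr-2.0", as the tests expect.
    _pattern = re.compile(r"^emr-(\d+)\.(\d+)\.(\d+)$")

    def __init__(self, release_label):
        match = self._pattern.match(release_label or "")
        if not match:
            raise ValueError("Invalid release label %r" % release_label)
        self.major, self.minor, self.patch = (int(g) for g in match.groups())

    def _key(self):
        return (self.major, self.minor, self.patch)

    def __eq__(self, other):
        return self._key() == other._key()

    def __lt__(self, other):
        # total_ordering fills in <=, >, >= and != from __eq__ and __lt__,
        # which also makes sorted() over labels behave numerically.
        return self._key() < other._key()

    def __str__(self):
        return "emr-%d.%d.%d" % (self.major, self.minor, self.patch)

Comparing the parsed integer tuple is what makes emr-5.31.0 sort above emr-5.7.0, where a plain string comparison would get it wrong.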
diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 14d872806..3719692f8 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -1,12 +1,17 @@ -import random -import boto3 import json +import random +import unittest + +import boto3 import sure # noqa -from moto.events import mock_events from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest + from moto.core import ACCOUNT_ID +from moto.core.exceptions import JsonRESTError +from moto.events import mock_events +from moto.events.models import EventsBackend RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -72,6 +77,30 @@ def generate_environment(): return client +@mock_events +def test_put_rule(): + client = boto3.client("events", "us-west-2") + client.list_rules()["Rules"].should.have.length_of(0) + + rule_data = { + "Name": "my-event", + "ScheduleExpression": "rate(5 minutes)", + "EventPattern": '{"source": ["test-source"]}', + "EventBusName": "test-bus", + } + + client.put_rule(**rule_data) + + rules = client.list_rules()["Rules"] + + rules.should.have.length_of(1) + rules[0]["Name"].should.equal(rule_data["Name"]) + rules[0]["ScheduleExpression"].should.equal(rule_data["ScheduleExpression"]) + rules[0]["EventPattern"].should.equal(rule_data["EventPattern"]) + rules[0]["EventBusName"].should.equal(rule_data["EventBusName"]) + rules[0]["State"].should.equal("ENABLED") + + @mock_events def test_list_rules(): client = generate_environment() @@ -136,14 +165,6 @@ def test_list_rule_names_by_target(): assert rule in test_2_target["Rules"] -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert len(rules["Rules"]) == len(RULES) - - @mock_events def test_delete_rule(): client = generate_environment() @@ -176,13 +197,56 @@ def test_remove_targets(): targets_before = len(targets) assert targets_before > 0 - client.remove_targets(Rule=rule_name, Ids=[targets[0]["Id"]]) + response = client.remove_targets(Rule=rule_name, Ids=[targets[0]["Id"]]) + response["FailedEntryCount"].should.equal(0) + response["FailedEntries"].should.have.length_of(0) targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] targets_after = len(targets) assert targets_before - 1 == targets_after +@mock_events +def test_remove_targets_errors(): + client = boto3.client("events", "us-east-1") + + client.remove_targets.when.called_with( + Rule="non-existent", Ids=["Id12345678"] + ).should.throw( + client.exceptions.ResourceNotFoundException, + "An entity that you specified does not exist", + ) + + +@mock_events +def test_put_targets(): + client = boto3.client("events", "us-west-2") + rule_name = "my-event" + rule_data = { + "Name": rule_name, + "ScheduleExpression": "rate(5 minutes)", + "EventPattern": '{"source": ["test-source"]}', + } + + client.put_rule(**rule_data) + + targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] + targets_before = len(targets) + assert targets_before == 0 + + targets_data = [{"Arn": "test_arn", "Id": "test_id"}] + resp = client.put_targets(Rule=rule_name, Targets=targets_data) + assert resp["FailedEntryCount"] == 0 + assert len(resp["FailedEntries"]) == 0 + + targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] + targets_after = len(targets) + assert targets_before + 1 == targets_after + + assert targets[0]["Arn"] == "test_arn" + assert targets[0]["Id"] == "test_id" + + @mock_events def test_permissions(): client = 
boto3.client("events", "eu-central-1") @@ -264,10 +328,12 @@ def test_put_events(): "DetailType": "myDetailType", } - client.put_events(Entries=[event]) + response = client.put_events(Entries=[event]) # Boto3 would error if it didn't return 200 OK + response["FailedEntryCount"].should.equal(0) + response["Entries"].should.have.length_of(1) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.put_events(Entries=[event] * 20) @@ -461,3 +527,50 @@ def test_delete_event_bus_errors(): client.delete_event_bus.when.called_with(Name="default").should.throw( ClientError, "Cannot delete event bus default." ) + + +@mock_events +def test_rule_tagging_happy(): + client = generate_environment() + rule_name = get_random_rule()["Name"] + rule_arn = client.describe_rule(Name=rule_name).get("Arn") + + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + client.tag_resource(ResourceARN=rule_arn, Tags=tags) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + tc = unittest.TestCase("__init__") + expected = [{"Value": "value1", "Key": "key1"}, {"Value": "value2", "Key": "key2"}] + tc.assertTrue( + (expected[0] == actual[0] and expected[1] == actual[1]) + or (expected[1] == actual[0] and expected[0] == actual[1]) + ) + + client.untag_resource(ResourceARN=rule_arn, TagKeys=["key1"]) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + expected = [{"Key": "key2", "Value": "value2"}] + assert expected == actual + + +@mock_events +def test_rule_tagging_sad(): + back_end = EventsBackend("us-west-2") + + try: + back_end.tag_resource("unknown", []) + raise "tag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + back_end.untag_resource("unknown", []) + raise "untag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + back_end.list_tags_for_resource("unknown") + raise "list_tags_for_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass diff --git a/tests/test_forecast/__init__.py b/tests/test_forecast/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_forecast/test_forecast.py b/tests/test_forecast/test_forecast.py new file mode 100644 index 000000000..7936a500d --- /dev/null +++ b/tests/test_forecast/test_forecast.py @@ -0,0 +1,218 @@ +from __future__ import unicode_literals + +import boto3 +import pytest +import sure # noqa +from botocore.exceptions import ClientError +from moto import mock_forecast +from moto.core import ACCOUNT_ID + +region = "us-east-1" +account_id = None +valid_domains = [ + "RETAIL", + "CUSTOM", + "INVENTORY_PLANNING", + "EC2_CAPACITY", + "WORK_FORCE", + "WEB_TRAFFIC", + "METRICS", +] + + +@pytest.mark.parametrize("domain", valid_domains) +@mock_forecast +def test_forecast_dataset_group_create(domain): + name = "example_dataset_group" + client = boto3.client("forecast", region_name=region) + response = client.create_dataset_group(DatasetGroupName=name, Domain=domain) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["DatasetGroupArn"].should.equal( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + + +@mock_forecast +def test_forecast_dataset_group_create_invalid_domain(): + name = "example_dataset_group" + client = boto3.client("forecast", region_name=region) + invalid_domain = "INVALID" + + with pytest.raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName=name, 
Domain=invalid_domain) + exc.value.response["Error"]["Code"].should.equal("ValidationException") + exc.value.response["Error"]["Message"].should.equal( + "1 validation error detected: Value '" + + invalid_domain + + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set ['INVENTORY_PLANNING', 'METRICS', 'RETAIL', 'EC2_CAPACITY', 'CUSTOM', 'WEB_TRAFFIC', 'WORK_FORCE']" + ) + + +@pytest.mark.parametrize("name", [" ", "a" * 64]) +@mock_forecast +def test_forecast_dataset_group_create_invalid_name(name): + client = boto3.client("forecast", region_name=region) + + with pytest.raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + exc.value.response["Error"]["Code"].should.equal("ValidationException") + exc.value.response["Error"]["Message"].should.contain( + "1 validation error detected: Value '" + + name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must" + ) + + +@mock_forecast +def test_forecast_dataset_group_create_duplicate_fails(): + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") + + with pytest.raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") + + exc.value.response["Error"]["Code"].should.equal("ResourceAlreadyExistsException") + + +@mock_forecast +def test_forecast_dataset_group_list_default_empty(): + client = boto3.client("forecast", region_name=region) + + list = client.list_dataset_groups() + list["DatasetGroups"].should.be.empty + + +@mock_forecast +def test_forecast_dataset_group_list_some(): + client = boto3.client("forecast", region_name=region) + + client.create_dataset_group(DatasetGroupName="hello", Domain="CUSTOM") + result = client.list_dataset_groups() + + assert len(result["DatasetGroups"]) == 1 + result["DatasetGroups"][0]["DatasetGroupArn"].should.equal( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/hello" + ) + + +@mock_forecast +def test_forecast_delete_dataset_group(): + dataset_group_name = "name" + dataset_group_arn = ( + "arn:aws:forecast:" + + region + + ":" + + ACCOUNT_ID + + ":dataset-group/" + + dataset_group_name + ) + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName=dataset_group_name, Domain="CUSTOM") + client.delete_dataset_group(DatasetGroupArn=dataset_group_arn) + + +@mock_forecast +def test_forecast_delete_dataset_group_missing(): + client = boto3.client("forecast", region_name=region) + missing_dsg_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/missing" + ) + + with pytest.raises(ClientError) as exc: + client.delete_dataset_group(DatasetGroupArn=missing_dsg_arn) + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( + "No resource found " + missing_dsg_arn + ) + + +@mock_forecast +def test_forecast_update_dataset_arns_empty(): + dataset_group_name = "name" + dataset_group_arn = ( + "arn:aws:forecast:" + + region + + ":" + + ACCOUNT_ID + + ":dataset-group/" + + dataset_group_name + ) + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName=dataset_group_name, Domain="CUSTOM") + client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) + + +@mock_forecast +def test_forecast_update_dataset_group_not_found(): + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + 
"arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + "test" + ) + with pytest.raises(ClientError) as exc: + client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( + "No resource found " + dataset_group_arn + ) + + +@mock_forecast +def test_describe_dataset_group(): + name = "test" + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + result = client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) + assert result.get("DatasetGroupArn") == dataset_group_arn + assert result.get("Domain") == "CUSTOM" + assert result.get("DatasetArns") == [] + + +@mock_forecast +def test_describe_dataset_group_missing(): + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/name" + ) + with pytest.raises(ClientError) as exc: + client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( + "No resource found " + dataset_group_arn + ) + + +@mock_forecast +def test_create_dataset_group_missing_datasets(): + client = boto3.client("forecast", region_name=region) + dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" + with pytest.raises(ClientError) as exc: + client.create_dataset_group( + DatasetGroupName="name", Domain="CUSTOM", DatasetArns=[dataset_arn] + ) + exc.value.response["Error"]["Code"].should.equal("InvalidInputException") + exc.value.response["Error"]["Message"].should.equal( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + +@mock_forecast +def test_update_dataset_group_missing_datasets(): + name = "test" + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" + + with pytest.raises(ClientError) as exc: + client.update_dataset_group( + DatasetGroupArn=dataset_group_arn, DatasetArns=[dataset_arn] + ) + exc.value.response["Error"]["Code"].should.equal("InvalidInputException") + exc.value.response["Error"]["Message"].should.equal( + "Dataset arns: [" + dataset_arn + "] are not found" + ) diff --git a/tests/test_glacier/__init__.py b/tests/test_glacier/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_glacier/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_glacier/test_glacier_archives.py b/tests/test_glacier/test_glacier_archives.py index e8fa6045e..ec43e613c 100644 --- a/tests/test_glacier/test_glacier_archives.py +++ b/tests/test_glacier/test_glacier_archives.py @@ -1,21 +1,21 @@ -from __future__ import unicode_literals - -from tempfile import NamedTemporaryFile -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_and_delete_archive(): - the_file = NamedTemporaryFile(delete=False) - the_file.write(b"some stuff") - the_file.close() - - conn = boto.glacier.connect_to_region("us-west-2") - vault = conn.create_vault("my_vault") - - archive_id = vault.upload_archive(the_file.name) - - vault.delete_archive(archive_id) +from __future__ import unicode_literals + +from tempfile import NamedTemporaryFile +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_and_delete_archive(): + the_file = NamedTemporaryFile(delete=False) + the_file.write(b"some stuff") + the_file.close() + + conn = boto.glacier.connect_to_region("us-west-2") + vault = conn.create_vault("my_vault") + + archive_id = vault.upload_archive(the_file.name) + + vault.delete_archive(archive_id) diff --git a/tests/test_glacier/test_glacier_vaults.py b/tests/test_glacier/test_glacier_vaults.py index e64f40a90..93c79423e 100644 --- a/tests/test_glacier/test_glacier_vaults.py +++ b/tests/test_glacier/test_glacier_vaults.py @@ -1,31 +1,31 @@ -from __future__ import unicode_literals - -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - vaults[0].name.should.equal("my_vault") - - -@mock_glacier_deprecated -def test_delete_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - - conn.delete_vault("my_vault") - vaults = conn.list_vaults() - vaults.should.have.length_of(0) +from __future__ import unicode_literals + +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + vaults[0].name.should.equal("my_vault") + + +@mock_glacier_deprecated +def test_delete_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + + conn.delete_vault("my_vault") + vaults = conn.list_vaults() + vaults.should.have.length_of(0) diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_server.py similarity index 100% rename from tests/test_glacier/test_glacier_server.py rename to tests/test_glacier/test_server.py diff --git a/tests/test_glue/__init__.py b/tests/test_glue/__init__.py index baffc4882..78b780d97 100644 --- a/tests/test_glue/__init__.py +++ b/tests/test_glue/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/fixtures/__init__.py b/tests/test_glue/fixtures/__init__.py index baffc4882..78b780d97 100644 --- 
a/tests/test_glue/fixtures/__init__.py +++ b/tests/test_glue/fixtures/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index 130a879bc..9003a1358 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -1,97 +1,97 @@ -from __future__ import unicode_literals - -import copy - -from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT - - -def create_database(client, database_name): - return client.create_database(DatabaseInput={"Name": database_name}) - - -def get_database(client, database_name): - return client.get_database(Name=database_name) - - -def create_table_input(database_name, table_name, columns=[], partition_keys=[]): - table_input = copy.deepcopy(TABLE_INPUT) - table_input["Name"] = table_name - table_input["PartitionKeys"] = partition_keys - table_input["StorageDescriptor"]["Columns"] = columns - table_input["StorageDescriptor"][ - "Location" - ] = "s3://my-bucket/{database_name}/{table_name}".format( - database_name=database_name, table_name=table_name - ) - return table_input - - -def create_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.create_table(DatabaseName=database_name, TableInput=table_input) - - -def update_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.update_table(DatabaseName=database_name, TableInput=table_input) - - -def get_table(client, database_name, table_name): - return client.get_table(DatabaseName=database_name, Name=table_name) - - -def get_tables(client, database_name): - return client.get_tables(DatabaseName=database_name) - - -def get_table_versions(client, database_name, table_name): - return client.get_table_versions(DatabaseName=database_name, TableName=table_name) - - -def get_table_version(client, database_name, table_name, version_id): - return client.get_table_version( - DatabaseName=database_name, TableName=table_name, VersionId=version_id - ) - - -def create_partition_input(database_name, table_name, values=[], columns=[]): - root_path = "s3://my-bucket/{database_name}/{table_name}".format( - database_name=database_name, table_name=table_name - ) - - part_input = copy.deepcopy(PARTITION_INPUT) - part_input["Values"] = values - part_input["StorageDescriptor"]["Columns"] = columns - part_input["StorageDescriptor"]["SerdeInfo"]["Parameters"]["path"] = root_path - return part_input - - -def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.create_partition( - DatabaseName=database_name, TableName=table_name, PartitionInput=partiton_input - ) - - -def update_partition( - client, database_name, table_name, old_values=[], partiton_input=None, **kwargs -): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.update_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionInput=partiton_input, - PartitionValueList=old_values, - ) - - -def get_partition(client, database_name, table_name, values): - return client.get_partition( - DatabaseName=database_name, TableName=table_name, 
PartitionValues=values - ) +from __future__ import unicode_literals + +import copy + +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT + + +def create_database(client, database_name): + return client.create_database(DatabaseInput={"Name": database_name}) + + +def get_database(client, database_name): + return client.get_database(Name=database_name) + + +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): + table_input = copy.deepcopy(TABLE_INPUT) + table_input["Name"] = table_name + table_input["PartitionKeys"] = partition_keys + table_input["StorageDescriptor"]["Columns"] = columns + table_input["StorageDescriptor"][ + "Location" + ] = "s3://my-bucket/{database_name}/{table_name}".format( + database_name=database_name, table_name=table_name + ) + return table_input + + +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.create_table(DatabaseName=database_name, TableInput=table_input) + + +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.update_table(DatabaseName=database_name, TableInput=table_input) + + +def get_table(client, database_name, table_name): + return client.get_table(DatabaseName=database_name, Name=table_name) + + +def get_tables(client, database_name): + return client.get_tables(DatabaseName=database_name) + + +def get_table_versions(client, database_name, table_name): + return client.get_table_versions(DatabaseName=database_name, TableName=table_name) + + +def get_table_version(client, database_name, table_name, version_id): + return client.get_table_version( + DatabaseName=database_name, TableName=table_name, VersionId=version_id + ) + + +def create_partition_input(database_name, table_name, values=[], columns=[]): + root_path = "s3://my-bucket/{database_name}/{table_name}".format( + database_name=database_name, table_name=table_name + ) + + part_input = copy.deepcopy(PARTITION_INPUT) + part_input["Values"] = values + part_input["StorageDescriptor"]["Columns"] = columns + part_input["StorageDescriptor"]["SerdeInfo"]["Parameters"]["path"] = root_path + return part_input + + +def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.create_partition( + DatabaseName=database_name, TableName=table_name, PartitionInput=partiton_input + ) + + +def update_partition( + client, database_name, table_name, old_values=[], partiton_input=None, **kwargs +): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.update_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partiton_input, + PartitionValueList=old_values, + ) + + +def get_partition(client, database_name, table_name, values): + return client.get_partition( + DatabaseName=database_name, TableName=table_name, PartitionValues=values + ) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 31731e598..38a3831d5 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import sure # noqa import re -from nose.tools import 
diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py
index 31731e598..38a3831d5 100644
--- a/tests/test_glue/test_datacatalog.py
+++ b/tests/test_glue/test_datacatalog.py
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
 
 import sure  # noqa
 import re
-from nose.tools import assert_raises
+import pytest
 
 import boto3
 from botocore.client import ClientError
@@ -32,10 +32,10 @@ def test_create_database_already_exists():
     database_name = "cantcreatethisdatabasetwice"
     helpers.create_database(client, database_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.create_database(client, database_name)
 
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -43,15 +43,38 @@ def test_get_database_not_exits():
     client = boto3.client("glue", region_name="us-east-1")
     database_name = "nosuchdatabase"
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_database(client, database_name)
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Database nosuchdatabase not found"
     )
 
 
+@mock_glue
+def test_get_databases_empty():
+    client = boto3.client("glue", region_name="us-east-1")
+    response = client.get_databases()
+    response["DatabaseList"].should.have.length_of(0)
+
+
+@mock_glue
+def test_get_databases_several_items():
+    client = boto3.client("glue", region_name="us-east-1")
+    database_name_1, database_name_2 = "firstdatabase", "seconddatabase"
+
+    helpers.create_database(client, database_name_1)
+    helpers.create_database(client, database_name_2)
+
+    database_list = sorted(
+        client.get_databases()["DatabaseList"], key=lambda x: x["Name"]
+    )
+    database_list.should.have.length_of(2)
+    database_list[0].should.equal({"Name": database_name_1})
+    database_list[1].should.equal({"Name": database_name_2})
+
+
 @mock_glue
 def test_create_table():
     client = boto3.client("glue", region_name="us-east-1")
@@ -79,10 +102,10 @@ def test_create_table_already_exists():
     table_name = "cantcreatethistabletwice"
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.create_table(client, database_name, table_name)
 
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -169,11 +192,11 @@ def test_get_table_version_not_found():
     helpers.create_database(client, database_name)
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table_version(client, database_name, "myfirsttable", "20")
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("version", re.I)
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("version", re.I)
 
 
 @mock_glue
@@ -184,10 +207,10 @@ def test_get_table_version_invalid_input():
     helpers.create_database(client, database_name)
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table_version(client, database_name, "myfirsttable", "10not-an-int")
 
-    exc.exception.response["Error"]["Code"].should.equal("InvalidInputException")
+    exc.value.response["Error"]["Code"].should.equal("InvalidInputException")
 
 
 @mock_glue
@@ -196,13 +219,11 @@ def test_get_table_not_exits():
     database_name = "myspecialdatabase"
     helpers.create_database(client, database_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, "myfirsttable")
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
-        "Table myfirsttable not found"
-    )
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("Table myfirsttable not found")
 
 
 @mock_glue
@@ -210,11 +231,11 @@ def test_get_table_when_database_not_exits():
     client = boto3.client("glue", region_name="us-east-1")
     database_name = "nosuchdatabase"
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, "myfirsttable")
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Database nosuchdatabase not found"
     )
 
@@ -233,11 +254,11 @@ def test_delete_table():
     result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
 
     # confirm table is deleted
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, table_name)
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Table myspecialtable not found"
     )
 
@@ -258,11 +279,11 @@ def test_batch_delete_table():
     result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
 
     # confirm table is deleted
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, table_name)
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Table myspecialtable not found"
     )
 
@@ -327,10 +348,10 @@ def test_create_partition_already_exist():
 
     helpers.create_partition(client, database_name, table_name, values=values)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.create_partition(client, database_name, table_name, values=values)
 
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -343,11 +364,11 @@ def test_get_partition_not_found():
 
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_partition(client, database_name, table_name, values)
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -519,7 +540,7 @@ def test_update_partition_not_found_moving():
     helpers.create_database(client, database_name)
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.update_partition(
             client,
             database_name,
@@ -528,8 +549,8 @@
             values=["2018-10-02"],
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -542,13 +563,13 @@ def test_update_partition_not_found_change_in_place():
     helpers.create_database(client, database_name)
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.update_partition(
             client, database_name, table_name, old_values=values, values=values
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -565,12 +586,12 @@ def test_update_partition_cannot_overwrite():
     helpers.create_partition(client, database_name, table_name, values=values[0])
     helpers.create_partition(client, database_name, table_name, values=values[1])
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.update_partition(
             client, database_name, table_name, old_values=values[0], values=values[1]
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -625,11 +646,11 @@ def test_update_partition_move():
         columns=[{"Name": "country", "Type": "string"}],
     )
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         helpers.get_partition(client, database_name, table_name, values)
 
     # Old partition shouldn't exist anymore
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
 
     response = client.get_partition(
         DatabaseName=database_name, TableName=table_name, PartitionValues=new_values
@@ -674,12 +695,12 @@ def test_delete_partition_bad_partition():
     helpers.create_database(client, database_name)
     helpers.create_table(client, database_name, table_name)
 
-    with assert_raises(ClientError) as exc:
+    with pytest.raises(ClientError) as exc:
         client.delete_partition(
             DatabaseName=database_name, TableName=table_name, PartitionValues=values
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
 
 
 @mock_glue
diff --git a/tests/test_iam/__init__.py b/tests/test_iam/__init__.py
new file mode 100644
index 000000000..08a1c1568
--- /dev/null
+++ b/tests/test_iam/__init__.py
@@ -0,0 +1 @@
+# This file is intentionally left blank.
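Note: the mechanical change running through all of these test files is the migration from nose to pytest: assert_raises becomes pytest.raises, the @raises decorator becomes @pytest.mark.xfail(raises=...), and the captured exception is read from exc.value instead of exc.exception. A minimal sketch of the new idiom (illustrative helper, not part of the diff):

    import pytest
    from botocore.exceptions import ClientError

    def expect_client_error(func, code):
        # Run func and assert it raises a ClientError carrying the given error code.
        with pytest.raises(ClientError) as exc:
            func()
        assert exc.value.response["Error"]["Code"] == code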
diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py
index 995895437..dd5a6991f 100644
--- a/tests/test_iam/test_iam.py
+++ b/tests/test_iam/test_iam.py
@@ -4,22 +4,21 @@ import json
 
 import boto
 import boto3
-import os
+import csv
 import sure  # noqa
-import sys
 from boto.exception import BotoServerError
 from botocore.exceptions import ClientError
-from dateutil.tz import tzutc
-from moto import mock_iam, mock_iam_deprecated
-from moto.iam.models import aws_managed_policies
+from moto import mock_config, mock_iam, mock_iam_deprecated, settings
 from moto.core import ACCOUNT_ID
-from nose.tools import assert_raises, assert_equals
-from nose.tools import raises
+from moto.iam.models import aws_managed_policies
+from moto.backends import get_backend
+import pytest
 from datetime import datetime
 from tests.helpers import requires_boto_gte
 from uuid import uuid4
+from six.moves.urllib import parse
 
 
 MOCK_CERT = """-----BEGIN CERTIFICATE-----
@@ -93,7 +92,7 @@ def test_get_all_server_certs():
 def test_get_server_cert_doesnt_exist():
     conn = boto.connect_iam()
 
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.get_server_certificate("NonExistant")
 
 
@@ -128,14 +127,14 @@ def test_delete_server_cert():
     conn.upload_server_cert("certname", "certbody", "privatekey")
     conn.get_server_certificate("certname")
     conn.delete_server_cert("certname")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.get_server_certificate("certname")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.delete_server_cert("certname")
 
 
 @mock_iam_deprecated()
-@raises(BotoServerError)
+@pytest.mark.xfail(raises=BotoServerError)
 def test_get_role__should_throw__when_role_does_not_exist():
     conn = boto.connect_iam()
 
@@ -143,7 +142,7 @@
 
 
 @mock_iam_deprecated()
-@raises(BotoServerError)
+@pytest.mark.xfail(raises=BotoServerError)
 def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist():
     conn = boto.connect_iam()
 
@@ -155,13 +154,13 @@ def test_create_role_and_instance_profile():
     conn = boto.connect_iam()
     conn.create_instance_profile("my-profile", path="my-path")
     conn.create_role(
-        "my-role", assume_role_policy_document="some policy", path="my-path"
+        "my-role", assume_role_policy_document="some policy", path="/my-path/"
     )
 
     conn.add_role_to_instance_profile("my-profile", "my-role")
 
     role = conn.get_role("my-role")
-    role.path.should.equal("my-path")
+    role.path.should.equal("/my-path/")
     role.assume_role_policy_document.should.equal("some policy")
 
     profile = conn.get_instance_profile("my-profile")
@@ -181,7 +180,7 @@
 def test_create_instance_profile_should_throw_when_name_is_not_unique():
     conn = boto3.client("iam", region_name="us-east-1")
     conn.create_instance_profile(InstanceProfileName="unique-instance-profile")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_instance_profile(InstanceProfileName="unique-instance-profile")
 
 
@@ -204,6 +203,26 @@ def test_remove_role_from_instance_profile():
     dict(profile.roles).should.be.empty
 
 
+@mock_iam()
+def test_delete_instance_profile():
+    conn = boto3.client("iam", region_name="us-east-1")
+    conn.create_role(
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
+    )
+    conn.create_instance_profile(InstanceProfileName="my-profile")
+    conn.add_role_to_instance_profile(
+        InstanceProfileName="my-profile", RoleName="my-role"
+    )
+    with pytest.raises(conn.exceptions.DeleteConflictException):
+        conn.delete_instance_profile(InstanceProfileName="my-profile")
+    conn.remove_role_from_instance_profile(
+        InstanceProfileName="my-profile", RoleName="my-role"
+    )
+    conn.delete_instance_profile(InstanceProfileName="my-profile")
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
+        profile = conn.get_instance_profile(InstanceProfileName="my-profile")
+
+
 @mock_iam()
 def test_get_login_profile():
     conn = boto3.client("iam", region_name="us-east-1")
@@ -233,62 +252,62 @@ def test_update_login_profile():
 def test_delete_role():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.delete_role(RoleName="my-role")
 
     # Test deletion failure with a managed policy
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
    )
     response = conn.create_policy(
         PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY
     )
     conn.attach_role_policy(PolicyArn=response["Policy"]["Arn"], RoleName="my-role")
-    with assert_raises(conn.exceptions.DeleteConflictException):
+    with pytest.raises(conn.exceptions.DeleteConflictException):
         conn.delete_role(RoleName="my-role")
     conn.detach_role_policy(PolicyArn=response["Policy"]["Arn"], RoleName="my-role")
     conn.delete_policy(PolicyArn=response["Policy"]["Arn"])
     conn.delete_role(RoleName="my-role")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_role(RoleName="my-role")
 
     # Test deletion failure with an inline policy
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
    )
     conn.put_role_policy(
-        RoleName="my-role", PolicyName="my-role-policy", PolicyDocument=MOCK_POLICY
+        RoleName="my-role", PolicyName="my-role-policy", PolicyDocument=MOCK_POLICY,
    )
-    with assert_raises(conn.exceptions.DeleteConflictException):
+    with pytest.raises(conn.exceptions.DeleteConflictException):
         conn.delete_role(RoleName="my-role")
     conn.delete_role_policy(RoleName="my-role", PolicyName="my-role-policy")
     conn.delete_role(RoleName="my-role")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_role(RoleName="my-role")
 
     # Test deletion failure with attachment to an instance profile
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
    )
     conn.create_instance_profile(InstanceProfileName="my-profile")
     conn.add_role_to_instance_profile(
         InstanceProfileName="my-profile", RoleName="my-role"
     )
-    with assert_raises(conn.exceptions.DeleteConflictException):
+    with pytest.raises(conn.exceptions.DeleteConflictException):
         conn.delete_role(RoleName="my-role")
     conn.remove_role_from_instance_profile(
         InstanceProfileName="my-profile", RoleName="my-role"
     )
     conn.delete_role(RoleName="my-role")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_role(RoleName="my-role")
 
     # Test deletion with no conflicts
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
    )
     conn.delete_role(RoleName="my-role")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_role(RoleName="my-role")
 
 
@@ -312,7 +331,7 @@ def test_list_instance_profiles_for_role():
     conn = boto.connect_iam()
 
     conn.create_role(
-        role_name="my-role", assume_role_policy_document="some policy", path="my-path"
+        role_name="my-role", assume_role_policy_document="some policy", path="my-path",
     )
     conn.create_role(
         role_name="my-role2",
@@ -324,7 +343,7 @@ def test_list_instance_profiles_for_role():
     profile_path_list = ["my-path", "my-path2"]
     for profile_count in range(0, 2):
         conn.create_instance_profile(
-            profile_name_list[profile_count], path=profile_path_list[profile_count]
+            profile_name_list[profile_count], path=profile_path_list[profile_count],
         )
 
     for profile_count in range(0, 2):
@@ -369,7 +388,7 @@ def test_list_role_policies():
     role.policy_names.should.have.length_of(1)
     role.policy_names[0].should.equal("test policy 2")
 
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.delete_role_policy("my-role", "test policy")
 
 
@@ -390,9 +409,9 @@ def test_put_role_policy():
 def test_get_role_policy():
     conn = boto3.client("iam", region_name="us-east-1")
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path",
     )
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist")
 
 
@@ -422,13 +441,13 @@ def test_create_policy_already_exists():
     response = conn.create_policy(
         PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY
     )
-    with assert_raises(conn.exceptions.EntityAlreadyExistsException) as ex:
+    with pytest.raises(conn.exceptions.EntityAlreadyExistsException) as ex:
         response = conn.create_policy(
             PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY
         )
-    ex.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists")
-    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409)
-    ex.exception.response["Error"]["Message"].should.contain("TestCreatePolicy")
+    ex.value.response["Error"]["Code"].should.equal("EntityAlreadyExists")
+    ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409)
+    ex.value.response["Error"]["Message"].should.contain("TestCreatePolicy")
 
 
 @mock_iam
@@ -447,7 +466,7 @@ def test_delete_policy():
 @mock_iam
 def test_create_policy_versions():
     conn = boto3.client("iam", region_name="us-east-1")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_policy_version(
             PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format(
                 ACCOUNT_ID
@@ -488,7 +507,7 @@ def test_create_many_policy_versions():
             ),
             PolicyDocument=MOCK_POLICY,
         )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_policy_version(
             PolicyArn="arn:aws:iam::{}:policy/TestCreateManyPolicyVersions".format(
                 ACCOUNT_ID
@@ -529,6 +548,59 @@ def test_set_default_policy_version():
     versions.get("Versions")[2].get("Document").should.equal(json.loads(MOCK_POLICY_3))
     versions.get("Versions")[2].get("IsDefaultVersion").should.be.ok
 
+    conn.set_default_policy_version(
+        PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format(
+            ACCOUNT_ID
+        ),
+        VersionId="v1",
+    )
+    versions = conn.list_policy_versions(
+        PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format(
+            ACCOUNT_ID
+        )
+    )
+    versions.get("Versions")[0].get("Document").should.equal(json.loads(MOCK_POLICY))
+    versions.get("Versions")[0].get("IsDefaultVersion").should.be.ok
+    versions.get("Versions")[1].get("Document").should.equal(json.loads(MOCK_POLICY_2))
+    versions.get("Versions")[1].get("IsDefaultVersion").shouldnt.be.ok
+    versions.get("Versions")[2].get("Document").should.equal(json.loads(MOCK_POLICY_3))
+    versions.get("Versions")[2].get("IsDefaultVersion").shouldnt.be.ok
+
+    # Set default version for non-existing policy
+    conn.set_default_policy_version.when.called_with(
+        PolicyArn="arn:aws:iam::{}:policy/TestNonExistingPolicy".format(ACCOUNT_ID),
+        VersionId="v1",
+    ).should.throw(
+        ClientError,
+        "Policy arn:aws:iam::{}:policy/TestNonExistingPolicy not found".format(
+            ACCOUNT_ID
+        ),
+    )
+
+    # Set default version for incorrect version
+    conn.set_default_policy_version.when.called_with(
+        PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format(
+            ACCOUNT_ID
+        ),
+        VersionId="wrong_version_id",
+    ).should.throw(
+        ClientError,
+        "Value 'wrong_version_id' at 'versionId' failed to satisfy constraint: Member must satisfy regular expression pattern: v[1-9][0-9]*(\.[A-Za-z0-9-]*)?",
+    )
+
+    # Set default version for non-existing version
+    conn.set_default_policy_version.when.called_with(
+        PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format(
+            ACCOUNT_ID
+        ),
+        VersionId="v4",
+    ).should.throw(
+        ClientError,
+        "Policy arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion version v4 does not exist or is not attachable.".format(
+            ACCOUNT_ID
+        ),
+    )
+
 
 @mock_iam
 def test_get_policy():
@@ -566,7 +638,7 @@ def test_get_policy_version():
         PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID),
         PolicyDocument=MOCK_POLICY,
     )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.get_policy_version(
             PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID),
             VersionId="v2-does-not-exist",
@@ -588,7 +660,7 @@ def test_get_aws_managed_policy_version():
     managed_policy_version_create_date = datetime.strptime(
         "2015-04-09T15:03:43+00:00", "%Y-%m-%dT%H:%M:%S+00:00"
     )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.get_policy_version(
             PolicyArn=managed_policy_arn, VersionId="v2-does-not-exist"
         )
@@ -606,7 +678,7 @@ def test_get_aws_managed_policy_v4_version():
     managed_policy_version_create_date = datetime.strptime(
         "2018-10-08T21:33:45+00:00", "%Y-%m-%dT%H:%M:%S+00:00"
     )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.get_policy_version(
             PolicyArn=managed_policy_arn, VersionId="v2-does-not-exist"
         )
@@ -620,7 +692,7 @@ def test_get_aws_managed_policy_v4_version():
 @mock_iam
 def test_list_policy_versions():
     conn = boto3.client("iam", region_name="us-east-1")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         versions = conn.list_policy_versions(
             PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID)
         )
@@ -656,7 +728,7 @@ def test_delete_policy_version():
         PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID),
         PolicyDocument=MOCK_POLICY,
     )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_policy_version(
             PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(
                 ACCOUNT_ID
@@ -681,7 +753,7 @@ def test_delete_default_policy_version():
         PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID),
         PolicyDocument=MOCK_POLICY_2,
     )
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_policy_version(
             PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(
                 ACCOUNT_ID
@@ -694,14 +766,14 @@
 def test_create_user():
     conn = boto.connect_iam()
     conn.create_user("my-user")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.create_user("my-user")
 
 
 @mock_iam_deprecated()
 def test_get_user():
     conn = boto.connect_iam()
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.get_user("my-user")
     conn.create_user("my-user")
     conn.get_user("my-user")
@@ -710,13 +782,13 @@ def test_get_user():
 @mock_iam()
 def test_update_user():
     conn = boto3.client("iam", region_name="us-east-1")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.update_user(UserName="my-user")
     conn.create_user(UserName="my-user")
     conn.update_user(UserName="my-user", NewPath="/new-path/", NewUserName="new-user")
     response = conn.get_user(UserName="new-user")
     response["User"].get("Path").should.equal("/new-path/")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_user(UserName="my-user")
 
 
@@ -740,6 +812,12 @@ def test_list_users():
     user["Path"].should.equal("/")
     user["Arn"].should.equal("arn:aws:iam::{}:user/my-user".format(ACCOUNT_ID))
 
+    conn.create_user(UserName="my-user-1", Path="myUser")
+    response = conn.list_users(PathPrefix="my")
+    user = response["Users"][0]
+    user["UserName"].should.equal("my-user-1")
+    user["Path"].should.equal("myUser")
+
 
 @mock_iam()
 def test_user_policies():
@@ -767,11 +845,11 @@ def test_user_policies():
 @mock_iam_deprecated()
 def test_create_login_profile():
     conn = boto.connect_iam()
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.create_login_profile("my-user", "my-pass")
     conn.create_user("my-user")
     conn.create_login_profile("my-user", "my-pass")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.create_login_profile("my-user", "my-pass")
 
 
@@ -779,7 +857,7 @@
 def test_delete_login_profile():
     conn = boto.connect_iam()
     conn.create_user("my-user")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.delete_login_profile("my-user")
     conn.create_login_profile("my-user", "my-pass")
     conn.delete_login_profile("my-user")
 
 
@@ -788,7 +866,7 @@
 @mock_iam
 def test_create_access_key():
     conn = boto3.client("iam", region_name="us-east-1")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_access_key(UserName="my-user")
     conn.create_user(UserName="my-user")
     access_key = conn.create_access_key(UserName="my-user")["AccessKey"]
@@ -820,22 +898,19 @@ def test_get_all_access_keys():
     conn = boto.connect_iam()
     conn.create_user("my-user")
     response = conn.get_all_access_keys("my-user")
-    assert_equals(
+    assert (
         response["list_access_keys_response"]["list_access_keys_result"][
             "access_key_metadata"
-        ],
-        [],
+        ]
+        == []
     )
     conn.create_access_key("my-user")
     response = conn.get_all_access_keys("my-user")
-    assert_equals(
-        sorted(
-            response["list_access_keys_response"]["list_access_keys_result"][
-                "access_key_metadata"
-            ][0].keys()
-        ),
-        sorted(["status", "create_date", "user_name", "access_key_id"]),
-    )
+    assert sorted(
+        response["list_access_keys_response"]["list_access_keys_result"][
+            "access_key_metadata"
+        ][0].keys()
+    ) == sorted(["status", "create_date", "user_name", "access_key_id"])
 
 
 @mock_iam
@@ -843,14 +918,11 @@ def test_list_access_keys():
     conn = boto3.client("iam", region_name="us-east-1")
     conn.create_user(UserName="my-user")
     response = conn.list_access_keys(UserName="my-user")
-    assert_equals(
-        response["AccessKeyMetadata"], [],
-    )
+    assert response["AccessKeyMetadata"] == []
     access_key = conn.create_access_key(UserName="my-user")["AccessKey"]
     response = conn.list_access_keys(UserName="my-user")
-    assert_equals(
-        sorted(response["AccessKeyMetadata"][0].keys()),
-        sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]),
+    assert sorted(response["AccessKeyMetadata"][0].keys()) == sorted(
+        ["Status", "CreateDate", "UserName", "AccessKeyId"]
     )
     conn = boto3.client(
         "iam",
@@ -859,9 +931,8 @@ def test_list_access_keys():
         aws_secret_access_key=access_key["SecretAccessKey"],
     )
     response = conn.list_access_keys()
-    assert_equals(
-        sorted(response["AccessKeyMetadata"][0].keys()),
-        sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]),
+    assert sorted(response["AccessKeyMetadata"][0].keys()) == sorted(
+        ["Status", "CreateDate", "UserName", "AccessKeyId"]
     )
 
 
@@ -951,7 +1022,7 @@ def test_create_virtual_mfa_device_errors():
     client.create_virtual_mfa_device.when.called_with(
         VirtualMFADeviceName="test-device"
     ).should.throw(
-        ClientError, "MFADevice entity at the same path and name already exists."
+        ClientError, "MFADevice entity at the same path and name already exists.",
     )
 
     client.create_virtual_mfa_device.when.called_with(
@@ -1111,7 +1182,7 @@ def test_enable_virtual_mfa_device():
 @mock_iam_deprecated()
 def test_delete_user_deprecated():
     conn = boto.connect_iam()
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.delete_user("my-user")
     conn.create_user("my-user")
     conn.delete_user("my-user")
@@ -1120,7 +1191,7 @@ def test_delete_user_deprecated():
 @mock_iam()
 def test_delete_user():
     conn = boto3.client("iam", region_name="us-east-1")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.delete_user(UserName="my-user")
 
     # Test deletion failure with a managed policy
@@ -1129,30 +1200,30 @@ def test_delete_user():
         PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY
     )
     conn.attach_user_policy(PolicyArn=response["Policy"]["Arn"], UserName="my-user")
-    with assert_raises(conn.exceptions.DeleteConflictException):
+    with pytest.raises(conn.exceptions.DeleteConflictException):
         conn.delete_user(UserName="my-user")
     conn.detach_user_policy(PolicyArn=response["Policy"]["Arn"], UserName="my-user")
     conn.delete_policy(PolicyArn=response["Policy"]["Arn"])
     conn.delete_user(UserName="my-user")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_user(UserName="my-user")
 
     # Test deletion failure with an inline policy
     conn.create_user(UserName="my-user")
     conn.put_user_policy(
-        UserName="my-user", PolicyName="my-user-policy", PolicyDocument=MOCK_POLICY
+        UserName="my-user", PolicyName="my-user-policy", PolicyDocument=MOCK_POLICY,
    )
-    with assert_raises(conn.exceptions.DeleteConflictException):
+    with pytest.raises(conn.exceptions.DeleteConflictException):
         conn.delete_user(UserName="my-user")
     conn.delete_user_policy(UserName="my-user", PolicyName="my-user-policy")
     conn.delete_user(UserName="my-user")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_user(UserName="my-user")
 
     # Test deletion with no conflicts
     conn.create_user(UserName="my-user")
     conn.delete_user(UserName="my-user")
-    with assert_raises(conn.exceptions.NoSuchEntityException):
+    with pytest.raises(conn.exceptions.NoSuchEntityException):
         conn.get_user(UserName="my-user")
 
 
@@ -1182,7 +1253,7 @@ def test_boto3_generate_credential_report():
 def test_get_credential_report():
     conn = boto.connect_iam()
     conn.create_user("my-user")
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.get_credential_report()
     result = conn.generate_credential_report()
     while (
@@ -1205,7 +1276,7 @@ def test_get_credential_report():
 def test_boto3_get_credential_report():
     conn = boto3.client("iam", region_name="us-east-1")
     conn.create_user(UserName="my-user")
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.get_credential_report()
     result = conn.generate_credential_report()
     while result["State"] != "COMPLETE":
@@ -1215,6 +1286,69 @@ def test_boto3_get_credential_report():
     report.should.match(r".*my-user.*")
 
 
+@mock_iam
+def test_boto3_get_credential_report_content():
+    conn = boto3.client("iam", region_name="us-east-1")
+    username = "my-user"
+    conn.create_user(UserName=username)
+    key1 = conn.create_access_key(UserName=username)["AccessKey"]
+    conn.update_access_key(
+        UserName=username, AccessKeyId=key1["AccessKeyId"], Status="Inactive"
+    )
+    key1 = conn.create_access_key(UserName=username)["AccessKey"]
+    timestamp = datetime.utcnow()
+    if not settings.TEST_SERVER_MODE:
+        iam_backend = get_backend("iam")["global"]
+        iam_backend.users[username].access_keys[1].last_used = timestamp
+    with pytest.raises(ClientError):
+        conn.get_credential_report()
+    result = conn.generate_credential_report()
+    while result["State"] != "COMPLETE":
+        result = conn.generate_credential_report()
+    result = conn.get_credential_report()
+    report = result["Content"].decode("utf-8")
+    header = report.split("\n")[0]
+    header.should.equal(
+        "user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_1_last_used_date,access_key_1_last_used_region,access_key_1_last_used_service,access_key_2_active,access_key_2_last_rotated,access_key_2_last_used_date,access_key_2_last_used_region,access_key_2_last_used_service,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated"
+    )
+    report_dict = csv.DictReader(report.split("\n"))
+    user = next(report_dict)
+    user["user"].should.equal("my-user")
+    user["access_key_1_active"].should.equal("false")
+    user["access_key_1_last_rotated"].should.match(timestamp.strftime("%Y-%m-%d"))
+    user["access_key_1_last_used_date"].should.equal("N/A")
+    user["access_key_2_active"].should.equal("true")
+    if not settings.TEST_SERVER_MODE:
+        user["access_key_2_last_used_date"].should.match(timestamp.strftime("%Y-%m-%d"))
+    else:
+        user["access_key_2_last_used_date"].should.equal("N/A")
+
+
+@mock_iam
+def test_get_access_key_last_used_when_used():
+    iam = boto3.resource("iam", region_name="us-east-1")
+    client = iam.meta.client
+    username = "test-user"
+    iam.create_user(UserName=username)
+    with pytest.raises(ClientError):
+        client.get_access_key_last_used(AccessKeyId="non-existent-key-id")
+    create_key_response = client.create_access_key(UserName=username)["AccessKey"]
+    # Set last used date using the IAM backend. Moto currently does not have a mechanism for tracking usage of access keys
+    if not settings.TEST_SERVER_MODE:
+        timestamp = datetime.utcnow()
+        iam_backend = get_backend("iam")["global"]
+        iam_backend.users[username].access_keys[0].last_used = timestamp
+    resp = client.get_access_key_last_used(
+        AccessKeyId=create_key_response["AccessKeyId"]
+    )
+    if not settings.TEST_SERVER_MODE:
+        datetime.strftime(
+            resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d"
+        ).should.equal(timestamp.strftime("%Y-%m-%d"))
+    else:
+        resp["AccessKeyLastUsed"].should_not.contain("LastUsedDate")
+
+
 @requires_boto_gte("2.39")
 @mock_iam_deprecated()
 def test_managed_policy():
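Note: the two new tests above share one trick worth calling out. When the suite runs in-process (settings.TEST_SERVER_MODE is false), moto's in-memory IAM backend can be reached directly to fake state that moto does not model, such as access-key usage. A sketch of the pattern (the helper name is illustrative, not part of the diff):

    from datetime import datetime

    from moto import settings
    from moto.backends import get_backend

    def fake_access_key_usage(username, key_index=0):
        # No-op in server mode, where the backend lives in another process.
        if not settings.TEST_SERVER_MODE:
            iam_backend = get_backend("iam")["global"]
            key = iam_backend.users[username].access_keys[key_index]
            key.last_used = datetime.utcnow()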
@@ -1262,7 +1396,7 @@
     role_name = "my-role"
     conn.create_role(
-        role_name, assume_role_policy_document={"policy": "test"}, path="my-path"
+        role_name, assume_role_policy_document={"policy": "test"}, path="my-path",
     )
     for policy_name in [
         "AmazonElasticMapReduceRole",
@@ -1289,7 +1423,7 @@
     ].should.have.length_of(2)
 
     conn.detach_role_policy(
-        "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name
+        "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name,
     )
     rows = conn.list_policies(only_attached=True)["list_policies_response"][
         "list_policies_result"
@@ -1308,12 +1442,13 @@
         "attached_policies"
     ].should.have.length_of(1)
 
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.detach_role_policy(
-            "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name
+            "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole",
+            role_name,
         )
 
-    with assert_raises(BotoServerError):
+    with pytest.raises(BotoServerError):
         conn.detach_role_policy("arn:aws:iam::aws:policy/Nonexistent", role_name)
 
 
@@ -1321,13 +1456,13 @@
 def test_boto3_create_login_profile():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_login_profile(UserName="my-user", Password="Password")
 
     conn.create_user(UserName="my-user")
     conn.create_login_profile(UserName="my-user", Password="Password")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.create_login_profile(UserName="my-user", Password="Password")
 
 
@@ -1366,7 +1501,7 @@ def test_update_access_key():
     client = iam.meta.client
     username = "test-user"
     iam.create_user(UserName=username)
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.update_access_key(
             UserName=username, AccessKeyId="non-existent-key", Status="Inactive"
         )
@@ -1382,21 +1517,18 @@
 
 
 @mock_iam
-def test_get_access_key_last_used():
+def test_get_access_key_last_used_when_unused():
     iam = boto3.resource("iam", region_name="us-east-1")
     client = iam.meta.client
     username = "test-user"
     iam.create_user(UserName=username)
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.get_access_key_last_used(AccessKeyId="non-existent-key-id")
     create_key_response = client.create_access_key(UserName=username)["AccessKey"]
     resp = client.get_access_key_last_used(
         AccessKeyId=create_key_response["AccessKeyId"]
     )
-
-    datetime.strftime(
-        resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d"
-    ).should.equal(datetime.strftime(datetime.utcnow(), "%Y-%m-%d"))
+    resp["AccessKeyLastUsed"].should_not.contain("LastUsedDate")
     resp["UserName"].should.equal(create_key_response["UserName"])
 
 
@@ -1429,9 +1561,9 @@ def test_get_ssh_public_key():
     iam.create_user(UserName=username)
     public_key = MOCK_CERT
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.get_ssh_public_key(
-            UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Encoding="SSH"
+            UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Encoding="SSH",
         )
 
     resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key)
@@ -1470,9 +1602,9 @@ def test_update_ssh_public_key():
     iam.create_user(UserName=username)
     public_key = MOCK_CERT
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.update_ssh_public_key(
-            UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Status="Inactive"
+            UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Status="Inactive",
        )
 
     resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key)
@@ -1497,7 +1629,7 @@ def test_delete_ssh_public_key():
     iam.create_user(UserName=username)
     public_key = MOCK_CERT
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.delete_ssh_public_key(
             UserName=username, SSHPublicKeyId="xxnon-existent-keyxx"
         )
@@ -1550,7 +1682,7 @@ def test_get_account_authorization_details():
         UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy
     )
     conn.put_group_policy(
-        GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy
+        GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy,
     )
 
     conn.attach_user_policy(
@@ -1608,11 +1740,15 @@ def test_get_account_authorization_details():
     assert result["RoleDetailList"][0]["AttachedManagedPolicies"][0][
         "PolicyArn"
     ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID)
+    assert result["RoleDetailList"][0]["RolePolicyList"][0][
+        "PolicyDocument"
+    ] == json.loads(test_policy)
 
     result = conn.get_account_authorization_details(Filter=["User"])
     assert len(result["RoleDetailList"]) == 0
     assert len(result["UserDetailList"]) == 1
     assert len(result["UserDetailList"][0]["GroupList"]) == 1
+    assert len(result["UserDetailList"][0]["UserPolicyList"]) == 1
     assert len(result["UserDetailList"][0]["AttachedManagedPolicies"]) == 1
     assert len(result["GroupDetailList"]) == 0
     assert len(result["Policies"]) == 0
@@ -1623,6 +1759,9 @@ def test_get_account_authorization_details():
     assert result["UserDetailList"][0]["AttachedManagedPolicies"][0][
         "PolicyArn"
     ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID)
+    assert result["UserDetailList"][0]["UserPolicyList"][0][
+        "PolicyDocument"
+    ] == json.loads(test_policy)
 
     result = conn.get_account_authorization_details(Filter=["Group"])
     assert len(result["RoleDetailList"]) == 0
@@ -1638,6 +1777,9 @@ def test_get_account_authorization_details():
     assert result["GroupDetailList"][0]["AttachedManagedPolicies"][0][
         "PolicyArn"
     ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID)
+    assert result["GroupDetailList"][0]["GroupPolicyList"][0][
+        "PolicyDocument"
+    ] == json.loads(test_policy)
 
     result = conn.get_account_authorization_details(Filter=["LocalManagedPolicy"])
     assert len(result["RoleDetailList"]) == 0
@@ -1680,14 +1822,14 @@ def test_signing_certs():
     assert resp["CertificateId"]
 
     # Upload a the cert with an invalid body:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         client.upload_signing_certificate(
             UserName="testing", CertificateBody="notacert"
         )
-    assert ce.exception.response["Error"]["Code"] == "MalformedCertificate"
+    assert ce.value.response["Error"]["Code"] == "MalformedCertificate"
 
     # Upload with an invalid user:
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.upload_signing_certificate(
             UserName="notauser", CertificateBody=MOCK_CERT
         )
@@ -1697,17 +1839,17 @@
         UserName="testing", CertificateId=cert_id, Status="Inactive"
     )
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.update_signing_certificate(
             UserName="notauser", CertificateId=cert_id, Status="Inactive"
         )
 
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         client.update_signing_certificate(
             UserName="testing", CertificateId="x" * 32, Status="Inactive"
         )
 
-    assert ce.exception.response["Error"][
+    assert ce.value.response["Error"][
         "Message"
     ] == "The Certificate with id {id} cannot be found.".format(id="x" * 32)
 
@@ -1717,13 +1859,13 @@ def test_signing_certs():
     assert resp[0]["CertificateBody"] == MOCK_CERT
     assert resp[0]["Status"] == "Inactive"  # Changed with the update call above.
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.list_signing_certificates(UserName="notauser")
 
     # Delete:
     client.delete_signing_certificate(UserName="testing", CertificateId=cert_id)
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         client.delete_signing_certificate(UserName="notauser", CertificateId=cert_id)
 
 
@@ -1774,10 +1916,10 @@ def test_delete_saml_provider():
     conn.create_user(UserName="testing")
 
     cert_id = "123456789012345678901234"
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.delete_signing_certificate(UserName="testing", CertificateId=cert_id)
 
-    assert ce.exception.response["Error"][
+    assert ce.value.response["Error"][
         "Message"
     ] == "The Certificate with id {id} cannot be found.".format(id=cert_id)
 
@@ -1835,20 +1977,20 @@ def test_create_role_with_tags():
 
     # Test creating tags with invalid values:
     # With more than 50 tags:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         too_many_tags = list(
             map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51))
         )
         conn.create_role(
-            RoleName="my-role3", AssumeRolePolicyDocument="{}", Tags=too_many_tags
+            RoleName="my-role3", AssumeRolePolicyDocument="{}", Tags=too_many_tags,
        )
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
    )
 
     # With a duplicate tag:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.create_role(
             RoleName="my-role3",
             AssumeRolePolicyDocument="{}",
@@ -1856,11 +1998,11 @@
         )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # Duplicate tag with different casing:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.create_role(
             RoleName="my-role3",
             AssumeRolePolicyDocument="{}",
@@ -1868,11 +2010,11 @@
         )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big key:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.create_role(
             RoleName="my-role3",
             AssumeRolePolicyDocument="{}",
@@ -1880,11 +2022,11 @@
         )
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big value:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.create_role(
             RoleName="my-role3",
             AssumeRolePolicyDocument="{}",
@@ -1892,11 +2034,11 @@
         )
     assert (
         "Member must have length less than or equal to 256."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With an invalid character:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.create_role(
             RoleName="my-role3",
             AssumeRolePolicyDocument="{}",
@@ -1904,7 +2046,7 @@
         )
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
 
@@ -1978,64 +2120,64 @@ def test_tag_role():
 
     # Test creating tags with invalid values:
     # With more than 50 tags:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         too_many_tags = list(
             map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51))
         )
         conn.tag_role(RoleName="my-role", Tags=too_many_tags)
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a duplicate tag:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.tag_role(
             RoleName="my-role",
             Tags=[{"Key": "0", "Value": ""}, {"Key": "0", "Value": ""}],
         )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # Duplicate tag with different casing:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.tag_role(
             RoleName="my-role",
             Tags=[{"Key": "a", "Value": ""}, {"Key": "A", "Value": ""}],
         )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big key:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "0" * 129, "Value": ""}])
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big value:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "0", "Value": "0" * 257}])
     assert (
         "Member must have length less than or equal to 256."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With an invalid character:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "NOWAY!", "Value": ""}])
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a role that doesn't exist:
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.tag_role(RoleName="notarole", Tags=[{"Key": "some", "Value": "value"}])
 
 
@@ -2067,34 +2209,34 @@ def test_untag_role():
 
     # Test removing tags with invalid values:
     # With more than 50 tags:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.untag_role(RoleName="my-role", TagKeys=[str(x) for x in range(0, 51)])
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With a really big key:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.untag_role(RoleName="my-role", TagKeys=["0" * 129])
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With an invalid character:
-    with assert_raises(ClientError) as ce:
+    with pytest.raises(ClientError) as ce:
         conn.untag_role(RoleName="my-role", TagKeys=["NOWAY!"])
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With a role that doesn't exist:
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.untag_role(RoleName="notarole", TagKeys=["somevalue"])
 
 
@@ -2102,11 +2244,11 @@
 def test_update_role_description():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_role(RoleName="my-role")
 
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
     )
     response = conn.update_role_description(RoleName="my-role", Description="test")
 
@@ -2117,11 +2259,11 @@
 def test_update_role():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_role(RoleName="my-role")
 
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
     )
     response = conn.update_role_description(RoleName="my-role", Description="test")
     assert response["Role"]["RoleName"] == "my-role"
@@ -2131,11 +2273,11 @@
 def test_update_role():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_role(RoleName="my-role")
 
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
     )
     response = conn.update_role(RoleName="my-role", Description="test")
     assert len(response.keys()) == 1
@@ -2145,7 +2287,7 @@
 def test_update_role_defaults():
     conn = boto3.client("iam", region_name="us-east-1")
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         conn.delete_role(RoleName="my-role")
 
     conn.create_role(
@@ -2176,7 +2318,7 @@ def test_list_entities_for_policy():
     conn = boto3.client("iam", region_name="us-east-1")
 
     conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
     )
     conn.create_user(Path="/", UserName="testUser")
     conn.create_group(Path="/", GroupName="testGroup")
@@ -2192,7 +2334,7 @@ def test_list_entities_for_policy():
         UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy
     )
     conn.put_group_policy(
-        GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy
+        GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy,
     )
 
     conn.attach_user_policy(
@@ -2255,7 +2397,7 @@
 def test_create_role_no_path():
     conn = boto3.client("iam", region_name="us-east-1")
     resp = conn.create_role(
-        RoleName="my-role", AssumeRolePolicyDocument="some policy", Description="test"
+        RoleName="my-role", AssumeRolePolicyDocument="some policy", Description="test",
     )
     resp.get("Role").get("Arn").should.equal(
         "arn:aws:iam::{}:role/my-role".format(ACCOUNT_ID)
@@ -2281,8 +2423,20 @@ def test_create_role_with_permissions_boundary():
     resp.get("Role").get("PermissionsBoundary").should.equal(expected)
     resp.get("Role").get("Description").should.equal("test")
 
+    conn.delete_role_permissions_boundary(RoleName="my-role")
+    conn.list_roles().get("Roles")[0].should_not.have.key("PermissionsBoundary")
+
+    conn.put_role_permissions_boundary(RoleName="my-role", PermissionsBoundary=boundary)
+    resp.get("Role").get("PermissionsBoundary").should.equal(expected)
+
     invalid_boundary_arn = "arn:aws:iam::123456789:not_a_boundary"
-    with assert_raises(ClientError):
+
+    with pytest.raises(ClientError):
+        conn.put_role_permissions_boundary(
+            RoleName="my-role", PermissionsBoundary=invalid_boundary_arn
+        )
+
+    with pytest.raises(ClientError):
         conn.create_role(
             RoleName="bad-boundary",
             AssumeRolePolicyDocument="some policy",
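Note: the hunk above exercises the newly covered permissions-boundary calls. As a sketch of the round trip (the boundary ARN here is illustrative, not the one used by the test):

    import boto3
    from moto import mock_iam

    @mock_iam
    def sketch_permissions_boundary():
        conn = boto3.client("iam", region_name="us-east-1")
        boundary = "arn:aws:iam::aws:policy/ReadOnlyAccess"  # illustrative managed policy
        conn.create_role(
            RoleName="my-role",
            AssumeRolePolicyDocument="some policy",
            PermissionsBoundary=boundary,
        )
        # The boundary can be removed and re-applied independently of the role.
        conn.delete_role_permissions_boundary(RoleName="my-role")
        conn.put_role_permissions_boundary(RoleName="my-role", PermissionsBoundary=boundary)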
test_create_policy_with_same_name_should_fail(): test_policy_name = str(uuid4()) policy = iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY) # Create the role again, and verify that it fails - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY) - err.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Code"].should.equal("EntityAlreadyExists") + err.value.response["Error"]["Message"].should.equal( "A policy called {0} already exists. Duplicate names are not allowed.".format( test_policy_name ) @@ -2386,14 +2540,14 @@ def test_create_open_id_connect_provider_errors(): client.create_open_id_connect_provider.when.called_with( Url="http://example.org", - ThumbprintList=["a" * 40, "b" * 40, "c" * 40, "d" * 40, "e" * 40, "f" * 40], + ThumbprintList=["a" * 40, "b" * 40, "c" * 40, "d" * 40, "e" * 40, "f" * 40,], ).should.throw(ClientError, "Thumbprint list must contain fewer than 5 entries.") too_many_client_ids = ["{}".format(i) for i in range(101)] client.create_open_id_connect_provider.when.called_with( - Url="http://example.org", ThumbprintList=[], ClientIDList=too_many_client_ids + Url="http://example.org", ThumbprintList=[], ClientIDList=too_many_client_ids, ).should.throw( - ClientError, "Cannot exceed quota for ClientIdsPerOpenIdConnectProvider: 100" + ClientError, "Cannot exceed quota for ClientIdsPerOpenIdConnectProvider: 100", ) too_long_url = "b" * 256 @@ -2434,7 +2588,7 @@ def test_delete_open_id_connect_provider(): client.get_open_id_connect_provider.when.called_with( OpenIDConnectProviderArn=open_id_arn ).should.throw( - ClientError, "OpenIDConnect Provider not found for arn {}".format(open_id_arn) + ClientError, "OpenIDConnect Provider not found for arn {}".format(open_id_arn), ) # deleting a non existing provider should be successful @@ -2516,6 +2670,7 @@ def test_update_account_password_policy(): "RequireNumbers": False, "RequireSymbols": False, "RequireUppercaseCharacters": False, + "HardExpiry": False, } ) @@ -2525,7 +2680,7 @@ def test_update_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.update_account_password_policy.when.called_with( - MaxPasswordAge=1096, MinimumPasswordLength=129, PasswordReusePrevention=25 + MaxPasswordAge=1096, MinimumPasswordLength=129, PasswordReusePrevention=25, ).should.throw( ClientError, "3 validation errors detected: " @@ -2603,7 +2758,7 @@ def test_delete_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.delete_account_password_policy.when.called_with().should.throw( - ClientError, "The account policy with name PasswordPolicy cannot be found." 
+ ClientError, "The account policy with name PasswordPolicy cannot be found.", ) @@ -2731,7 +2886,7 @@ def test_list_user_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="kenny-bania") conn.create_user( - UserName="jackie-chiles", Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}] + UserName="jackie-chiles", Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}], ) conn.create_user( UserName="cosmo", @@ -2750,6 +2905,1117 @@ def test_list_user_tags(): response = conn.list_user_tags(UserName="cosmo") response["Tags"].should.equal( - [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"}] + [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"},] ) response["IsTruncated"].should_not.be.ok + + +@mock_iam() +def test_delete_role_with_instance_profiles_present(): + iam = boto3.client("iam", region_name="us-east-1") + + trust_policy = """ + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + """ + trust_policy = trust_policy.strip() + + iam.create_role(RoleName="Role1", AssumeRolePolicyDocument=trust_policy) + iam.create_instance_profile(InstanceProfileName="IP1") + iam.add_role_to_instance_profile(InstanceProfileName="IP1", RoleName="Role1") + + iam.create_role(RoleName="Role2", AssumeRolePolicyDocument=trust_policy) + + iam.delete_role(RoleName="Role2") + + role_names = [role["RoleName"] for role in iam.list_roles()["Roles"]] + assert "Role1" in role_names + assert "Role2" not in role_names + + +@mock_iam +def test_delete_account_password_policy_errors(): + client = boto3.client("iam", region_name="us-east-1") + + client.delete_account_password_policy.when.called_with().should.throw( + ClientError, "The account policy with name PasswordPolicy cannot be found.", + ) + + +@mock_iam +def test_role_list_config_discovered_resources(): + from moto.iam.config import role_config_query + from moto.iam.utils import random_resource_id + + # Without any roles + assert role_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + # Make 3 roles + roles = [] + num_roles = 3 + for ix in range(1, num_roles + 1): + this_role = role_config_query.backends["global"].create_role( + role_name="role{}".format(ix), + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="role{}".format(ix), + tags=[{"Key": "foo", "Value": "bar"}], + max_session_duration=3600, + ) + roles.append( + {"id": this_role.id, "name": this_role.name,} + ) + + assert len(roles) == num_roles + + result = role_config_query.list_config_service_resources(None, None, 100, None)[0] + assert len(result) == num_roles + + # The roles gets a random ID, so we can't directly test it + role = result[0] + assert role["type"] == "AWS::IAM::Role" + assert role["id"] in list(map(lambda p: p["id"], roles)) + assert role["name"] in list(map(lambda p: p["name"], roles)) + assert role["region"] == "global" + + # test passing list of resource ids + resource_ids = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], None, 100, None + )[0] + assert len(resource_ids) == 2 + + # test passing a single resource name + resource_name = role_config_query.list_config_service_resources( + None, roles[0]["name"], 100, None + )[0] + assert len(resource_name) == 1 + assert resource_name[0]["id"] == roles[0]["id"] + assert resource_name[0]["name"] == roles[0]["name"] + + # test passing a 
single resource name AND some resource id's + both_filter_good = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], roles[0]["name"], 100, None + )[0] + assert len(both_filter_good) == 1 + assert both_filter_good[0]["id"] == roles[0]["id"] + assert both_filter_good[0]["name"] == roles[0]["name"] + + both_filter_bad = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], roles[2]["name"], 100, None + )[0] + assert len(both_filter_bad) == 0 + + +@mock_iam +def test_role_config_dict(): + from moto.iam.config import role_config_query, policy_config_query + from moto.iam.utils import random_resource_id, random_policy_id + + # Without any roles + assert not role_config_query.get_config_resource("something") + assert role_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_assume_role = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "sts:AssumeRole",} + ], + } + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + # Create a policy for use in role permissions boundary + policy_arn = ( + policy_config_query.backends["global"] + .create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + .arn + ) + + policy_id = policy_config_query.list_config_service_resources( + None, None, 100, None + )[0][0]["id"] + assert len(policy_id) == len(random_policy_id()) + + # Create some roles (and grab them repeatedly since they create with random IDs) + role_config_query.backends["global"].create_role( + role_name="plain_role", + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="plain_role", + tags=[{"Key": "foo", "Value": "bar"}], + max_session_duration=3600, + ) + + plain_role = role_config_query.list_config_service_resources(None, None, 100, None)[ + 0 + ][0] + assert plain_role is not None + assert len(plain_role["id"]) == len(random_resource_id()) + + role_config_query.backends["global"].create_role( + role_name="assume_role", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="assume_role", + tags=[], + max_session_duration=3600, + ) + + assume_role = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] not in [plain_role["id"]] + ) + assert assume_role is not None + assert len(assume_role["id"]) == len(random_resource_id()) + assert assume_role["id"] != plain_role["id"] + + role_config_query.backends["global"].create_role( + role_name="assume_and_permission_boundary_role", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=policy_arn, + description="assume_and_permission_boundary_role", + tags=[], + max_session_duration=3600, + ) + + assume_and_permission_boundary_role = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] not in [plain_role["id"], assume_role["id"]] + ) + assert assume_and_permission_boundary_role is not None + assert len(assume_and_permission_boundary_role["id"]) == len(random_resource_id()) + assert assume_and_permission_boundary_role["id"] != plain_role["id"] + assert assume_and_permission_boundary_role["id"] != assume_role["id"] +
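The `both_filter_good`/`both_filter_bad` assertions above pin down the filter contract of `list_config_service_resources`: a resource-id list and a resource name are combined as an intersection, so a name outside the id set matches nothing. A minimal standalone sketch of that contract (the helper `filter_resources` is illustrative, not moto's API):

def filter_resources(resources, resource_ids=None, resource_name=None):
    # The id filter is applied first, the name filter on top of it;
    # when both are supplied, a resource must satisfy both to match.
    matched = resources
    if resource_ids is not None:
        matched = [r for r in matched if r["id"] in resource_ids]
    if resource_name is not None:
        matched = [r for r in matched if r["name"] == resource_name]
    return matched

roles = [{"id": "id1", "name": "role1"}, {"id": "id2", "name": "role2"}, {"id": "id3", "name": "role3"}]
assert filter_resources(roles, resource_ids=["id1", "id2"], resource_name="role1") == [roles[0]]
assert filter_resources(roles, resource_ids=["id1", "id2"], resource_name="role3") == []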
role_config_query.backends["global"].create_role( + role_name="role_with_attached_policy", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="role_with_attached_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].attach_role_policy( + policy_arn, "role_with_attached_policy" + ) + role_with_attached_policy = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] + not in [ + plain_role["id"], + assume_role["id"], + assume_and_permission_boundary_role["id"], + ] + ) + assert role_with_attached_policy is not None + assert len(role_with_attached_policy["id"]) == len(random_resource_id()) + assert role_with_attached_policy["id"] is not plain_role["id"] + assert role_with_attached_policy["id"] is not assume_role["id"] + assert ( + role_with_attached_policy["id"] is not assume_and_permission_boundary_role["id"] + ) + + role_config_query.backends["global"].create_role( + role_name="role_with_inline_policy", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="role_with_inline_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].put_role_policy( + "role_with_inline_policy", "inline_policy", json.dumps(basic_policy) + ) + + role_with_inline_policy = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] + not in [ + plain_role["id"], + assume_role["id"], + assume_and_permission_boundary_role["id"], + role_with_attached_policy["id"], + ] + ) + assert role_with_inline_policy is not None + assert len(role_with_inline_policy["id"]) == len(random_resource_id()) + assert role_with_inline_policy["id"] is not plain_role["id"] + assert role_with_inline_policy["id"] is not assume_role["id"] + assert ( + role_with_inline_policy["id"] is not assume_and_permission_boundary_role["id"] + ) + assert role_with_inline_policy["id"] is not role_with_attached_policy["id"] + + # plain role + plain_role_config = ( + role_config_query.backends["global"].roles[plain_role["id"]].to_config_dict() + ) + assert plain_role_config["version"] == "1.3" + assert plain_role_config["configurationItemStatus"] == "ResourceDiscovered" + assert plain_role_config["configurationStateId"] is not None + assert plain_role_config["arn"] == "arn:aws:iam::123456789012:role/plain_role" + assert plain_role_config["resourceType"] == "AWS::IAM::Role" + assert plain_role_config["resourceId"] == "plain_role" + assert plain_role_config["resourceName"] == "plain_role" + assert plain_role_config["awsRegion"] == "global" + assert plain_role_config["availabilityZone"] == "Not Applicable" + assert plain_role_config["resourceCreationTime"] is not None + assert plain_role_config["tags"] == {"foo": {"Key": "foo", "Value": "bar"}} + assert plain_role_config["configuration"]["path"] == "/" + assert plain_role_config["configuration"]["roleName"] == "plain_role" + assert plain_role_config["configuration"]["roleId"] == plain_role["id"] + assert plain_role_config["configuration"]["arn"] == plain_role_config["arn"] + assert plain_role_config["configuration"]["assumeRolePolicyDocument"] is None + assert plain_role_config["configuration"]["instanceProfileList"] == [] + assert plain_role_config["configuration"]["rolePolicyList"] == [] + assert plain_role_config["configuration"]["attachedManagedPolicies"] == [] + assert 
plain_role_config["configuration"]["permissionsBoundary"] is None + assert plain_role_config["configuration"]["tags"] == [ + {"key": "foo", "value": "bar"} + ] + assert plain_role_config["supplementaryConfiguration"] == {} + + # assume_role + assume_role_config = ( + role_config_query.backends["global"].roles[assume_role["id"]].to_config_dict() + ) + assert assume_role_config["arn"] == "arn:aws:iam::123456789012:role/assume_role" + assert assume_role_config["resourceId"] == "assume_role" + assert assume_role_config["resourceName"] == "assume_role" + assert assume_role_config["configuration"][ + "assumeRolePolicyDocument" + ] == parse.quote(json.dumps(basic_assume_role)) + + # assume_and_permission_boundary_role + assume_and_permission_boundary_role_config = ( + role_config_query.backends["global"] + .roles[assume_and_permission_boundary_role["id"]] + .to_config_dict() + ) + assert ( + assume_and_permission_boundary_role_config["arn"] + == "arn:aws:iam::123456789012:role/assume_and_permission_boundary_role" + ) + assert ( + assume_and_permission_boundary_role_config["resourceId"] + == "assume_and_permission_boundary_role" + ) + assert ( + assume_and_permission_boundary_role_config["resourceName"] + == "assume_and_permission_boundary_role" + ) + assert assume_and_permission_boundary_role_config["configuration"][ + "assumeRolePolicyDocument" + ] == parse.quote(json.dumps(basic_assume_role)) + assert ( + assume_and_permission_boundary_role_config["configuration"][ + "permissionsBoundary" + ] + == policy_arn + ) + + # role_with_attached_policy + role_with_attached_policy_config = ( + role_config_query.backends["global"] + .roles[role_with_attached_policy["id"]] + .to_config_dict() + ) + assert ( + role_with_attached_policy_config["arn"] + == "arn:aws:iam::123456789012:role/role_with_attached_policy" + ) + assert role_with_attached_policy_config["configuration"][ + "attachedManagedPolicies" + ] == [{"policyArn": policy_arn, "policyName": "basic_policy"}] + + # role_with_inline_policy + role_with_inline_policy_config = ( + role_config_query.backends["global"] + .roles[role_with_inline_policy["id"]] + .to_config_dict() + ) + assert ( + role_with_inline_policy_config["arn"] + == "arn:aws:iam::123456789012:role/role_with_inline_policy" + ) + assert role_with_inline_policy_config["configuration"]["rolePolicyList"] == [ + { + "policyName": "inline_policy", + "policyDocument": parse.quote(json.dumps(basic_policy)), + } + ] + + +@mock_iam +@mock_config +def test_role_config_client(): + from moto.iam.models import ACCOUNT_ID + from moto.iam.utils import random_resource_id + + CONFIG_REGIONS = boto3.Session().get_available_regions("config") + + iam_client = boto3.client("iam", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + all_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + + two_region_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AwsRegions": ["us-east-1", "us-west-2"], + } + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator", + AccountAggregationSources=[all_account_aggregation_source], + ) + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator_two_regions", + AccountAggregationSources=[two_region_account_aggregation_source], + ) + + result = config_client.list_discovered_resources(resourceType="AWS::IAM::Role") + assert not result["resourceIdentifiers"] + + # Make 10 policies + roles = [] + num_roles = 10 + for 
+ for ix in range(1, num_roles + 1): + this_role = iam_client.create_role( + RoleName="role{}".format(ix), + Path="/", + Description="role{}".format(ix), + AssumeRolePolicyDocument=json.dumps("{ }"), + ) + roles.append( + { + "id": this_role["Role"]["RoleId"], + "name": this_role["Role"]["RoleName"], + } + ) + + assert len(roles) == num_roles + + # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) + result = config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", limit=1 + ) + first_result = result["resourceIdentifiers"][0]["resourceId"] + assert result["resourceIdentifiers"][0]["resourceType"] == "AWS::IAM::Role" + assert len(first_result) == len(random_resource_id()) + + # Test non-aggregated pagination + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", limit=1, nextToken=result["nextToken"], + )["resourceIdentifiers"][0]["resourceId"] + ) != first_result + + # Test aggregated query - by `Limit=len(CONFIG_REGIONS)`, we should get a single role duplicated across all regions + agg_result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::IAM::Role", + ConfigurationAggregatorName="test_aggregator", + Limit=len(CONFIG_REGIONS), + ) + assert len(agg_result["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + + agg_name = None + agg_id = None + for resource in agg_result["ResourceIdentifiers"]: + assert resource["ResourceType"] == "AWS::IAM::Role" + assert resource["SourceRegion"] in CONFIG_REGIONS + assert resource["SourceAccountId"] == ACCOUNT_ID + if agg_id: + assert resource["ResourceId"] == agg_id + if agg_name: + assert resource["ResourceName"] == agg_name + agg_name = resource["ResourceName"] + agg_id = resource["ResourceId"] + + # Test aggregated pagination + for resource in config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"]: + assert resource["ResourceId"] != agg_id + + # Test non-aggregated resource name/id filter + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[1]["name"] + ) + + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[0]["name"] + ) + + # Test aggregated resource name/id filter + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == roles[5]["id"] + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == roles[5]["id"] +
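The `assumeRolePolicyDocument` and `policyDocument` comparisons in `test_role_config_dict` above expect the config dict to carry policy documents URL-encoded, which is how AWS Config and IAM's query APIs return them. A short round trip, standard library only, shows the encoding those `parse.quote(...)` asserts rely on:

import json
from urllib.parse import quote, unquote

basic_assume_role = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "sts:AssumeRole"}],
}
encoded = quote(json.dumps(basic_assume_role))
assert "%22" in encoded  # double quotes are percent-encoded
assert json.loads(unquote(encoded)) == basic_assume_role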
assert len(agg_id_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + assert agg_id_filter["ResourceIdentifiers"][0]["ResourceName"] == roles[4]["name"] + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Role", + Filters={"ResourceId": roles[5]["id"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == roles[5]["name"] + + # Test non-aggregated resource name/id filter + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[1]["name"] + ) + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[0]["name"] + ) + + # Test aggregated resource name/id filter + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == roles[5]["name"] + ) + + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceId": roles[4]["id"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == roles[4]["name"] + ) + + # Test name/id filter with pagination + first_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", + resourceIds=[roles[1]["id"], roles[2]["id"]], + limit=1, + ) + + assert first_call["nextToken"] in [roles[1]["id"], roles[2]["id"]] + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + roles[1]["name"], + roles[2]["name"], + ] + second_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", + resourceIds=[roles[1]["id"], roles[2]["id"]], + limit=1, + nextToken=first_call["nextToken"], + ) + assert "nextToken" not in second_call + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + roles[1]["name"], + roles[2]["name"], + ] + assert ( + first_call["resourceIdentifiers"][0]["resourceName"] + != second_call["resourceIdentifiers"][0]["resourceName"] + ) + + # Test non-aggregated batch get + assert ( + config_client.batch_get_resource_config( + resourceKeys=[ + {"resourceType": "AWS::IAM::Role", "resourceId": roles[0]["id"]} + ] + )["baseConfigurationItems"][0]["resourceName"] + == roles[0]["name"] + ) + + # Test aggregated batch get + assert ( + config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="test_aggregator", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "us-east-1", + "ResourceId": roles[1]["id"], + "ResourceType": "AWS::IAM::Role", + } + ], + )["BaseConfigurationItems"][0]["resourceName"] + == roles[1]["name"] + ) + + +@mock_iam +def test_policy_list_config_discovered_resources(): + from moto.iam.config import policy_config_query + from moto.iam.utils import random_policy_id + + # Without any policies + assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Action": ["ec2:DeleteKeyPair"], "Effect": "Deny", "Resource": "*"} + ], + } + + # 
Make 3 policies + policies = [] + num_policies = 3 + for ix in range(1, num_policies + 1): + this_policy = policy_config_query.backends["global"].create_policy( + description="policy{}".format(ix), + path="", + policy_document=json.dumps(basic_policy), + policy_name="policy{}".format(ix), + ) + policies.append( + {"id": this_policy.id, "name": this_policy.name,} + ) + + assert len(policies) == num_policies + + # We expect the backend to have arns as their keys + for backend_key in list( + policy_config_query.backends["global"].managed_policies.keys() + ): + assert backend_key.startswith("arn:aws:iam::") + + result = policy_config_query.list_config_service_resources(None, None, 100, None)[0] + assert len(result) == num_policies + + policy = result[0] + assert policy["type"] == "AWS::IAM::Policy" + assert policy["id"] in list(map(lambda p: p["id"], policies)) + assert policy["name"] in list(map(lambda p: p["name"], policies)) + assert policy["region"] == "global" + + # test passing list of resource ids + resource_ids = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], None, 100, None + )[0] + assert len(resource_ids) == 2 + + # test passing a single resource name + resource_name = policy_config_query.list_config_service_resources( + None, policies[0]["name"], 100, None + )[0] + assert len(resource_name) == 1 + assert resource_name[0]["id"] == policies[0]["id"] + assert resource_name[0]["name"] == policies[0]["name"] + + # test passing a single resource name AND some resource id's + both_filter_good = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], policies[0]["name"], 100, None + )[0] + assert len(both_filter_good) == 1 + assert both_filter_good[0]["id"] == policies[0]["id"] + assert both_filter_good[0]["name"] == policies[0]["name"] + + both_filter_bad = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], policies[2]["name"], 100, None + )[0] + assert len(both_filter_bad) == 0 + + +@mock_iam +def test_policy_config_dict(): + from moto.iam.config import role_config_query, policy_config_query + from moto.iam.utils import random_policy_id + + # Without any roles + assert not policy_config_query.get_config_resource( + "arn:aws:iam::123456789012:policy/basic_policy" + ) + assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + basic_policy_v2 = { + "Version": "2012-10-17", + "Statement": [ + {"Action": ["ec2:*", "s3:*"], "Effect": "Allow", "Resource": "*"} + ], + } + + policy_arn = ( + policy_config_query.backends["global"] + .create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + .arn + ) + + policy_id = policy_config_query.list_config_service_resources( + None, None, 100, None + )[0][0]["id"] + assert len(policy_id) == len(random_policy_id()) + + assert policy_arn == "arn:aws:iam::123456789012:policy/basic_policy" + assert policy_config_query.get_config_resource(policy_id) is not None + + # Create a new version + policy_config_query.backends["global"].create_policy_version( + policy_arn, json.dumps(basic_policy_v2), "true" + ) + + # Create role to trigger attachment + role_config_query.backends["global"].create_role( + role_name="role_with_attached_policy", + assume_role_policy_document=None, + path="/", + 
permissions_boundary=None, + description="role_with_attached_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].attach_role_policy( + policy_arn, "role_with_attached_policy" + ) + + policy = ( + role_config_query.backends["global"] + .managed_policies["arn:aws:iam::123456789012:policy/basic_policy"] + .to_config_dict() + ) + assert policy["version"] == "1.3" + assert policy["configurationItemCaptureTime"] is not None + assert policy["configurationItemStatus"] == "OK" + assert policy["configurationStateId"] is not None + assert policy["arn"] == "arn:aws:iam::123456789012:policy/basic_policy" + assert policy["resourceType"] == "AWS::IAM::Policy" + assert len(policy["resourceId"]) == len(random_policy_id()) + assert policy["resourceName"] == "basic_policy" + assert policy["awsRegion"] == "global" + assert policy["availabilityZone"] == "Not Applicable" + assert policy["resourceCreationTime"] is not None + assert policy["configuration"]["policyName"] == policy["resourceName"] + assert policy["configuration"]["policyId"] == policy["resourceId"] + assert policy["configuration"]["arn"] == policy["arn"] + assert policy["configuration"]["path"] == "/" + assert policy["configuration"]["defaultVersionId"] == "v2" + assert policy["configuration"]["attachmentCount"] == 1 + assert policy["configuration"]["permissionsBoundaryUsageCount"] == 0 + assert policy["configuration"]["isAttachable"] == True + assert policy["configuration"]["description"] == "basic_policy" + assert policy["configuration"]["createDate"] is not None + assert policy["configuration"]["updateDate"] is not None + assert policy["configuration"]["policyVersionList"] == [ + { + "document": str(parse.quote(json.dumps(basic_policy))), + "versionId": "v1", + "isDefaultVersion": False, + "createDate": policy["configuration"]["policyVersionList"][0]["createDate"], + }, + { + "document": str(parse.quote(json.dumps(basic_policy_v2))), + "versionId": "v2", + "isDefaultVersion": True, + "createDate": policy["configuration"]["policyVersionList"][1]["createDate"], + }, + ] + assert policy["supplementaryConfiguration"] == {} + + +@mock_iam +@mock_config +def test_policy_config_client(): + from moto.iam.models import ACCOUNT_ID + from moto.iam.utils import random_policy_id + + CONFIG_REGIONS = boto3.Session().get_available_regions("config") + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + iam_client = boto3.client("iam", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + all_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + + two_region_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AwsRegions": ["us-east-1", "us-west-2"], + } + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator", + AccountAggregationSources=[all_account_aggregation_source], + ) + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator_two_regions", + AccountAggregationSources=[two_region_account_aggregation_source], + ) + + result = config_client.list_discovered_resources(resourceType="AWS::IAM::Policy") + assert not result["resourceIdentifiers"] + + # Make 10 policies + policies = [] + num_policies = 10 + for ix in range(1, num_policies + 1): + this_policy = iam_client.create_policy( + PolicyName="policy{}".format(ix), + Path="/", + PolicyDocument=json.dumps(basic_policy), + 
Description="policy{}".format(ix), + ) + policies.append( + { + "id": this_policy["Policy"]["PolicyId"], + "name": this_policy["Policy"]["PolicyName"], + } + ) + + assert len(policies) == num_policies + + # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) + result = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", limit=1 + ) + first_result = result["resourceIdentifiers"][0]["resourceId"] + assert result["resourceIdentifiers"][0]["resourceType"] == "AWS::IAM::Policy" + assert len(first_result) == len(random_policy_id()) + + # Test non-aggregated pagination + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", limit=1, nextToken=result["nextToken"], + )["resourceIdentifiers"][0]["resourceId"] + ) != first_result + + # Test aggregated query - by `Limit=len(CONFIG_REGIONS)`, we should get a single policy duplicated across all regions + agg_result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::IAM::Policy", + ConfigurationAggregatorName="test_aggregator", + Limit=len(CONFIG_REGIONS), + ) + assert len(agg_result["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + + agg_name = None + agg_id = None + for resource in agg_result["ResourceIdentifiers"]: + assert resource["ResourceType"] == "AWS::IAM::Policy" + assert resource["SourceRegion"] in CONFIG_REGIONS + assert resource["SourceAccountId"] == ACCOUNT_ID + if agg_id: + assert resource["ResourceId"] == agg_id + if agg_name: + assert resource["ResourceName"] == agg_name + agg_name = resource["ResourceName"] + agg_id = resource["ResourceId"] + + # Test aggregated pagination + for resource in config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Limit=1, + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"]: + assert resource["ResourceId"] != agg_id + + # Test non-aggregated resource name/id filter + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", resourceName=policies[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == policies[1]["name"] + ) + + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", resourceIds=[policies[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == policies[0]["name"] + ) + + # Test aggregated resource name/id filter + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceName": policies[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + assert ( + agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[5]["name"] + ) + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceName": policies[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == policies[5]["id"] + + agg_id_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceId": policies[4]["id"]}, + ) + + assert len(agg_id_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + 
assert ( + agg_id_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[4]["name"] + ) + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceId": policies[5]["id"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert ( + agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[5]["name"] + ) + + # Test name/id filter with pagination + first_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", + resourceIds=[policies[1]["id"], policies[2]["id"]], + limit=1, + ) + + assert first_call["nextToken"] in [policies[1]["id"], policies[2]["id"]] + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + policies[1]["name"], + policies[2]["name"], + ] + second_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", + resourceIds=[policies[1]["id"], policies[2]["id"]], + limit=1, + nextToken=first_call["nextToken"], + ) + assert "nextToken" not in second_call + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + policies[1]["name"], + policies[2]["name"], + ] + assert ( + first_call["resourceIdentifiers"][0]["resourceName"] + != second_call["resourceIdentifiers"][0]["resourceName"] + ) + + # Test non-aggregated batch get + assert ( + config_client.batch_get_resource_config( + resourceKeys=[ + {"resourceType": "AWS::IAM::Policy", "resourceId": policies[7]["id"],} + ] + )["baseConfigurationItems"][0]["resourceName"] + == policies[7]["name"] + ) + + # Test aggregated batch get + assert ( + config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="test_aggregator", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "us-east-2", + "ResourceId": policies[8]["id"], + "ResourceType": "AWS::IAM::Policy", + } + ], + )["BaseConfigurationItems"][0]["resourceName"] + == policies[8]["name"] + ) + + +@mock_iam() +def test_list_roles_with_more_than_100_roles_no_max_items_defaults_to_100(): + iam = boto3.client("iam", region_name="us-east-1") + for i in range(150): + iam.create_role( + RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy" + ) + response = iam.list_roles() + roles = response["Roles"] + + assert response["IsTruncated"] is True + assert len(roles) == 100 + + +@mock_iam() +def test_list_roles_max_item_and_marker_values_adhered(): + iam = boto3.client("iam", region_name="us-east-1") + for i in range(10): + iam.create_role( + RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy" + ) + response = iam.list_roles(MaxItems=2) + roles = response["Roles"] + + assert response["IsTruncated"] is True + assert len(roles) == 2 + + response = iam.list_roles(Marker=response["Marker"]) + roles = response["Roles"] + + assert response["IsTruncated"] is False + assert len(roles) == 8 + + +@mock_iam() +def test_list_roles_path_prefix_value_adhered(): + iam = boto3.client("iam", region_name="us-east-1") + iam.create_role( + RoleName="test_role_without_path", AssumeRolePolicyDocument="some policy" + ) + iam.create_role( + RoleName="test_role_with_path", + AssumeRolePolicyDocument="some policy", + Path="/TestPath/", + ) + + response = iam.list_roles(PathPrefix="/TestPath/") + roles = response["Roles"] + + assert len(roles) == 1 + assert roles[0]["RoleName"] == "test_role_with_path" + + +@mock_iam() +def 
test_list_roles_none_found_returns_empty_list(): + iam = boto3.client("iam", region_name="us-east-1") + + response = iam.list_roles() + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(PathPrefix="/TestPath") + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(Marker="10") + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(MaxItems=10) + roles = response["Roles"] + assert len(roles) == 0 + + +@mock_iam() +def test_create_user_with_tags(): + conn = boto3.client("iam", region_name="us-east-1") + user_name = "test-user" + tags = [ + {"Key": "somekey", "Value": "somevalue"}, + {"Key": "someotherkey", "Value": "someothervalue"}, + ] + resp = conn.create_user(UserName=user_name, Tags=tags) + assert resp["User"]["Tags"] == tags + resp = conn.list_user_tags(UserName=user_name) + assert resp["Tags"] == tags + + resp = conn.create_user(UserName="test-create-user-no-tags") + assert "Tags" not in resp["User"] diff --git a/tests/test_iam/test_iam_cloudformation.py b/tests/test_iam/test_iam_cloudformation.py new file mode 100644 index 000000000..a50ed8234 --- /dev/null +++ b/tests/test_iam/test_iam_cloudformation.py @@ -0,0 +1,1196 @@ +import boto3 +import yaml +import sure # noqa + +import pytest +from botocore.exceptions import ClientError + +from moto import mock_iam, mock_cloudformation, mock_s3, mock_sts + +# AWS::IAM::User Tests +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("TheUser") + provisioned_resource["PhysicalResourceId"].should.equal(user_name) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_no_interruption(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + user_name = provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name)["User"] + user["Path"].should.equal("/") + + path = "/MyPath/" + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + Path: {0} +""".strip().format( + path + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + user = iam_client.get_user(UserName=user_name)["User"] + user["Path"].should.equal(path) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_replacement(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + original_user_name = 
provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=original_user_name)["User"] + user["Path"].should.equal("/") + + new_user_name = "MyUser" + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +""".strip().format( + new_user_name + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + with pytest.raises(ClientError) as e: + iam_client.get_user(UserName=original_user_name) + e.value.response["Error"]["Code"].should.equal("NoSuchEntity") + + iam_client.get_user(UserName=new_user_name) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_drop_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheFirstUser: + Type: AWS::IAM::User + TheSecondUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + first_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheFirstUser" + ][0] + second_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheSecondUser" + ][0] + first_user_name = first_provisioned_user["PhysicalResourceId"] + second_user_name = second_provisioned_user["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + iam_client.get_user(UserName=first_user_name) + iam_client.get_user(UserName=second_user_name) + + template = """ +Resources: + TheSecondUser: + Type: AWS::IAM::User +""".strip() + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + len(provisioned_resources).should.equal(1) + second_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheSecondUser" + ][0] + second_user_name.should.equal(second_provisioned_user["PhysicalResourceId"]) + + iam_client.get_user(UserName=second_user_name) + with pytest.raises(ClientError) as e: + iam_client.get_user(UserName=first_user_name) + e.value.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {} +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + + cf_client.delete_stack(StackName=stack_name) + + with pytest.raises(ClientError) as e: + user = iam_client.get_user(UserName=user_name) + e.value.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user_having_generated_name(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = 
cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("TheUser") + user_name = provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + + cf_client.delete_stack(StackName=stack_name) + + with pytest.raises(ClientError) as e: + user = iam_client.get_user(UserName=user_name) + e.value.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_user_get_attr(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +Outputs: + UserName: + Value: !Ref TheUser + UserArn: + Value: !GetAtt TheUser.Arn +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + output_user_name = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "UserName" + ][0] + output_user_arn = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "UserArn" + ][0] + + iam_client = boto3.client("iam", region_name="us-east-1") + user_description = iam_client.get_user(UserName=output_user_name)["User"] + output_user_arn.should.equal(user_description["Arn"]) + + +# AWS::IAM::Policy Tests +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name = "MyUser" + iam_client.create_user(UserName=user_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + bucket = s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name_1 = "MyUser1" + iam_client.create_user(UserName=user_name_1) + user_name_2 = "MyUser2" + iam_client.create_user(UserName=user_name_2) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = 
boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and user + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name_2 + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_user_policy.when.called_with( + UserName=user_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name = "MyUser" + iam_client.create_user(UserName=user_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + bucket = s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Users: + - {1} +""".strip().format( + bucket_arn, user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + 
cf_client.delete_stack(StackName=stack_name) + iam_client.get_user_policy.when.called_with( + UserName=user_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_role_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name = "MyRole" + iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_role_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name_1 = "MyRole1" + iam_client.create_role(RoleName=role_name_1, AssumeRolePolicyDocument="{}") + role_name_2 = "MyRole2" + iam_client.create_role(RoleName=role_name_2, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and role + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name_2 + ) + + 
cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_role_policy.when.called_with( + RoleName=role_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_role_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name = "MyRole" + iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Roles: + - {1} +""".strip().format( + bucket_arn, role_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + cf_client.delete_stack(StackName=stack_name) + iam_client.get_role_policy.when.called_with( + RoleName=role_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_group_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name = "MyGroup" + iam_client.create_group(GroupName=group_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + 
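These tests mix two equivalent styles for asserting an expected failure: sure's `.when.called_with(...).should.throw(...)` and pytest's `raises` context manager, both of which appear in this file. For reference, the `NoSuchEntityException` checks translate to the pytest form like this (the wrapper function is illustrative):

import pytest
from botocore.exceptions import ClientError

def assert_role_policy_gone(iam_client, role_name, policy_name):
    # Equivalent to:
    #   iam_client.get_role_policy.when.called_with(
    #       RoleName=role_name, PolicyName=policy_name
    #   ).should.throw(iam_client.exceptions.NoSuchEntityException)
    with pytest.raises(ClientError) as err:
        iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
    assert err.value.response["Error"]["Code"] == "NoSuchEntity"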
original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_group_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name_1 = "MyGroup1" + iam_client.create_group(GroupName=group_name_1) + group_name_2 = "MyGroup2" + iam_client.create_group(GroupName=group_name_2) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and group + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name_2 + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_group_policy.when.called_with( + GroupName=group_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_group_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name = "MyGroup" + iam_client.create_group(GroupName=group_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + 
PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Groups: + - {1} +""".strip().format( + bucket_arn, group_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + cf_client.delete_stack(StackName=stack_name) + iam_client.get_group_policy.when.called_with( + GroupName=group_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +# AWS::IAM::User AccessKeys +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user_with_access_key(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_keys = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ] + len(provisioned_access_keys).should.equal(1) + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name)["User"] + user["UserName"].should.equal(user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_keys["AccessKeyMetadata"][0]["UserName"].should.equal(user_name) + + +@mock_sts +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_access_key_get_attr(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +Outputs: + AccessKeyId: + Value: !Ref TheAccessKey + SecretKey: + Value: !GetAtt TheAccessKey.SecretAccessKey +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + stack_description = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + output_access_key_id = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "AccessKeyId" + ][0] + output_secret_key = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "SecretKey" + ][0] + + sts_client = boto3.client( + "sts", + aws_access_key_id=output_access_key_id, + 
aws_secret_access_key=output_secret_key,
+        region_name="us-east-1",
+    )
+    caller_identity = sts_client.get_caller_identity()
+    caller_identity["Arn"].split("/")[1].should.equal(user_name)
+
+
+@mock_iam
+@mock_cloudformation
+def test_iam_cloudformation_delete_users_access_key():
+    cf_client = boto3.client("cloudformation", region_name="us-east-1")
+
+    stack_name = "MyStack"
+
+    template = """
+    Resources:
+      TheUser:
+        Type: AWS::IAM::User
+      TheAccessKey:
+        Type: AWS::IAM::AccessKey
+        Properties:
+          UserName: !Ref TheUser
+    """.strip()
+
+    cf_client.create_stack(StackName=stack_name, TemplateBody=template)
+
+    provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[
+        "StackResourceSummaries"
+    ]
+
+    provisioned_user = [
+        resource
+        for resource in provisioned_resources
+        if resource["LogicalResourceId"] == "TheUser"
+    ][0]
+    user_name = provisioned_user["PhysicalResourceId"]
+
+    provisioned_access_key = [
+        resource
+        for resource in provisioned_resources
+        if resource["LogicalResourceId"] == "TheAccessKey"
+    ][0]
+    access_key_id = provisioned_access_key["PhysicalResourceId"]
+
+    iam_client = boto3.client("iam", region_name="us-east-1")
+    user = iam_client.get_user(UserName=user_name)
+    access_keys = iam_client.list_access_keys(UserName=user_name)
+
+    access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"])
+
+    cf_client.delete_stack(StackName=stack_name)
+
+    iam_client.get_user.when.called_with(UserName=user_name).should.throw(
+        iam_client.exceptions.NoSuchEntityException
+    )
+    iam_client.list_access_keys.when.called_with(UserName=user_name).should.throw(
+        iam_client.exceptions.NoSuchEntityException
+    )
+
+
+@mock_iam
+@mock_cloudformation
+def test_iam_cloudformation_delete_users_access_key_and_user():
+    cf_client = boto3.client("cloudformation", region_name="us-east-1")
+
+    stack_name = "MyStack"
+
+    template = """
+    Resources:
+      TheUser:
+        Type: AWS::IAM::User
+      TheAccessKey:
+        Type: AWS::IAM::AccessKey
+        Properties:
+          UserName: !Ref TheUser
+    """.strip()
+
+    cf_client.create_stack(StackName=stack_name, TemplateBody=template)
+
+    provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[
+        "StackResourceSummaries"
+    ]
+
+    provisioned_user = [
+        resource
+        for resource in provisioned_resources
+        if resource["LogicalResourceId"] == "TheUser"
+    ][0]
+    user_name = provisioned_user["PhysicalResourceId"]
+
+    provisioned_access_keys = [
+        resource
+        for resource in provisioned_resources
+        if resource["LogicalResourceId"] == "TheAccessKey"
+    ]
+    len(provisioned_access_keys).should.equal(1)
+
+    iam_client = boto3.client("iam", region_name="us-east-1")
+    user = iam_client.get_user(UserName=user_name)["User"]
+    user["UserName"].should.equal(user_name)
+    access_keys = iam_client.list_access_keys(UserName=user_name)
+    access_keys["AccessKeyMetadata"][0]["UserName"].should.equal(user_name)
+
+    cf_client.delete_stack(StackName=stack_name)
+
+    iam_client.get_user.when.called_with(UserName=user_name).should.throw(
+        iam_client.exceptions.NoSuchEntityException
+    )
+    iam_client.list_access_keys.when.called_with(UserName=user_name).should.throw(
+        iam_client.exceptions.NoSuchEntityException
+    )
+
+
+@mock_iam
+@mock_cloudformation
+def test_iam_cloudformation_update_users_access_key_no_interruption():
+    cf_client = boto3.client("cloudformation", region_name="us-east-1")
+
+    stack_name = "MyStack"
+
+    template = """
+Resources:
+  TheUser:
+    Type: AWS::IAM::User
+  TheAccessKey:
+    Type: AWS::IAM::AccessKey
+    Properties:
+      UserName: !Ref TheUser
+""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_key = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ][0] + access_key_id = provisioned_access_key["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + Status: Inactive +""".strip() + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_keys["AccessKeyMetadata"][0]["Status"].should.equal("Inactive") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_users_access_key_replacement(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_key = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ][0] + access_key_id = provisioned_access_key["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) + + other_user_name = "MyUser" + iam_client.create_user(UserName=other_user_name) + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: {0} +""".strip().format( + other_user_name + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + access_keys = iam_client.list_access_keys(UserName=user_name) + len(access_keys["AccessKeyMetadata"]).should.equal(0) + + access_keys = iam_client.list_access_keys(UserName=other_user_name) + access_key_id.should_not.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 64d838e2b..85464b44d 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -6,7 +6,7 @@ import boto import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from boto.exception import BotoServerError from botocore.exceptions import ClientError from moto import mock_iam, mock_iam_deprecated @@ -29,7 +29,7 @@ MOCK_POLICY = """ def test_create_group(): conn = boto.connect_iam() 
conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.create_group("my-group") @@ -38,7 +38,7 @@ def test_get_group(): conn = boto.connect_iam() conn.create_group("my-group") conn.get_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_group("not-group") @@ -77,10 +77,10 @@ def test_get_all_groups(): @mock_iam_deprecated() def test_add_user_to_group(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.add_user_to_group("my-group", "my-user") conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.add_user_to_group("my-group", "my-user") conn.create_user("my-user") conn.add_user_to_group("my-group", "my-user") @@ -89,11 +89,11 @@ def test_add_user_to_group(): @mock_iam_deprecated() def test_remove_user_from_group(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.remove_user_from_group("my-group", "my-user") conn.create_group("my-group") conn.create_user("my-user") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.remove_user_from_group("my-group", "my-user") conn.add_user_to_group("my-group", "my-user") conn.remove_user_from_group("my-group", "my-user") @@ -150,7 +150,7 @@ def test_attach_group_policies(): def test_get_group_policy(): conn = boto.connect_iam() conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_group_policy("my-group", "my-policy") conn.put_group_policy("my-group", "my-policy", MOCK_POLICY) @@ -199,9 +199,9 @@ def test_delete_group(): @mock_iam def test_delete_unknown_group(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: conn.delete_group(GroupName="unknown-group") - err.exception.response["Error"]["Code"].should.equal("NoSuchEntity") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Code"].should.equal("NoSuchEntity") + err.value.response["Error"]["Message"].should.equal( "The group with name unknown-group cannot be found." 
) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py index 6348b0cba..96cd632c6 100644 --- a/tests/test_iam/test_iam_policies.py +++ b/tests/test_iam/test_iam_policies.py @@ -2,7 +2,8 @@ import json import boto3 from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest +import sure # noqa from moto import mock_iam @@ -1611,31 +1612,25 @@ valid_policy_documents = [ ] -def test_create_policy_with_invalid_policy_documents(): - for test_case in invalid_policy_document_test_cases: - yield check_create_policy_with_invalid_policy_document, test_case - - -def test_create_policy_with_valid_policy_documents(): - for valid_policy_document in valid_policy_documents: - yield check_create_policy_with_valid_policy_document, valid_policy_document - - +@pytest.mark.parametrize("invalid_policy_document", invalid_policy_document_test_cases) @mock_iam -def check_create_policy_with_invalid_policy_document(test_case): +def test_create_policy_with_invalid_policy_document(invalid_policy_document): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_policy( PolicyName="TestCreatePolicy", - PolicyDocument=json.dumps(test_case["document"]), + PolicyDocument=json.dumps(invalid_policy_document["document"]), ) - ex.exception.response["Error"]["Code"].should.equal("MalformedPolicyDocument") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal(test_case["error_message"]) + ex.value.response["Error"]["Code"].should.equal("MalformedPolicyDocument") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( + invalid_policy_document["error_message"] + ) +@pytest.mark.parametrize("valid_policy_document", valid_policy_documents) @mock_iam -def check_create_policy_with_valid_policy_document(valid_policy_document): +def test_create_policy_with_valid_policy_document(valid_policy_document): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=json.dumps(valid_policy_document) diff --git a/tests/test_iot/__init__.py b/tests/test_iot/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_iot/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 0c0623a6f..7a39e0987 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -6,7 +6,206 @@ import boto3 from moto import mock_iot from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest + + +def generate_thing_group_tree(iot_client, tree_dict, _parent=None): + """ + Generates a thing group tree given the input tree structure. + :param iot_client: the iot client for boto3 + :param tree_dict: dictionary with the key being the group_name, and the value being a sub tree. 
+        tree_dict = {
+            "group_name_1a":{
+                "group_name_2a":{
+                    "group_name_3a":{} or None
+                },
+            },
+            "group_name_1b":{}
+        }
+    :return: a dictionary of created groups, keyed by group name
+    """
+    if tree_dict is None:
+        tree_dict = {}
+    created_dict = {}
+    for group_name in tree_dict.keys():
+        params = {"thingGroupName": group_name}
+        if _parent:
+            params["parentGroupName"] = _parent
+        created_group = iot_client.create_thing_group(**params)
+        created_dict[group_name] = created_group
+        subtree_dict = generate_thing_group_tree(
+            iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
+        )
+        created_dict.update(subtree_dict)
+    return created_dict
+
+
+@mock_iot
+def test_attach_policy():
+    client = boto3.client("iot", region_name="ap-northeast-1")
+    policy_name = "my-policy"
+    doc = "{}"
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert["certificateArn"]
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key("policies").which.should.have.length_of(1)
+    res["policies"][0]["policyName"].should.equal("my-policy")
+
+
+@mock_iot
+def test_detach_policy():
+    client = boto3.client("iot", region_name="ap-northeast-1")
+    policy_name = "my-policy"
+    doc = "{}"
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert["certificateArn"]
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key("policies").which.should.have.length_of(1)
+    res["policies"][0]["policyName"].should.equal("my-policy")
+
+    client.detach_policy(policyName=policy_name, target=cert_arn)
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key("policies").which.should.be.empty
+
+
+@mock_iot
+def test_list_attached_policies():
+    client = boto3.client("iot", region_name="ap-northeast-1")
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    policies = client.list_attached_policies(target=cert["certificateArn"])
+    policies["policies"].should.be.empty
+
+
+@mock_iot
+def test_policy_versions():
+    client = boto3.client("iot", region_name="ap-northeast-1")
+    policy_name = "my-policy"
+    doc = "{}"
+
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    policy.should.have.key("policyName").which.should.equal(policy_name)
+    policy.should.have.key("policyArn").which.should_not.be.none
+    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
+    policy.should.have.key("policyVersionId").which.should.equal("1")
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key("policyName").which.should.equal(policy_name)
+    policy.should.have.key("policyArn").which.should_not.be.none
+    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
+    policy.should.have.key("defaultVersionId").which.should.equal("1")
+
+    policy1 = client.create_policy_version(
+        policyName=policy_name,
+        policyDocument=json.dumps({"version": "version_1"}),
+        setAsDefault=True,
+    )
+    policy1.should.have.key("policyArn").which.should_not.be.none
+    policy1.should.have.key("policyDocument").which.should.equal(
+        json.dumps({"version": "version_1"})
+    )
+    policy1.should.have.key("policyVersionId").which.should.equal("2")
+    
policy1.should.have.key("isDefaultVersion").which.should.equal(True) + + policy2 = client.create_policy_version( + policyName=policy_name, + policyDocument=json.dumps({"version": "version_2"}), + setAsDefault=False, + ) + policy2.should.have.key("policyArn").which.should_not.be.none + policy2.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) + policy2.should.have.key("policyVersionId").which.should.equal("3") + policy2.should.have.key("isDefaultVersion").which.should.equal(False) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key("policyName").which.should.equal(policy_name) + policy.should.have.key("policyArn").which.should_not.be.none + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) + + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy1["policyVersionId"] + ) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key("policyName").which.should.equal(policy_name) + policy.should.have.key("policyArn").which.should_not.be.none + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) + + client.set_default_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy2["policyVersionId"] + ) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key("policyName").which.should.equal(policy_name) + policy.should.have.key("policyArn").which.should_not.be.none + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy2["policyVersionId"] + ) + + client.delete_policy_version(policyName=policy_name, policyVersionId="1") + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key("policyVersions").which.should.have.length_of(2) + + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy1["policyVersionId"] + ) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key("policyVersions").which.should.have.length_of(1) + + # should fail as it"s the default policy. 
Should use delete_policy instead + try: + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) + assert False, "Should have failed in previous call" + except Exception as exception: + exception.response["Error"]["Message"].should.equal( + "Cannot delete the default version of a policy" + ) @mock_iot @@ -264,6 +463,60 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): ) +@mock_iot +def test_endpoints(): + region_name = "ap-northeast-1" + client = boto3.client("iot", region_name=region_name) + + # iot:Data + endpoint = client.describe_endpoint(endpointType="iot:Data") + endpoint.should.have.key("endpointAddress").which.should_not.contain("ats") + endpoint.should.have.key("endpointAddress").which.should.contain( + "iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:Data-ATS") + endpoint.should.have.key("endpointAddress").which.should.contain( + "ats.iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:CredentialProvider") + endpoint.should.have.key("endpointAddress").which.should.contain( + "credentials.iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:Jobs") + endpoint.should.have.key("endpointAddress").which.should.contain( + "jobs.iot.{}.amazonaws.com".format(region_name) + ) + + # raise InvalidRequestException + try: + client.describe_endpoint(endpointType="iot:Abc") + except client.exceptions.InvalidRequestException as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("InvalidRequestException") + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_certificate_id_generation_deterministic(): + # Creating the same certificate twice should result in the same certificate ID + client = boto3.client("iot", region_name="us-east-1") + cert1 = client.create_keys_and_certificate(setAsActive=False) + client.delete_certificate(certificateId=cert1["certificateId"]) + + cert2 = client.register_certificate( + certificatePem=cert1["certificatePem"], setAsActive=False + ) + cert2.should.have.key("certificateId").which.should.equal(cert1["certificateId"]) + client.delete_certificate(certificateId=cert2["certificateId"]) + + @mock_iot def test_certs(): client = boto3.client("iot", region_name="us-east-1") @@ -324,6 +577,49 @@ def test_certs(): res = client.list_certificates() res.should.have.key("certificates") + # Test register_certificate without CA flow + cert = client.register_certificate_without_ca( + certificatePem=cert_pem, status="INACTIVE" + ) + cert.should.have.key("certificateId").which.should_not.be.none + cert.should.have.key("certificateArn").which.should_not.be.none + cert_id = cert["certificateId"] + + res = client.list_certificates() + res.should.have.key("certificates").which.should.have.length_of(1) + for cert in res["certificates"]: + cert.should.have.key("certificateArn").which.should_not.be.none + cert.should.have.key("certificateId").which.should_not.be.none + cert.should.have.key("status").which.should_not.be.none + cert.should.have.key("creationDate").which.should_not.be.none + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key("certificates") + + +@mock_iot +def test_create_certificate_validation(): + # Test we can't create a cert that already exists + client = boto3.client("iot", 
region_name="us-east-1") + cert = client.create_keys_and_certificate(setAsActive=False) + + with pytest.raises(ClientError) as e: + client.register_certificate( + certificatePem=cert["certificatePem"], setAsActive=False + ) + e.value.response["Error"]["Message"].should.contain( + "The certificate is already provisioned or registered" + ) + + with pytest.raises(ClientError) as e: + client.register_certificate_without_ca( + certificatePem=cert["certificatePem"], status="ACTIVE" + ) + e.value.response["Error"]["Message"].should.contain( + "The certificate is already provisioned or registered" + ) + @mock_iot def test_delete_policy_validation(): @@ -347,9 +643,9 @@ def test_delete_policy_validation(): client.create_policy(policyName=policy_name, policyDocument=doc) client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_policy(policyName=policy_name) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "The policy cannot be deleted as the policy is attached to one or more principals (name=%s)" % policy_name ) @@ -388,27 +684,27 @@ def test_delete_certificate_validation(): client.create_thing(thingName=thing_name) client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Certificate must be deactivated (not ACTIVE) before deletion." ) res = client.list_certificates() res.should.have.key("certificates").which.should.have.length_of(1) client.update_certificate(certificateId=cert_id, newStatus="REVOKED") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Things must be detached before deletion (arn: %s)" % cert_arn ) res = client.list_certificates() res.should.have.key("certificates").which.should.have.length_of(1) client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Certificate policies must be detached before deletion (arn: %s)" % cert_arn ) res = client.list_certificates() @@ -502,9 +798,9 @@ def test_principal_policy(): res.should.have.key("policies").which.should.have.length_of(0) res = client.list_policy_principals(policyName=policy_name) res.should.have.key("principals").which.should.have.length_of(0) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.detach_policy(policyName=policy_name, target=cert_arn) - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_iot @@ -561,6 +857,14 @@ def test_principal_thing(): res = client.list_thing_principals(thingName=thing_name) res.should.have.key("principals").which.should.have.length_of(0) + with pytest.raises(ClientError) as e: + client.list_thing_principals(thingName="xxx") + + 
e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.value.response["Error"]["Message"].should.equal( + "Failed to list principals for thing xxx because the thing does not exist in your account" + ) + @mock_iot def test_delete_principal_thing(): @@ -581,6 +885,167 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) +class TestListThingGroup: + group_name_1a = "my-group-name-1a" + group_name_1b = "my-group-name-1b" + group_name_2a = "my-group-name-2a" + group_name_2b = "my-group-name-2b" + group_name_3a = "my-group-name-3a" + group_name_3b = "my-group-name-3b" + group_name_3c = "my-group-name-3c" + group_name_3d = "my-group-name-3d" + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, + }, + group_name_1b: {}, + } + + @mock_iot + def test_should_list_all_groups(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups() + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(8) + + @mock_iot + def test_should_list_all_groups_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + @mock_iot + def test_should_list_all_groups_filtered_by_parent(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(6) + resp = client.list_thing_groups(parentGroup=self.group_name_2a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_1b) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + with pytest.raises(ClientError) as e: + client.list_thing_groups(parentGroup="inexistant-group-name") + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + @mock_iot + def test_should_list_all_groups_filtered_by_parent_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = 
client.list_thing_groups(namePrefixFilter="prefix-which-does-not-match")
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-1", recursive=False
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-3", recursive=False
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(4)
+        resp = client.list_thing_groups(
+            namePrefixFilter="prefix-which-does-not-match",
+            parentGroup=self.group_name_1a,
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+
+@mock_iot
+def test_delete_thing_group():
+    client = boto3.client("iot", region_name="ap-northeast-1")
+    group_name_1a = "my-group-name-1a"
+    group_name_2a = "my-group-name-2a"
+    tree_dict = {
+        group_name_1a: {group_name_2a: {},},
+    }
+    group_catalog = generate_thing_group_tree(client, tree_dict)
+
+    # delete group with child
+    try:
+        client.delete_thing_group(thingGroupName=group_name_1a)
+    except client.exceptions.InvalidRequestException as exc:
+        error_code = exc.response["Error"]["Code"]
+        error_code.should.equal("InvalidRequestException")
+    else:
+        raise Exception("Should have raised error")
+
+    # delete child group
+    client.delete_thing_group(thingGroupName=group_name_2a)
+    res = client.list_thing_groups()
+    res.should.have.key("thingGroups").which.should.have.length_of(1)
+    res["thingGroups"].should_not.have.key(group_name_2a)
+
+    # now that there is no child group, we can delete the previous group safely
+    client.delete_thing_group(thingGroupName=group_name_1a)
+    res = client.list_thing_groups()
+    res.should.have.key("thingGroups").which.should.have.length_of(0)
+
+    # Deleting an invalid thing group does not raise an error.
+ res = client.delete_thing_group(thingGroupName="non-existent-group-name") + res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + @mock_iot def test_describe_thing_group_metadata_hierarchy(): client = boto3.client("iot", region_name="ap-northeast-1") @@ -593,56 +1058,14 @@ def test_describe_thing_group_metadata_hierarchy(): group_name_3c = "my-group-name-3c" group_name_3d = "my-group-name-3d" - # --1a - # |--2a - # | |--3a - # | |--3b - # | - # |--2b - # |--3c - # |--3d - # --1b - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) - thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) - thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # describe groups # groups level 1 @@ -694,7 +1117,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2a.should.have.key("version") # 2b thing_group_description2b = client.describe_thing_group( @@ -720,7 +1143,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2b.should.have.key("version") # groups level 3 # 3a @@ -747,13 +1170,13 @@ def 
test_describe_thing_group_metadata_hierarchy():
     ].should.match(group_name_1a)
     thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
         "groupArn"
-    ].should.match(thing_group1a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_1a]["thingGroupArn"])
     thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupName"
     ].should.match(group_name_2a)
     thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupArn"
-    ].should.match(thing_group2a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_2a]["thingGroupArn"])
     thing_group_description3a.should.have.key("version")
     # 3b
     thing_group_description3b = client.describe_thing_group(
@@ -779,13 +1202,13 @@ def test_describe_thing_group_metadata_hierarchy():
     ].should.match(group_name_1a)
     thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
         "groupArn"
-    ].should.match(thing_group1a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_1a]["thingGroupArn"])
     thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupName"
     ].should.match(group_name_2a)
     thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupArn"
-    ].should.match(thing_group2a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_2a]["thingGroupArn"])
     thing_group_description3b.should.have.key("version")
     # 3c
     thing_group_description3c = client.describe_thing_group(
@@ -811,13 +1234,13 @@ def test_describe_thing_group_metadata_hierarchy():
     ].should.match(group_name_1a)
     thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
         "groupArn"
-    ].should.match(thing_group1a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_1a]["thingGroupArn"])
     thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupName"
     ].should.match(group_name_2b)
     thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupArn"
-    ].should.match(thing_group2b["thingGroupArn"])
+    ].should.match(group_catalog[group_name_2b]["thingGroupArn"])
     thing_group_description3c.should.have.key("version")
     # 3d
     thing_group_description3d = client.describe_thing_group(
@@ -843,13 +1266,13 @@ def test_describe_thing_group_metadata_hierarchy():
     ].should.match(group_name_1a)
     thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
         "groupArn"
-    ].should.match(thing_group1a["thingGroupArn"])
+    ].should.match(group_catalog[group_name_1a]["thingGroupArn"])
     thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupName"
     ].should.match(group_name_2b)
     thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
         "groupArn"
-    ].should.match(thing_group2b["thingGroupArn"])
+    ].should.match(group_catalog[group_name_2b]["thingGroupArn"])
     thing_group_description3d.should.have.key("version")
@@ -994,7 +1417,10 @@ def test_create_job():
     client = boto3.client("iot", region_name="eu-west-1")
     name = "my-thing"
     job_id = "TestJob"
-    # thing
+    # thing
+    # job_document = {
+    #     "field": "value"
+    # }
     thing = client.create_thing(thingName=name)
     thing.should.have.key("thingName").which.should.equal(name)
     thing.should.have.key("thingArn")
@@ -1020,6 +1446,63 @@
     job.should.have.key("description")
 
+
+@mock_iot
+def test_list_jobs():
+    client = boto3.client("iot", region_name="eu-west-1")
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    # job_document = {
+    #     "field": "value"
+    # }
+    thing = 
client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job1 = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job1.should.have.key("jobId").which.should.equal(job_id) + job1.should.have.key("jobArn") + job1.should.have.key("description") + + job2 = client.create_job( + jobId=job_id + "1", + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job2.should.have.key("jobId").which.should.equal(job_id + "1") + job2.should.have.key("jobArn") + job2.should.have.key("description") + + jobs = client.list_jobs() + jobs.should.have.key("jobs") + jobs.should_not.have.key("nextToken") + jobs["jobs"][0].should.have.key("jobId").which.should.equal(job_id) + jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id + "1") + + @mock_iot def test_describe_job(): client = boto3.client("iot", region_name="eu-west-1") @@ -1124,3 +1607,387 @@ def test_describe_job_1(): job.should.have.key("job").which.should.have.key( "jobExecutionsRolloutConfig" ).which.should.have.key("maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_delete_job(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + + job = client.describe_job(jobId=job_id) + job.should.have.key("job") + job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id) + + client.delete_job(jobId=job_id) + + client.list_jobs()["jobs"].should.have.length_of(0) + + +@mock_iot +def test_cancel_job(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + + job = client.describe_job(jobId=job_id) + job.should.have.key("job") + 
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id) + + job = client.cancel_job(jobId=job_id, reasonCode="Because", comment="You are") + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + + job = client.describe_job(jobId=job_id) + job.should.have.key("job") + job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("job").which.should.have.key("status").which.should.equal( + "CANCELED" + ) + job.should.have.key("job").which.should.have.key( + "forceCanceled" + ).which.should.equal(False) + job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal( + "Because" + ) + job.should.have.key("job").which.should.have.key("comment").which.should.equal( + "You are" + ) + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key("document").which.should.equal("") + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key("document").which.should.equal('{"field": "value"}') + + +@mock_iot +def test_describe_job_execution(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + job.should.have.key("description") + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + 
job_execution.should.have.key("execution") + job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) + job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) + job_execution["execution"].should.have.key("queuedAt") + job_execution["execution"].should.have.key("startedAt") + job_execution["execution"].should.have.key("lastUpdatedAt") + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) + job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) + + job_execution = client.describe_job_execution( + jobId=job_id, thingName=name, executionNumber=123 + ) + job_execution.should.have.key("execution") + job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) + job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) + job_execution["execution"].should.have.key("queuedAt") + job_execution["execution"].should.have.key("startedAt") + job_execution["execution"].should.have.key("lastUpdatedAt") + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) + job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) + + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) + except ClientError as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("ResourceNotFoundException") + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_cancel_job_execution(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + job.should.have.key("description") + + client.cancel_job_execution(jobId=job_id, thingName=name) + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key("execution") + job_execution["execution"].should.have.key("status").which.should.equal("CANCELED") + + +@mock_iot +def test_delete_job_execution(): + client = boto3.client("iot", region_name="eu-west-1") + name = 
"my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + job.should.have.key("description") + + client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + except ClientError as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("ResourceNotFoundException") + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_list_job_executions_for_job(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + job.should.have.key("description") + + job_execution = client.list_job_executions_for_job(jobId=job_id) + job_execution.should.have.key("executionSummaries") + job_execution["executionSummaries"][0].should.have.key( + "thingArn" + ).which.should.equal(thing["thingArn"]) + + +@mock_iot +def test_list_job_executions_for_thing(): + client = boto3.client("iot", region_name="eu-west-1") + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key("thingName").which.should.equal(name) + thing.should.have.key("thingArn") + + # job document + job_document = {"field": "value"} + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", + "expiresInSec": 123, + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, + ) + + job.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key("jobArn") + job.should.have.key("description") + + job_execution = client.list_job_executions_for_thing(thingName=name) + job_execution.should.have.key("executionSummaries") + job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal( + job_id + ) diff --git a/tests/test_iotdata/__init__.py b/tests/test_iotdata/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_iotdata/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index ac0a04244..bbef49348 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import json import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError from moto import mock_iotdata, mock_iot @@ -17,7 +17,7 @@ def test_basic(): raw_payload = b'{"state": {"desired": {"led": "on"}}}' iot_client.create_thing(thingName=name) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_thing_shadow(thingName=name) res = client.update_thing_shadow(thingName=name, payload=raw_payload) @@ -42,7 +42,7 @@ def test_basic(): payload.should.have.key("timestamp") client.delete_thing_shadow(thingName=name) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_thing_shadow(thingName=name) @@ -99,10 +99,10 @@ def test_update(): payload.should.have.key("timestamp") raw_payload = b'{"state": {"desired": {"led": "on"}}, "version": 1}' - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.update_thing_shadow(thingName=name, payload=raw_payload) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409) - ex.exception.response["Error"]["Message"].should.equal("Version conflict") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409) + ex.value.response["Error"]["Message"].should.equal("Version conflict") @mock_iotdata diff --git a/tests/test_kinesis/__init__.py b/tests/test_kinesis/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_kinesis/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
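One behaviour the iotdata migration above pins down is shadow versioning: an update_thing_shadow call that carries a stale version must be rejected with HTTP 409 and a "Version conflict" message. A condensed sketch of that contract, assuming the same mocks and an illustrative region:

import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_iot, mock_iotdata


@mock_iot
@mock_iotdata
def shadow_version_conflict():
    iot = boto3.client("iot", region_name="ap-northeast-1")
    data = boto3.client("iot-data", region_name="ap-northeast-1")
    iot.create_thing(thingName="my-thing")
    # Each unversioned update bumps the shadow version (1, then 2).
    data.update_thing_shadow(
        thingName="my-thing", payload=b'{"state": {"desired": {"led": "on"}}}'
    )
    data.update_thing_shadow(
        thingName="my-thing", payload=b'{"state": {"desired": {"led": "off"}}}'
    )
    # Pinning the now-stale version 1 must fail with 409, per the test above.
    with pytest.raises(ClientError) as ex:
        data.update_thing_shadow(
            thingName="my-thing",
            payload=b'{"state": {"desired": {"led": "on"}}, "version": 1}',
        )
    assert ex.value.response["ResponseMetadata"]["HTTPStatusCode"] == 409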
diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index b3251bb0f..85f248572 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -10,6 +10,8 @@ from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentEx from moto import mock_kinesis, mock_kinesis_deprecated from moto.core import ACCOUNT_ID +import sure # noqa + @mock_kinesis_deprecated def test_create_cluster(): @@ -601,9 +603,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[0]["HashKeyRange"] new_starting_hash = ( @@ -616,9 +615,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[2]["HashKeyRange"] new_starting_hash = ( @@ -631,9 +627,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) @mock_kinesis_deprecated @@ -662,9 +655,6 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) conn.merge_shards(stream_name, "shardId-000000000000", "shardId-000000000001") @@ -672,17 +662,23 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + active_shards.should.have.length_of(3) + conn.merge_shards(stream_name, "shardId-000000000002", "shardId-000000000000") stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + + active_shards.should.have.length_of(2) diff --git a/tests/test_kinesis/test_kinesis_cloudformation.py b/tests/test_kinesis/test_kinesis_cloudformation.py new file mode 100644 index 000000000..59f73b888 --- /dev/null +++ b/tests/test_kinesis/test_kinesis_cloudformation.py @@ -0,0 +1,173 @@ +import boto3 +import sure # noqa + +from moto import mock_kinesis, mock_cloudformation + + +@mock_cloudformation +def test_kinesis_cloudformation_create_stream(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = '{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}' + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("MyStream") + len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0) + + 
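# Note on the split/merge changes above: a reshard does not delete the parent
# shards; it closes them by stamping an EndingSequenceNumber on their
# SequenceNumberRange, which is why the rewritten assertions count only the
# still-open shards. Factored out as a helper (hypothetical name, not part of
# this patch), that filter would be:
def active_shards(stream_description):
    return [
        shard
        for shard in stream_description["Shards"]
        if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
    ]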
+@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_get_attr(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream +Outputs: + StreamName: + Value: !Ref TheStream + StreamArn: + Value: !GetAtt TheStream.Arn +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + output_stream_name = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamName" + ][0] + output_stream_arn = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamArn" + ][0] + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[ + "StreamDescription" + ] + output_stream_arn.should.equal(stream_description["StreamARN"]) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_update(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream + ShardCount: 4 + RetentionPeriodHours: 48 + Tags: + - Key: TagKey1 + Value: TagValue1 + - Key: TagKey2 + Value: TagValue2 +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + stream_description["RetentionPeriodHours"].should.equal(48) + + tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"] + tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"] + tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"] + tag1_value.should.equal("TagValue1") + tag2_value.should.equal("TagValue2") + + shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(4) + + template = """ + Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 6 + RetentionPeriodHours: 24 + Tags: + - Key: TagKey1 + Value: TagValue1a + - Key: TagKey2 + Value: TagValue2a + + """.strip() + cf_conn.update_stack(StackName=stack_name, TemplateBody=template) + + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + stream_description["RetentionPeriodHours"].should.equal(24) + + tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"] + tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"] + tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"] + tag1_value.should.equal("TagValue1a") + tag2_value.should.equal("TagValue2a") + + shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(6) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_delete(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = 
""" +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + stream_description["StreamName"].should.equal("MyStream") + + cf_conn.delete_stack(StackName=stack_name) + streams = kinesis_conn.list_streams()["StreamNames"] + len(streams).should.equal(0) diff --git a/tests/test_kinesisvideo/__init__.py b/tests/test_kinesisvideo/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_kinesisvideo/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_kinesisvideo/test_kinesisvideo.py b/tests/test_kinesisvideo/test_kinesisvideo.py new file mode 100644 index 000000000..abd63bbda --- /dev/null +++ b/tests/test_kinesisvideo/test_kinesisvideo.py @@ -0,0 +1,140 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +import pytest +from moto import mock_kinesisvideo +from botocore.exceptions import ClientError +import json + + +@mock_kinesisvideo +def test_create_stream(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + # stream can be created + res = client.create_stream(StreamName=stream_name, DeviceName=device_name) + res.should.have.key("StreamARN").which.should.contain(stream_name) + + +@mock_kinesisvideo +def test_create_stream_with_same_name(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + # cannot create with same stream name + with pytest.raises(ClientError): + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + +@mock_kinesisvideo +def test_describe_stream(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + res = client.create_stream(StreamName=stream_name, DeviceName=device_name) + res.should.have.key("StreamARN").which.should.contain(stream_name) + stream_arn = res["StreamARN"] + + # cannot create with existing stream name + with pytest.raises(ClientError): + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + # stream can be described with name + res = client.describe_stream(StreamName=stream_name) + res.should.have.key("StreamInfo") + stream_info = res["StreamInfo"] + stream_info.should.have.key("StreamARN").which.should.contain(stream_name) + stream_info.should.have.key("StreamName").which.should.equal(stream_name) + stream_info.should.have.key("DeviceName").which.should.equal(device_name) + + # stream can be described with arn + res = client.describe_stream(StreamARN=stream_arn) + res.should.have.key("StreamInfo") + stream_info = res["StreamInfo"] + stream_info.should.have.key("StreamARN").which.should.contain(stream_name) + stream_info.should.have.key("StreamName").which.should.equal(stream_name) + stream_info.should.have.key("DeviceName").which.should.equal(device_name) + + +@mock_kinesisvideo +def test_describe_stream_with_name_not_exist(): + client = boto3.client("kinesisvideo", 
region_name="ap-northeast-1") + stream_name_not_exist = "not-exist-stream" + + # cannot describe with not exist stream name + with pytest.raises(ClientError): + client.describe_stream(StreamName=stream_name_not_exist) + + +@mock_kinesisvideo +def test_list_streams(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + stream_name_2 = "my-stream-2" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + client.create_stream(StreamName=stream_name_2, DeviceName=device_name) + + # streams can be listed + res = client.list_streams() + res.should.have.key("StreamInfoList") + streams = res["StreamInfoList"] + streams.should.have.length_of(2) + + +@mock_kinesisvideo +def test_delete_stream(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + stream_name_2 = "my-stream-2" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + res = client.create_stream(StreamName=stream_name_2, DeviceName=device_name) + stream_2_arn = res["StreamARN"] + + # stream can be deleted + client.delete_stream(StreamARN=stream_2_arn) + res = client.list_streams() + streams = res["StreamInfoList"] + streams.should.have.length_of(1) + + +@mock_kinesisvideo +def test_delete_stream_with_arn_not_exist(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + stream_name_2 = "my-stream-2" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + res = client.create_stream(StreamName=stream_name_2, DeviceName=device_name) + stream_2_arn = res["StreamARN"] + + client.delete_stream(StreamARN=stream_2_arn) + + # cannot delete with not exist stream + stream_arn_not_exist = stream_2_arn + with pytest.raises(ClientError): + client.delete_stream(StreamARN=stream_arn_not_exist) + + +@mock_kinesisvideo +def test_data_endpoint(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + # data-endpoint can be created + api_name = "GET_MEDIA" + client.create_stream(StreamName=stream_name, DeviceName=device_name) + res = client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + res.should.have.key("DataEndpoint") diff --git a/tests/test_kinesisvideo/test_server.py b/tests/test_kinesisvideo/test_server.py new file mode 100644 index 000000000..20301353f --- /dev/null +++ b/tests/test_kinesisvideo/test_server.py @@ -0,0 +1,18 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_kinesisvideo + +""" +Test the different server responses +""" + + +@mock_kinesisvideo +def test_kinesisvideo_server_is_up(): + backend = server.create_backend_app("kinesisvideo") + test_client = backend.test_client() + res = test_client.post("/listStreams") + res.status_code.should.equal(200) diff --git a/tests/test_kinesisvideoarchivedmedia/__init__.py b/tests/test_kinesisvideoarchivedmedia/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py new file mode 100644 index 000000000..ee4439197 --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py @@ -0,0 +1,86 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_kinesisvideoarchivedmedia +from moto import mock_kinesisvideo +from datetime import datetime, timedelta + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_hls_streaming_session_url(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_HLS_STREAMING_SESSION_URL" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + res = client.get_hls_streaming_session_url(StreamName=stream_name,) + reg_exp = r"^{}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$".format( + data_endpoint + ) + res.should.have.key("HLSStreamingSessionURL").which.should.match(reg_exp) + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_dash_streaming_session_url(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_DASH_STREAMING_SESSION_URL" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + res = client.get_dash_streaming_session_url(StreamName=stream_name,) + reg_exp = r"^{}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$".format(data_endpoint) + res.should.have.key("DASHStreamingSessionURL").which.should.match(reg_exp) + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_clip(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_CLIP" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + end_timestamp = datetime.utcnow() - timedelta(hours=1) + start_timestamp = end_timestamp - timedelta(minutes=5) + res = client.get_clip( + StreamName=stream_name, + ClipFragmentSelector={ + "FragmentSelectorType": "PRODUCER_TIMESTAMP", + "TimestampRange": { + "StartTimestamp": start_timestamp, + "EndTimestamp": end_timestamp, + }, + }, + ) + res.should.have.key("ContentType").which.should.match("video/mp4") + res.should.have.key("Payload") diff --git a/tests/test_kinesisvideoarchivedmedia/test_server.py b/tests/test_kinesisvideoarchivedmedia/test_server.py new file mode 100644 index 000000000..482c7bb1b --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/test_server.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_kinesisvideoarchivedmedia + +""" +Test the different 
server responses +""" + + +@mock_kinesisvideoarchivedmedia +def test_kinesisvideoarchivedmedia_server_is_up(): + backend = server.create_backend_app("kinesis-video-archived-media") + test_client = backend.test_client() + res = test_client.post("/getHLSStreamingSessionURL") + # Just checking server is up + res.status_code.should.equal(404) diff --git a/tests/test_kms/__init__.py b/tests/test_kms/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_kms/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 9ce324373..bb1b013e0 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -4,21 +4,22 @@ import base64 import re import boto.kms +import boto3 import six import sure # noqa from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException -from nose.tools import assert_raises -from parameterized import parameterized - +import pytest +from moto.core.exceptions import JsonRESTError +from moto.kms.models import KmsBackend from moto.kms.exceptions import NotFoundException as MotoNotFoundException -from moto import mock_kms_deprecated +from moto import mock_kms_deprecated, mock_kms -PLAINTEXT_VECTORS = ( - (b"some encodeable plaintext",), - (b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16",), - ("some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥",), -) +PLAINTEXT_VECTORS = [ + b"some encodeable plaintext", + b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", + "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥", +] def _get_encoded_value(plaintext): @@ -190,10 +191,10 @@ def test_generate_data_key(): response = conn.generate_data_key(key_id=key_id, number_of_bytes=32) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["Plaintext"], validate=True) response["KeyId"].should.equal(key_arn) @@ -362,10 +363,10 @@ def test__create_alias__raises_if_reserved_alias(): ] for alias_name in reserved_aliases: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = err.value ex.error_message.should.be.none ex.error_code.should.equal("NotAuthorizedException") ex.body.should.equal({"__type": "NotAuthorizedException"}) @@ -390,10 +391,10 @@ def test__create_alias__raises_if_wrong_prefix(): create_resp = kms.create_key() key_id = create_resp["KeyMetadata"]["KeyId"] - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias("wrongprefix/my-alias", key_id) - ex = err.exception + ex = err.value ex.error_message.should.equal("Invalid identifier") ex.error_code.should.equal("ValidationException") ex.body.should.equal( @@ -413,10 +414,10 @@ def test__create_alias__raises_if_duplicate(): kms.create_alias(alias, key_id) - with assert_raises(AlreadyExistsException) as err: + with pytest.raises(AlreadyExistsException) as err: kms.create_alias(alias, key_id) - ex = err.exception + ex = err.value ex.error_message.should.match( r"An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists".format( **locals() @@ -448,9 +449,9 @@ def 
test__create_alias__raises_if_alias_has_restricted_characters(): ] for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal( "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format( @@ -478,9 +479,9 @@ def test__create_alias__raises_if_alias_has_colon_character(): alias_names_with_restricted_characters = ["alias/my:alias"] for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal( "{alias_name} contains invalid characters for an alias".format(**locals()) @@ -493,7 +494,7 @@ def test__create_alias__raises_if_alias_has_colon_character(): ex.status.should.equal(400) -@parameterized((("alias/my-alias_/",), ("alias/my_alias-/",))) +@pytest.mark.parametrize("alias_name", ["alias/my-alias_/", "alias/my_alias-/"]) @mock_kms_deprecated def test__create_alias__accepted_characters(alias_name): kms = boto.connect_kms() @@ -512,10 +513,10 @@ def test__create_alias__raises_if_target_key_id_is_existing_alias(): kms.create_alias(alias, key_id) - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias, alias) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal("Aliases must refer to keys. 
Not aliases") ex.error_code.should.equal("ValidationException") @@ -552,10 +553,10 @@ def test__delete_alias(): def test__delete_alias__raises_if_wrong_prefix(): kms = boto.connect_kms() - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.delete_alias("wrongprefix/my-alias") - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal("Invalid identifier") ex.error_code.should.equal("ValidationException") @@ -570,13 +571,13 @@ def test__delete_alias__raises_if_alias_is_not_found(): kms = boto.kms.connect_to_region(region) alias_name = "alias/unexisting-alias" - with assert_raises(NotFoundException) as err: + with pytest.raises(NotFoundException) as err: kms.delete_alias(alias_name) expected_message_match = r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( region=region, alias_name=alias_name ) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("NotFoundException") ex.body["message"].should.match(expected_message_match) ex.box_usage.should.be.none @@ -679,3 +680,77 @@ def test__assert_default_policy(): _assert_default_policy.when.called_with("default").should_not.throw( MotoNotFoundException ) + + +if six.PY2: + sort = sorted +else: + sort = lambda l: sorted(l, key=lambda d: d.keys()) + + +@mock_kms +def test_key_tag_on_create_key_happy(): + client = boto3.client("kms", region_name="us-east-1") + + tags = [ + {"TagKey": "key1", "TagValue": "value1"}, + {"TagKey": "key2", "TagValue": "value2"}, + ] + key = client.create_key(Description="test-key-tagging", Tags=tags) + key_id = key["KeyMetadata"]["KeyId"] + + result = client.list_resource_tags(KeyId=key_id) + actual = result.get("Tags", []) + assert sort(tags) == sort(actual) + + client.untag_resource(KeyId=key_id, TagKeys=["key1"]) + + actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) + expected = [{"TagKey": "key2", "TagValue": "value2"}] + assert sort(expected) == sort(actual) + + +@mock_kms +def test_key_tag_added_happy(): + client = boto3.client("kms", region_name="us-east-1") + + key = client.create_key(Description="test-key-tagging") + key_id = key["KeyMetadata"]["KeyId"] + tags = [ + {"TagKey": "key1", "TagValue": "value1"}, + {"TagKey": "key2", "TagValue": "value2"}, + ] + client.tag_resource(KeyId=key_id, Tags=tags) + + result = client.list_resource_tags(KeyId=key_id) + actual = result.get("Tags", []) + assert sort(tags) == sort(actual) + + client.untag_resource(KeyId=key_id, TagKeys=["key1"]) + + actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) + expected = [{"TagKey": "key2", "TagValue": "value2"}] + assert sort(expected) == sort(actual) + + +@mock_kms_deprecated +def test_key_tagging_sad(): + b = KmsBackend() + + try: + b.tag_resource("unknown", []) + raise "tag_resource should fail if KeyId is not known" + except JsonRESTError: + pass + + try: + b.untag_resource("unknown", []) + raise "untag_resource should fail if KeyId is not known" + except JsonRESTError: + pass + + try: + b.list_resource_tags("unknown") + raise "list_resource_tags should fail if KeyId is not known" + except JsonRESTError: + pass diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index c125c0557..25a8d5942 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -10,16 +10,15 @@ import botocore.exceptions import six import sure # noqa from freezegun import freeze_time -from nose.tools import assert_raises -from 
parameterized import parameterized +import pytest from moto import mock_kms -PLAINTEXT_VECTORS = ( - (b"some encodeable plaintext",), - (b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16",), - ("some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥",), -) +PLAINTEXT_VECTORS = [ + b"some encodeable plaintext", + b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", + "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥", +] def _get_encoded_value(plaintext): @@ -120,19 +119,20 @@ def test_describe_key(): response["KeyMetadata"].should_not.have.key("SigningAlgorithms") -@parameterized( - ( - ("alias/does-not-exist",), - ("arn:aws:kms:us-east-1:012345678912:alias/does-not-exist",), - ("invalid",), - ) +@pytest.mark.parametrize( + "key_id", + [ + "alias/does-not-exist", + "arn:aws:kms:us-east-1:012345678912:alias/does-not-exist", + "invalid", + ], ) @mock_kms def test_describe_key_via_alias_invalid_alias(key_id): client = boto3.client("kms", region_name="us-east-1") client.create_key(Description="key") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.describe_key(KeyId=key_id) @@ -147,16 +147,16 @@ def test_generate_data_key(): response = kms.generate_data_key(KeyId=key_id, NumberOfBytes=32) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["Plaintext"], validate=True) response["KeyId"].should.equal(key_arn) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_encrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -169,13 +169,13 @@ def test_encrypt(plaintext): response["CiphertextBlob"].should_not.equal(plaintext) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) response["KeyId"].should.equal(key_arn) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_decrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -188,39 +188,38 @@ def test_decrypt(plaintext): client.create_key(Description="key") # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(encrypt_response["CiphertextBlob"], validate=True) decrypt_response = client.decrypt(CiphertextBlob=encrypt_response["CiphertextBlob"]) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(decrypt_response["Plaintext"], validate=True) decrypt_response["Plaintext"].should.equal(_get_encoded_value(plaintext)) decrypt_response["KeyId"].should.equal(key_arn) -@parameterized( - ( - ("not-a-uuid",), - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) +@pytest.mark.parametrize( + "key_id", + [ + "not-a-uuid", + "alias/DoesNotExist", + "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", + "d25652e4-d2d2-49f7-929a-671ccda580c6", + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ], ) 
@mock_kms def test_invalid_key_ids(key_id): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.generate_data_key(KeyId=key_id, NumberOfBytes=5) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_kms_encrypt(plaintext): client = boto3.client("kms", region_name="us-east-1") @@ -357,14 +356,15 @@ def test_list_resource_tags(): assert response["Tags"][0]["TagValue"] == "string" -@parameterized( +@pytest.mark.parametrize( + "kwargs,expected_key_length", ( (dict(KeySpec="AES_256"), 32), (dict(KeySpec="AES_128"), 16), (dict(NumberOfBytes=64), 64), (dict(NumberOfBytes=1), 1), (dict(NumberOfBytes=1024), 1024), - ) + ), ) @mock_kms def test_generate_data_key_sizes(kwargs, expected_key_length): @@ -389,51 +389,52 @@ def test_generate_data_key_decrypt(): assert resp1["Plaintext"] == resp2["Plaintext"] -@parameterized( - ( - (dict(KeySpec="AES_257"),), - (dict(KeySpec="AES_128", NumberOfBytes=16),), - (dict(NumberOfBytes=2048),), - (dict(NumberOfBytes=0),), - (dict(),), - ) +@pytest.mark.parametrize( + "kwargs", + [ + dict(KeySpec="AES_257"), + dict(KeySpec="AES_128", NumberOfBytes=16), + dict(NumberOfBytes=2048), + dict(NumberOfBytes=0), + dict(), + ], ) @mock_kms def test_generate_data_key_invalid_size_params(kwargs): client = boto3.client("kms", region_name="us-east-1") key = client.create_key(Description="generate-data-key-size") - with assert_raises( + with pytest.raises( (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError) ) as err: client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) -@parameterized( - ( - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) +@pytest.mark.parametrize( + "key_id", + [ + "alias/DoesNotExist", + "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", + "d25652e4-d2d2-49f7-929a-671ccda580c6", + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ], ) @mock_kms def test_generate_data_key_invalid_key(key_id): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.generate_data_key(KeyId=key_id, KeySpec="AES_256") -@parameterized( - ( +@pytest.mark.parametrize( + "prefix,append_key_id", + [ ("alias/DoesExist", False), ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), ("", True), ("arn:aws:kms:us-east-1:012345678912:key/", True), - ) + ], ) @mock_kms def test_generate_data_key_all_valid_key_ids(prefix, append_key_id): @@ -461,7 +462,7 @@ def test_generate_data_key_without_plaintext_decrypt(): assert "Plaintext" not in resp1 -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_re_encrypt_decrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -485,7 +486,7 @@ def test_re_encrypt_decrypt(plaintext): ) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(re_encrypt_response["CiphertextBlob"], validate=True) re_encrypt_response["SourceKeyId"].should.equal(key_1_arn) @@ -517,14 +518,14 @@ def test_re_encrypt_to_invalid_destination(): encrypt_response = 
client.encrypt(KeyId=key_id, Plaintext=b"some plaintext") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.re_encrypt( CiphertextBlob=encrypt_response["CiphertextBlob"], DestinationKeyId="alias/DoesNotExist", ) -@parameterized(((12,), (44,), (91,), (1,), (1024,))) +@pytest.mark.parametrize("number_of_bytes", [12, 44, 91, 1, 1024]) @mock_kms def test_generate_random(number_of_bytes): client = boto3.client("kms", region_name="us-west-2") @@ -535,20 +536,21 @@ def test_generate_random(number_of_bytes): len(response["Plaintext"]).should.equal(number_of_bytes) -@parameterized( - ( +@pytest.mark.parametrize( + "number_of_bytes,error_type", + [ (2048, botocore.exceptions.ClientError), (1025, botocore.exceptions.ClientError), (0, botocore.exceptions.ParamValidationError), (-1, botocore.exceptions.ParamValidationError), (-1024, botocore.exceptions.ParamValidationError), - ) + ], ) @mock_kms def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): client = boto3.client("kms", region_name="us-west-2") - with assert_raises(error_type): + with pytest.raises(error_type): client.generate_random(NumberOfBytes=number_of_bytes) @@ -556,7 +558,7 @@ def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): def test_enable_key_rotation_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.enable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -564,7 +566,7 @@ def test_enable_key_rotation_key_not_found(): def test_disable_key_rotation_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.disable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -572,7 +574,7 @@ def test_disable_key_rotation_key_not_found(): def test_enable_key_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.enable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -580,7 +582,7 @@ def test_enable_key_key_not_found(): def test_disable_key_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.disable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -588,7 +590,7 @@ def test_disable_key_key_not_found(): def test_cancel_key_deletion_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.cancel_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -596,7 +598,7 @@ def test_cancel_key_deletion_key_not_found(): def test_schedule_key_deletion_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.schedule_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -604,7 +606,7 @@ def test_schedule_key_deletion_key_not_found(): def test_get_key_rotation_status_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with 
assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.get_key_rotation_status(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -612,7 +614,7 @@ def test_get_key_rotation_status_key_not_found(): def test_get_key_policy_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.get_key_policy( KeyId="12366f9b-1230-123d-123e-123e6ae60c02", PolicyName="default" ) @@ -622,7 +624,7 @@ def test_get_key_policy_key_not_found(): def test_list_key_policies_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.list_key_policies(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -630,7 +632,7 @@ def test_list_key_policies_key_not_found(): def test_put_key_policy_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.put_key_policy( KeyId="00000000-0000-0000-0000-000000000000", PolicyName="default", diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index 4c84ed127..92d85610e 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises -from parameterized import parameterized +import pytest from moto.kms.exceptions import ( AccessDeniedException, @@ -22,7 +21,7 @@ from moto.kms.utils import ( Ciphertext, ) -ENCRYPTION_CONTEXT_VECTORS = ( +ENCRYPTION_CONTEXT_VECTORS = [ ( {"this": "is", "an": "encryption", "context": "example"}, b"an" b"encryption" b"context" b"example" b"this" b"is", @@ -31,8 +30,8 @@ ENCRYPTION_CONTEXT_VECTORS = ( {"a_this": "one", "b_is": "actually", "c_in": "order"}, b"a_this" b"one" b"b_is" b"actually" b"c_in" b"order", ), -) -CIPHERTEXT_BLOB_VECTORS = ( +] +CIPHERTEXT_BLOB_VECTORS = [ ( Ciphertext( key_id="d25652e4-d2d2-49f7-929a-671ccda580c6", @@ -57,7 +56,7 @@ CIPHERTEXT_BLOB_VECTORS = ( b"1234567890123456" b"some ciphertext that is much longer now", ), -) +] def test_generate_data_key(): @@ -74,35 +73,37 @@ def test_generate_master_key(): len(test).should.equal(MASTER_KEY_LEN) -@parameterized(ENCRYPTION_CONTEXT_VECTORS) +@pytest.mark.parametrize("raw,serialized", ENCRYPTION_CONTEXT_VECTORS) def test_serialize_encryption_context(raw, serialized): test = _serialize_encryption_context(raw) test.should.equal(serialized) -@parameterized(CIPHERTEXT_BLOB_VECTORS) +@pytest.mark.parametrize("raw,_serialized", CIPHERTEXT_BLOB_VECTORS) def test_cycle_ciphertext_blob(raw, _serialized): test_serialized = _serialize_ciphertext_blob(raw) test_deserialized = _deserialize_ciphertext_blob(test_serialized) test_deserialized.should.equal(raw) -@parameterized(CIPHERTEXT_BLOB_VECTORS) +@pytest.mark.parametrize("raw,serialized", CIPHERTEXT_BLOB_VECTORS) def test_serialize_ciphertext_blob(raw, serialized): test = _serialize_ciphertext_blob(raw) test.should.equal(serialized) -@parameterized(CIPHERTEXT_BLOB_VECTORS) +@pytest.mark.parametrize("raw,serialized", CIPHERTEXT_BLOB_VECTORS) def test_deserialize_ciphertext_blob(raw, serialized): test = _deserialize_ciphertext_blob(serialized) test.should.equal(raw) -@parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) 
+@pytest.mark.parametrize( + "encryption_context", [ec[0] for ec in ENCRYPTION_CONTEXT_VECTORS] +) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( @@ -123,7 +124,7 @@ def test_encrypt_decrypt_cycle(encryption_context): def test_encrypt_unknown_key_id(): - with assert_raises(NotFoundException): + with pytest.raises(NotFoundException): encrypt( master_keys={}, key_id="anything", @@ -133,10 +134,10 @@ def test_encrypt_unknown_key_id(): def test_decrypt_invalid_ciphertext_format(): - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt(master_keys=master_key_map, ciphertext_blob=b"", encryption_context={}) @@ -148,12 +149,12 @@ def test_decrypt_unknwown_key_id(): b"some ciphertext" ) - with assert_raises(AccessDeniedException): + with pytest.raises(AccessDeniedException): decrypt(master_keys={}, ciphertext_blob=ciphertext_blob, encryption_context={}) def test_decrypt_invalid_ciphertext(): - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = ( master_key.id.encode("utf-8") + b"123456789012" @@ -161,7 +162,7 @@ def test_decrypt_invalid_ciphertext(): b"some ciphertext" ) - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt( master_keys=master_key_map, ciphertext_blob=ciphertext_blob, @@ -171,7 +172,7 @@ def test_decrypt_invalid_ciphertext(): def test_decrypt_invalid_encryption_context(): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( @@ -181,7 +182,7 @@ def test_decrypt_invalid_encryption_context(): encryption_context={"some": "encryption", "context": "here"}, ) - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt( master_keys=master_key_map, ciphertext_blob=ciphertext_blob, diff --git a/tests/test_logs/__init__.py b/tests/test_logs/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_logs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
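The KMS tag tests above need an order-insensitive comparison of TagKey/TagValue dicts, which is what the sort helper in test_kms.py provides. A self-contained way to express the same normalisation (hypothetical helper, shown only for illustration):

def normalise_tags(tags):
    # Every tag dict has the same keys, so ordering the list by TagKey is enough.
    return sorted(tags, key=lambda tag: tag["TagKey"])


assert normalise_tags(
    [{"TagKey": "key2", "TagValue": "value2"}, {"TagKey": "key1", "TagValue": "value1"}]
) == normalise_tags(
    [{"TagKey": "key1", "TagValue": "value1"}, {"TagKey": "key2", "TagValue": "value2"}]
)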
diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py new file mode 100644 index 000000000..eab839970 --- /dev/null +++ b/tests/test_logs/test_integration.py @@ -0,0 +1,384 @@ +import base64 +import boto3 +import json +import time +import zlib + +from botocore.exceptions import ClientError +from io import BytesIO +from moto import mock_logs, mock_lambda, mock_iam +import pytest +from zipfile import ZipFile, ZIP_DEFLATED + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_update(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + creation_time = filter["creationTime"] + creation_time.should.be.a(int) + filter["destinationArn"].should.equal( + "arn:aws:lambda:us-east-1:123456789012:function:test" + ) + filter["distribution"].should.equal("ByLogStream") + filter["logGroupName"].should.equal("/test") + filter["filterName"].should.equal("test") + filter["filterPattern"].should.equal("") + + # when + # to update an existing subscription filter the 'filterName' must be identical + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="[]", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.equal(creation_time) + filter["destinationArn"].should.equal( + "arn:aws:lambda:us-east-1:123456789012:function:test" + ) + filter["distribution"].should.equal("ByLogStream") + filter["logGroupName"].should.equal("/test") + filter["filterName"].should.equal("test") + filter["filterPattern"].should.equal("[]") + + # when + # only one subscription filter can be associated with a log group + with pytest.raises(ClientError) as e: + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test-2", + filterPattern="", + destinationArn=function_arn, + ) + + # then + ex = e.value + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("LimitExceededException") + ex.response["Error"]["Message"].should.equal("Resource limit exceeded.") + + +@mock_lambda +@mock_logs +@pytest.mark.network +def test_put_subscription_filter_with_lambda(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( 
logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.be.a(int) + filter["destinationArn"].should.equal( + "arn:aws:lambda:us-east-1:123456789012:function:test" + ) + filter["distribution"].should.equal("ByLogStream") + filter["logGroupName"].should.equal("/test") + filter["filterName"].should.equal("test") + filter["filterPattern"].should.equal("") + + # when + client_logs.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + {"timestamp": 0, "message": "test"}, + {"timestamp": 0, "message": "test 2"}, + ], + ) + + # then + msg_showed_up, received_message = _wait_for_log_msg( + client_logs, "/aws/lambda/test", "awslogs" + ) + assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format( + received_message + ) + + data = json.loads(received_message)["awslogs"]["data"] + response = json.loads( + zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") + ) + response["messageType"].should.equal("DATA_MESSAGE") + response["owner"].should.equal("123456789012") + response["logGroup"].should.equal("/test") + response["logStream"].should.equal("stream") + response["subscriptionFilters"].should.equal(["test"]) + log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) + log_events.should.have.length_of(2) + log_events[0]["id"].should.be.a(int) + log_events[0]["message"].should.equal("test") + log_events[0]["timestamp"].should.equal(0) + log_events[1]["id"].should.be.a(int) + log_events[1]["message"].should.equal("test 2") + log_events[1]["timestamp"].should.equal(0) + + +@mock_lambda +@mock_logs +def test_delete_subscription_filter(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + client_logs.create_log_group(logGroupName=log_group_name) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # when + client_logs.delete_subscription_filter( + logGroupName="/test", filterName="test", + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(0) + + +@mock_lambda +@mock_logs +def test_delete_subscription_filter_errors(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + 
client_logs.create_log_group(logGroupName=log_group_name) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # when + with pytest.raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="not-existing-log-group", filterName="test", + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with pytest.raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="/test", filterName="wrong-filter-name", + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified subscription filter does not exist." + ) + + +@mock_logs +def test_put_subscription_filter_errors(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + with pytest.raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="not-existing-log-group", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", + ) + + # then + ex = e.value + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with pytest.raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.value + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." 
+ ) + + +def _get_role_name(region_name): + with mock_iam(): + iam = boto3.client("iam", region_name=region_name) + try: + return iam.get_role(RoleName="test-role")["Role"]["Arn"] + except ClientError: + return iam.create_role( + RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", + )["Role"]["Arn"] + + +def _get_test_zip_file(): + func_str = """ +def lambda_handler(event, context): + return event +""" + + zip_output = BytesIO() + zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED) + zip_file.writestr("lambda_function.py", func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def _wait_for_log_msg(client, log_group_name, expected_msg_part): + received_messages = [] + start = time.time() + while (time.time() - start) < 10: + result = client.describe_log_streams(logGroupName=log_group_name) + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + + for log_stream in log_streams: + result = client.get_log_events( + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + for message in received_messages: + if expected_msg_part in message: + return True, message + time.sleep(1) + return False, received_messages diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e8f60ff03..fc9868ffb 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -5,24 +5,21 @@ import six from botocore.exceptions import ClientError from moto import mock_logs, settings -from nose.tools import assert_raises -from nose import SkipTest +import pytest +from unittest import SkipTest _logs_region = "us-east-1" if settings.TEST_SERVER_MODE else "us-west-2" @mock_logs -def test_log_group_create(): +def test_create_log_group(): conn = boto3.client("logs", "us-west-2") - log_group_name = "dummy" - response = conn.create_log_group(logGroupName=log_group_name) - response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) - assert len(response["logGroups"]) == 1 - # AWS defaults to Never Expire for log group retention - assert response["logGroups"][0].get("retentionInDays") == None + response = conn.create_log_group(logGroupName="dummy") + response = conn.describe_log_groups() - response = conn.delete_log_group(logGroupName=log_group_name) + response["logGroups"].should.have.length_of(1) + response["logGroups"][0].should_not.have.key("retentionInDays") @mock_logs @@ -31,13 +28,13 @@ def test_exceptions(): log_group_name = "dummy" log_stream_name = "dummp-stream" conn.create_log_group(logGroupName=log_group_name) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_log_group(logGroupName=log_group_name) # descrine_log_groups is not implemented yet conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_log_stream( logGroupName=log_group_name, logStreamName=log_stream_name ) @@ -48,7 +45,7 @@ def test_exceptions(): logEvents=[{"timestamp": 0, "message": "line"}], ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.put_log_events( logGroupName=log_group_name, logStreamName="invalid-stream", @@ -120,7 +117,7 @@ def test_filter_logs_raises_if_filter_pattern(): conn.put_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages ) - with assert_raises(NotImplementedError): + with 
pytest.raises(NotImplementedError):
         conn.filter_log_events(
             logGroupName=log_group_name,
             logStreamNames=[log_stream_name],
@@ -335,13 +332,13 @@ def test_get_log_events_errors():
     client.create_log_group(logGroupName=log_group_name)
     client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
 
-    with assert_raises(ClientError) as e:
+    with pytest.raises(ClientError) as e:
         client.get_log_events(
             logGroupName=log_group_name,
             logStreamName=log_stream_name,
             nextToken="n/00000000000000000000000000000000000000000000000000000000",
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("GetLogEvents")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.equal("InvalidParameterException")
@@ -349,13 +346,13 @@
         "The specified nextToken is invalid."
     )
 
-    with assert_raises(ClientError) as e:
+    with pytest.raises(ClientError) as e:
         client.get_log_events(
             logGroupName=log_group_name,
             logStreamName=log_stream_name,
             nextToken="not-existing-token",
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("GetLogEvents")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.equal("InvalidParameterException")
@@ -428,3 +425,75 @@ def test_untag_log_group():
     assert response["tags"] == remaining_tags
 
     response = conn.delete_log_group(logGroupName=log_group_name)
+
+
+@mock_logs
+def test_describe_subscription_filters():
+    # given
+    client = boto3.client("logs", "us-east-1")
+    log_group_name = "/test"
+    client.create_log_group(logGroupName=log_group_name)
+
+    # when
+    response = client.describe_subscription_filters(logGroupName=log_group_name)
+
+    # then
+    response["subscriptionFilters"].should.have.length_of(0)
+
+
+@mock_logs
+def test_describe_subscription_filters_errors():
+    # given
+    client = boto3.client("logs", "us-east-1")
+
+    # when
+    with pytest.raises(ClientError) as e:
+        client.describe_subscription_filters(logGroupName="not-existing-log-group",)
+
+    # then
+    ex = e.value
+    ex.operation_name.should.equal("DescribeSubscriptionFilters")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "The specified log group does not exist"
+    )
+
+
+@mock_logs
+def test_describe_log_groups_paging():
+    client = boto3.client("logs", "us-east-1")
+
+    group_names = [
+        "/aws/lambda/lowercase-dev",
+        "/aws/lambda/FileMonitoring",
+        "/aws/events/GetMetricData",
+        "/aws/lambda/fileAvailable",
+    ]
+
+    for name in group_names:
+        client.create_log_group(logGroupName=name)
+
+    # Groups page in ASCII name order (uppercase before lowercase) and
+    # nextToken echoes the last name returned; an unknown token simply
+    # yields an empty page.
+    resp = client.describe_log_groups()
+    resp["logGroups"].should.have.length_of(4)
+    resp.should_not.have.key("nextToken")
+
+    resp = client.describe_log_groups(limit=2)
+    resp["logGroups"].should.have.length_of(2)
+    resp["nextToken"].should.equal("/aws/lambda/FileMonitoring")
+
+    resp = client.describe_log_groups(nextToken=resp["nextToken"], limit=1)
+    resp["logGroups"].should.have.length_of(1)
+    resp["nextToken"].should.equal("/aws/lambda/fileAvailable")
+
+    resp = client.describe_log_groups(nextToken=resp["nextToken"])
+    resp["logGroups"].should.have.length_of(1)
+    resp["logGroups"][0]["logGroupName"].should.equal("/aws/lambda/lowercase-dev")
+    resp.should_not.have.key("nextToken")
+
+    resp = client.describe_log_groups(nextToken="invalid-token")
+    resp["logGroups"].should.have.length_of(0)
+    resp.should_not.have.key("nextToken")
diff 
--git a/tests/test_managedblockchain/__init__.py b/tests/test_managedblockchain/__init__.py
new file mode 100644
index 000000000..baffc4882
--- /dev/null
+++ b/tests/test_managedblockchain/__init__.py
@@ -0,0 +1 @@
+from __future__ import unicode_literals
diff --git a/tests/test_managedblockchain/helpers.py b/tests/test_managedblockchain/helpers.py
new file mode 100644
index 000000000..f8c6d29b9
--- /dev/null
+++ b/tests/test_managedblockchain/helpers.py
@@ -0,0 +1,91 @@
+from __future__ import unicode_literals
+
+
+default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}}
+
+default_votingpolicy = {
+    "ApprovalThresholdPolicy": {
+        "ThresholdPercentage": 50,
+        "ProposalDurationInHours": 24,
+        "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
+    }
+}
+
+default_memberconfiguration = {
+    "Name": "testmember1",
+    "Description": "Test Member 1",
+    "FrameworkConfiguration": {
+        "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
+    },
+    "LogPublishingConfiguration": {
+        "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}}
+    },
+}
+
+default_policy_actions = {"Invitations": [{"Principal": "123456789012"}]}
+
+multiple_policy_actions = {
+    "Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}]
+}
+
+default_nodeconfiguration = {
+    "InstanceType": "bc.t3.small",
+    "AvailabilityZone": "us-east-1a",
+    "LogPublishingConfiguration": {
+        "Fabric": {
+            "ChaincodeLogs": {"Cloudwatch": {"Enabled": False}},
+            "PeerLogs": {"Cloudwatch": {"Enabled": False}},
+        }
+    },
+}
+
+
+def member_id_exist_in_list(members, memberid):
+    # True if a member with the given ID appears in a list_members result.
+    member_id_exists = False
+    for member in members:
+        if member["Id"] == memberid:
+            member_id_exists = True
+            break
+    return member_id_exists
+
+
+def create_member_configuration(
+    name, adminuser, adminpass, cloudwatchenabled, description=None
+):
+    # Assemble a MemberConfiguration dict; Description is only included
+    # when one is supplied.
+    d = {
+        "Name": name,
+        "FrameworkConfiguration": {
+            "Fabric": {"AdminUsername": adminuser, "AdminPassword": adminpass}
+        },
+        "LogPublishingConfiguration": {
+            "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": cloudwatchenabled}}}
+        },
+    }
+
+    if description is not None:
+        d["Description"] = description
+
+    return d
+
+
+def select_invitation_id_for_network(invitations, networkid, status=None):
+    # Collect invitation IDs for the given network, optionally filtered by status
+    invitationsfornetwork = []
+    for invitation in invitations:
+        if invitation["NetworkSummary"]["Id"] == networkid:
+            if status is None or invitation["Status"] == status:
+                invitationsfornetwork.append(invitation["InvitationId"])
+    return invitationsfornetwork
+
+
+def node_id_exist_in_list(nodes, nodeid):
+    # True if a node with the given ID appears in a list_nodes result.
+    node_id_exists = False
+    for node in nodes:
+        if node["Id"] == nodeid:
+            node_id_exists = True
+            break
+    return node_id_exists
diff --git a/tests/test_managedblockchain/test_managedblockchain_invitations.py b/tests/test_managedblockchain/test_managedblockchain_invitations.py
new file mode 100644
index 000000000..0f70d7f88
--- /dev/null
+++ b/tests/test_managedblockchain/test_managedblockchain_invitations.py
@@ -0,0 +1,141 @@
+from __future__ import unicode_literals
+
+import boto3
+import sure  # noqa
+
+from moto import mock_managedblockchain
+from . 
import helpers + + +@mock_managedblockchain +def test_create_2_invitations(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.multiple_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(2) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + response["Invitations"][1]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][1]["Status"].should.equal("PENDING") + + +@mock_managedblockchain +def test_reject_invitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Reject - thanks but no thanks + response = conn.reject_invitation(InvitationId=invitation_id) + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_reject_invitation_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + 
MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + response = conn.reject_invitation.when.called_with( + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "InvitationId in-ABCDEFGHIJKLMNOP0123456789 not found.") diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py new file mode 100644 index 000000000..9120e4aee --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -0,0 +1,665 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_another_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Name"].should.equal("testmember2") + + # Update member + logconfignewenabled = not helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ]["Fabric"]["CaLogs"]["Cloudwatch"]["Enabled"] + logconfignew = { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}} + } + 
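+    # NOTE: update_member accepts only a new LogPublishingConfiguration
+    # (nothing else about a member can be changed), so the flipped CaLogs
+    # flag above is written here and read back just below.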
conn.update_member( + NetworkId=network_id, + MemberId=member_id2, + LogPublishingConfiguration=logconfignew, + ) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["LogPublishingConfiguration"]["Fabric"]["CaLogs"]["Cloudwatch"][ + "Enabled" + ].should.equal(logconfignewenabled) + + +@mock_managedblockchain +def test_create_another_member_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Description"].should.equal("Test Member 2") + + # Try to create member with already used invitation + response = conn.create_member.when.called_with( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2 Duplicate" + ), + ).should.throw(Exception, "Invitation {0} not valid".format(invitation_id)) + + # Delete member 2 + conn.delete_member(NetworkId=network_id, MemberId=member_id2) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + + # But cannot get + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId=member_id2, + ).should.throw(Exception, "Member {0} not found".format(member_id2)) + + # Delete member 1 + conn.delete_member(NetworkId=network_id, MemberId=member_id) + + # Network should be gone + response = conn.list_networks() + mbcnetworks = 
response["Networks"] + mbcnetworks.should.have.length_of(0) + + # Verify the invitation network status is DELETED + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(1) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["NetworkSummary"]["Status"].should.equal("DELETED") + + +@mock_managedblockchain +def test_invite_and_remove_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal (create additional member) + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + both_policy_actions = { + "Invitations": [{"Principal": "123456789012"}], + "Removals": [{"MemberId": member_id2}], + } + + # Create proposal (invite and remove member) + response = conn.create_proposal( + NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions, + ) + proposal_id2 = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id2) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id2, + VoterMemberId=member_id, + Vote="YES", + ) + + # Check the invitation status + response = conn.list_invitations() + invitations = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + invitations.should.have.length_of(1) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + foundmember2 = False + for member in members: + if member["Id"] == member_id2 and member["Status"] == "DELETED": + foundmember2 = True + foundmember2.should.equal(True) + + +@mock_managedblockchain +def test_create_too_many_members(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create 4 more members - create invitations for 5 + for counter in range(2, 7): + # Create proposal + response = 
conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + for counter in range(2, 6): + # Get the invitation + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember" + str(counter), + "admin", + "Admin12345", + False, + "Test Member " + str(counter), + ), + ) + member_id = response["MemberId"] + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(counter) + helpers.member_id_exist_in_list(members, member_id).should.equal(True) + + # Get member details + response = conn.get_member(NetworkId=network_id, MemberId=member_id) + response["Member"]["Description"].should.equal("Test Member " + str(counter)) + + # Try to create the sixth + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # Try to create one too many members + response = conn.create_member.when.called_with( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember6", "admin", "Admin12345", False, "Test Member 6" + ), + ).should.throw(Exception, "is the maximum number of members allowed in a",) + + +@mock_managedblockchain +def test_create_another_member_alreadyhave(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Should fail trying to create with same name + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember1", "admin", "Admin12345", False + ), + ).should.throw( + Exception, + "Member name {0} already exists in network {1}".format( + "testmember1", network_id + ), + ) + + +@mock_managedblockchain +def test_create_another_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + InvitationId="id-ABCDEFGHIJKLMNOP0123456789", + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", 
False + ), + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_another_member_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ).should.throw(Exception, "Invitation in-ABCDEFGHIJKLMNOP0123456789 not valid") + + +@mock_managedblockchain +def test_create_another_member_adminpassword(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + badadminpassmemberconf = helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ) + + # Too short + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badap" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw( + Exception, + "Invalid length for parameter MemberConfiguration.FrameworkConfiguration.Fabric.AdminPassword", + ) + + # No uppercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badadminpwd" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No lowercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "BADADMINPWD" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdminpwd" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + 
MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # Invalid character + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdmin@pwd1" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_list_members_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_members.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.delete_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.delete_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.update_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + 
FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.update_member.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py new file mode 100644 index 000000000..c2a332983 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -0,0 +1,122 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_network(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Name"].should.equal("testnetwork1") + + # Get network details + response = conn.get_network(NetworkId=network_id) + response["Network"]["Name"].should.equal("testnetwork1") + + +@mock_managedblockchain +def test_create_network_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Description"].should.equal("Test Network 1") + + # Get network details + response = conn.get_network(NetworkId=network_id) + response["Network"]["Description"].should.equal("Test Network 1") + + +@mock_managedblockchain +def test_create_network_noframework(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_VINYL", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_create_network_badframeworkver(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + 
Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.X", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw( + Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC" + ) + + +@mock_managedblockchain +def test_create_network_badedition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}} + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_get_network_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_network.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_nodes.py b/tests/test_managedblockchain/test_managedblockchain_nodes.py new file mode 100644 index 000000000..32a5bc62c --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_nodes.py @@ -0,0 +1,477 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_node(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create a node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + node_id = response["NodeId"] + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # Get node details + response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id) + response["Node"]["AvailabilityZone"].should.equal("us-east-1a") + + # Update node + logconfignewenabled = not helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ]["Fabric"]["ChaincodeLogs"]["Cloudwatch"]["Enabled"] + logconfignew = { + "Fabric": {"ChaincodeLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}} + } + conn.update_node( + NetworkId=network_id, + MemberId=member_id, + NodeId=node_id, + LogPublishingConfiguration=logconfignew, + ) + + # Delete node + conn.delete_node( + NetworkId=network_id, MemberId=member_id, NodeId=node_id, + ) + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # Find 
node in full list - only DELETED + response = conn.list_nodes( + NetworkId=network_id, MemberId=member_id, Status="DELETED" + ) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # But cannot get + response = conn.get_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeId=node_id, + ).should.throw(Exception, "Node {0} not found".format(node_id)) + + +@mock_managedblockchain +def test_create_node_standard_edition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "STANDARD"}} + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Instance type only allowed with standard edition + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "bc.t3.large" + response = conn.create_node( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ) + node_id = response["NodeId"] + + # Get node details + response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id) + response["Node"]["InstanceType"].should.equal("bc.t3.large") + + # Need another member so the network does not get deleted + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + + # Remove member 1 - should remove nodes + conn.delete_member(NetworkId=network_id, MemberId=member_id) + + # Should now be an exception + response = conn.list_nodes.when.called_with( + NetworkId=network_id, MemberId=member_id, + ).should.throw(Exception, "Member {0} not found".format(member_id)) + + +@mock_managedblockchain +def test_create_too_many_nodes(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create a node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + + # Create another node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = 
response["Nodes"] + nodes.should.have.length_of(2) + + # Try to create one too many nodes + response = conn.create_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw( + Exception, "Maximum number of nodes exceeded in member {0}".format(member_id), + ) + + +@mock_managedblockchain +def test_create_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_node_badnodeconfig(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Incorrect instance type + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "foo" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw(Exception, "Requested instance foo isn't supported.") + + # Incorrect instance type for edition + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "bc.t3.large" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw( + Exception, + "Instance type bc.t3.large is not supported with STARTER Edition networks", + ) + + # Incorrect availability zone + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["AvailabilityZone"] = "us-east-11" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw(Exception, "Availability Zone is not valid") + + +@mock_managedblockchain +def test_list_nodes_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_nodes.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_list_nodes_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = 
conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.list_nodes.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.get_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.delete_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.delete_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def 
test_delete_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.delete_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.update_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.update_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.update_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py new file mode 100644 index 000000000..aa899e3a1 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -0,0 +1,198 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain +from . 
import helpers + + +@mock_managedblockchain +def test_create_proposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_proposals(NetworkId=network_id) + proposals = response["Proposals"] + proposals.should.have.length_of(1) + proposals[0]["ProposalId"].should.equal(proposal_id) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + + +@mock_managedblockchain +def test_create_proposal_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + Description="Adding a new member", + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Description"].should.equal("Adding a new member") + + +@mock_managedblockchain +def test_create_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badinvitationacctid(): + conn = boto3.client("managedblockchain", 
region_name="us-east-1")
+
+    # Principal must be a 12-digit AWS account ID
+    actions = {"Invitations": [{"Principal": "1234567890"}]}
+
+    # Create network - need a good network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    response = conn.create_proposal.when.called_with(
+        NetworkId=network_id, MemberId=member_id, Actions=actions,
+    ).should.throw(Exception, "Account ID format specified in proposal is not valid")
+
+
+@mock_managedblockchain
+def test_create_proposal_badremovalmemid():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Member ID must be in a valid format
+    actions = {"Removals": [{"MemberId": "m-ABCDEFGHIJKLMNOP0123456789"}]}
+
+    # Create network - need a good network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    response = conn.create_proposal.when.called_with(
+        NetworkId=network_id, MemberId=member_id, Actions=actions,
+    ).should.throw(Exception, "Member ID format specified in proposal is not valid")
+
+
+@mock_managedblockchain
+def test_list_proposal_badnetwork():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    response = conn.list_proposals.when.called_with(
+        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
+    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
+
+
+@mock_managedblockchain
+def test_get_proposal_badnetwork():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    response = conn.get_proposal.when.called_with(
+        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
+        ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
+    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
+
+
+@mock_managedblockchain
+def test_get_proposal_badproposal():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Create network - need a good network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+
+    response = conn.get_proposal.when.called_with(
+        NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
+    ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")
diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py
new file mode 100644
index 000000000..e8f4043d5
--- /dev/null
+++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py
@@ -0,0 +1,670 @@
+from __future__ import unicode_literals
+
+import os
+
+import boto3
+import sure  # noqa
+from freezegun import freeze_time
+from unittest import SkipTest
+
+from moto import mock_managedblockchain, settings
+from . import helpers
import helpers
+
+
+@mock_managedblockchain
+def test_vote_on_proposal_one_member_total_yes():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Create network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    # Create proposal
+    response = conn.create_proposal(
+        NetworkId=network_id,
+        MemberId=member_id,
+        Actions=helpers.default_policy_actions,
+    )
+    proposal_id = response["ProposalId"]
+
+    # Get proposal details
+    response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
+    response["Proposal"]["NetworkId"].should.equal(network_id)
+    response["Proposal"]["Status"].should.equal("IN_PROGRESS")
+
+    # Vote yes
+    response = conn.vote_on_proposal(
+        NetworkId=network_id,
+        ProposalId=proposal_id,
+        VoterMemberId=member_id,
+        Vote="YES",
+    )
+
+    # List proposal votes
+    response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
+    response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
+
+    # Get proposal details - should be APPROVED
+    response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
+    response["Proposal"]["Status"].should.equal("APPROVED")
+    response["Proposal"]["YesVoteCount"].should.equal(1)
+    response["Proposal"]["NoVoteCount"].should.equal(0)
+    response["Proposal"]["OutstandingVoteCount"].should.equal(0)
+
+
+@mock_managedblockchain
+def test_vote_on_proposal_one_member_total_no():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Create network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    # Create proposal
+    response = conn.create_proposal(
+        NetworkId=network_id,
+        MemberId=member_id,
+        Actions=helpers.default_policy_actions,
+    )
+    proposal_id = response["ProposalId"]
+
+    # Get proposal details
+    response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
+    response["Proposal"]["NetworkId"].should.equal(network_id)
+    response["Proposal"]["Status"].should.equal("IN_PROGRESS")
+
+    # Vote no
+    response = conn.vote_on_proposal(
+        NetworkId=network_id,
+        ProposalId=proposal_id,
+        VoterMemberId=member_id,
+        Vote="NO",
+    )
+
+    # List proposal votes
+    response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
+    response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
+
+    # Get proposal details - should be REJECTED
+    response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
+    response["Proposal"]["Status"].should.equal("REJECTED")
+    response["Proposal"]["YesVoteCount"].should.equal(0)
+    response["Proposal"]["NoVoteCount"].should.equal(1)
+    response["Proposal"]["OutstandingVoteCount"].should.equal(0)
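The threshold tests that follow pin down how a vote tally is evaluated against the network's VotingPolicy. The decision rule they imply, as a minimal sketch (inferred from the assertions in this file; moto's actual bookkeeping lives in its managedblockchain backend):

def proposal_status(yes_votes, no_votes, total_members, threshold_pct, comparator):
    # Approval can land as soon as the YES share satisfies the comparator;
    # rejection is only recorded once every member has voted.
    yes_pct = 100.0 * yes_votes / total_members
    if comparator == "GREATER_THAN":
        met = yes_pct > threshold_pct
    else:  # "GREATER_THAN_OR_EQUAL_TO"
        met = yes_pct >= threshold_pct
    if met:
        return "APPROVED"
    if yes_votes + no_votes == total_members:
        return "REJECTED"
    return "IN_PROGRESS"

proposal_status(1, 0, 2, 50, "GREATER_THAN")              # "IN_PROGRESS": 50% is not > 50%
proposal_status(1, 1, 2, 50, "GREATER_THAN")              # "REJECTED": all votes are in
proposal_status(2, 0, 3, 50, "GREATER_THAN_OR_EQUAL_TO")  # "APPROVED" with one vote outstanding

+
+
+@mock_managedblockchain
+def test_vote_on_proposal_yes_greater_than():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    votingpolicy = {
+        "ApprovalThresholdPolicy": {
+            "ThresholdPercentage": 50,
+            "ProposalDurationInHours": 24,
+            "ThresholdComparator": 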
"GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote no with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id2, + Vote="NO", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_vote_on_proposal_no_greater_than(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + 
Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote no with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="NO", + ) + + # Vote no with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id2, + Vote="NO", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_vote_on_proposal_expiredproposal(): + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + raise SkipTest("Cant manipulate time in server mode") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 1, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } + } + + conn = boto3.client("managedblockchain", region_name="us-east-1") + + with freeze_time("2015-01-01 12:00:00"): + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + with freeze_time("2015-02-01 12:00:00"): + # Vote yes - should set status to expired + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ).should.throw( + Exception, + "Proposal {0} is expired and you cannot vote on it.".format(proposal_id), + ) + + # Get proposal details - should be EXPIRED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("EXPIRED") + + +@mock_managedblockchain +def test_vote_on_proposal_status_check(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create 2 more members + for counter in range(2, 4): + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + memberidlist = [None, None, None] + memberidlist[0] = member_id + for counter in range(2, 4): + # Get the invitation + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember" + 
str(counter), + "admin", + "Admin12345", + False, + "Test Member " + str(counter), + ), + ) + member_id = response["MemberId"] + memberidlist[counter - 1] = member_id + + # Should be no more pending invitations + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(0) + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[0], + Vote="YES", + ) + + # Vote yes with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[1], + Vote="YES", + ) + + # Get proposal details - now approved (2 yes, 1 outstanding) + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("APPROVED") + + # Should be one pending invitation + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(1) + + # Vote with member 3 - should throw an exception and not create a new invitation + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[2], + Vote="YES", + ).should.throw(Exception, "and you cannot vote on it") + + # Should still be one pending invitation + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(1) + + +@mock_managedblockchain +def test_vote_on_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.vote_on_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + 
MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badvote(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="FOO", + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_vote_on_proposal_alreadyvoted(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN", + } + } + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Vote yes with member 1 again + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + 
VoterMemberId=member_id, + Vote="YES", + ).should.throw( + Exception, + "Member {0} has already voted on proposal {1}.".format(member_id, proposal_id), + ) + + +@mock_managedblockchain +def test_list_proposal_votes_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_proposal_votes.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_list_proposal_votes_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.list_proposal_votes.when.called_with( + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_opsworks/__init__.py b/tests/test_opsworks/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_opsworks/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py index 417140df2..1d3445c7d 100644 --- a/tests/test_opsworks/test_apps.py +++ b/tests/test_opsworks/test_apps.py @@ -1,76 +1,76 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_app_response(): - client = boto3.client("opsworks", region_name="us-east-1") - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - response = client.create_app(StackId=stack_id, Type="other", Name="TestApp") - - response.should.contain("AppId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - response = client.create_app(StackId=second_stack_id, Type="other", Name="TestApp") - - response.should.contain("AppId") - - # ClientError - client.create_app.when.called_with( - StackId=stack_id, Type="other", Name="TestApp" - ).should.throw(Exception, re.compile(r'already an app named "TestApp"')) - - # ClientError - client.create_app.when.called_with( - StackId="nothere", Type="other", Name="TestApp" - ).should.throw(Exception, "nothere") - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_apps(): - client = boto3.client("opsworks", region_name="us-east-1") - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - app_id = client.create_app(StackId=stack_id, Type="other", Name="TestApp")["AppId"] - - rv1 = client.describe_apps(StackId=stack_id) - rv2 = client.describe_apps(AppIds=[app_id]) - rv1["Apps"].should.equal(rv2["Apps"]) - - rv1["Apps"][0]["Name"].should.equal("TestApp") - - # ClientError - client.describe_apps.when.called_with( - 
StackId=stack_id, AppIds=[app_id] - ).should.throw(Exception, "Please provide one or more app IDs or a stack ID") - # ClientError - client.describe_apps.when.called_with(StackId="nothere").should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - client.describe_apps.when.called_with(AppIds=["nothere"]).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client("opsworks", region_name="us-east-1") + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + response = client.create_app(StackId=stack_id, Type="other", Name="TestApp") + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + response = client.create_app(StackId=second_stack_id, Type="other", Name="TestApp") + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, Type="other", Name="TestApp" + ).should.throw(Exception, re.compile(r'already an app named "TestApp"')) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", Type="other", Name="TestApp" + ).should.throw(Exception, "nothere") + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client("opsworks", region_name="us-east-1") + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + app_id = client.create_app(StackId=stack_id, Type="other", Name="TestApp")["AppId"] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1["Apps"].should.equal(rv2["Apps"]) + + rv1["Apps"][0]["Name"].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, AppIds=[app_id] + ).should.throw(Exception, "Please provide one or more app IDs or a stack ID") + # ClientError + client.describe_apps.when.called_with(StackId="nothere").should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with(AppIds=["nothere"]).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 55d23f08e..93935d20f 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -1,206 +1,212 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa - -from moto import mock_opsworks -from moto import mock_ec2 - - -@mock_opsworks -def test_create_instance(): - client = boto3.client("opsworks", region_name="us-east-1") - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName", - )["LayerId"] - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", 
- )["StackId"] - - second_layer_id = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="SecondTestLayer", - Shortname="SecondTestLayerShortName", - )["LayerId"] - - response = client.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" - ) - - response.should.contain("InstanceId") - - client.create_instance.when.called_with( - StackId="nothere", LayerIds=[layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Unable to find stack with ID nothere") - - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" - ).should.throw(Exception, "nothere") - # ClientError - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Please only provide layer IDs from the same stack") - # ClientError - client.start_instance.when.called_with(InstanceId="nothere").should.throw( - Exception, "Unable to find instance with ID nothere" - ) - - -@mock_opsworks -def test_describe_instances(): - """ - create two stacks, with 1 layer and 2 layers (S1L1, S2L1, S2L2) - - populate S1L1 with 2 instances (S1L1_i1, S1L1_i2) - populate S2L1 with 1 instance (S2L1_i1) - populate S2L2 with 3 instances (S2L2_i1..2) - """ - - client = boto3.client("opsworks", region_name="us-east-1") - S1 = client.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - S1L1 = client.create_layer( - StackId=S1, Type="custom", Name="S1L1", Shortname="S1L1" - )["LayerId"] - S2 = client.create_stack( - Name="S2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - S2L1 = client.create_layer( - StackId=S2, Type="custom", Name="S2L1", Shortname="S2L1" - )["LayerId"] - S2L2 = client.create_layer( - StackId=S2, Type="custom", Name="S2L2", Shortname="S2L2" - )["LayerId"] - - S1L1_i1 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )["InstanceId"] - S1L1_i2 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )["InstanceId"] - S2L1_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro" - )["InstanceId"] - S2L2_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )["InstanceId"] - S2L2_i2 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )["InstanceId"] - - # instances in Stack 1 - response = client.describe_instances(StackId=S1)["Instances"] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - response2 = client.describe_instances(InstanceIds=[S1L1_i1, S1L1_i2])["Instances"] - sorted(response2, key=lambda d: d["InstanceId"]).should.equal( - sorted(response, key=lambda d: d["InstanceId"]) - ) - - response3 = client.describe_instances(LayerId=S1L1)["Instances"] - sorted(response3, key=lambda d: d["InstanceId"]).should.equal( - sorted(response, key=lambda d: d["InstanceId"]) - ) - - response = client.describe_instances(StackId=S1)["Instances"] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - # instances in Stack 2 - response = client.describe_instances(StackId=S2)["Instances"] - response.should.have.length_of(3) - 
S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i2.should.be.within([i["InstanceId"] for i in response]) - - response = client.describe_instances(LayerId=S2L1)["Instances"] - response.should.have.length_of(1) - S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - - response = client.describe_instances(LayerId=S2L2)["Instances"] - response.should.have.length_of(2) - S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) - - # ClientError - client.describe_instances.when.called_with(StackId=S1, LayerId=S1L1).should.throw( - Exception, "Please provide either one or more" - ) - # ClientError - client.describe_instances.when.called_with(StackId="nothere").should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with(LayerId="nothere").should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with(InstanceIds=["nothere"]).should.throw( - Exception, "nothere" - ) - - -@mock_opsworks -@mock_ec2 -def test_ec2_integration(): - """ - instances created via OpsWorks should be discoverable via ec2 - """ - - opsworks = boto3.client("opsworks", region_name="us-east-1") - stack_id = opsworks.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - layer_id = opsworks.create_layer( - StackId=stack_id, Type="custom", Name="S1L1", Shortname="S1L1" - )["LayerId"] - - instance_id = opsworks.create_instance( - StackId=stack_id, - LayerIds=[layer_id], - InstanceType="t2.micro", - SshKeyName="testSSH", - )["InstanceId"] - - ec2 = boto3.client("ec2", region_name="us-east-1") - - # Before starting the instance, it shouldn't be discoverable via ec2 - reservations = ec2.describe_instances()["Reservations"] - assert reservations.should.be.empty - - # After starting the instance, it should be discoverable via ec2 - opsworks.start_instance(InstanceId=instance_id) - reservations = ec2.describe_instances()["Reservations"] - reservations[0]["Instances"].should.have.length_of(1) - instance = reservations[0]["Instances"][0] - opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] - - instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) - instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) +from __future__ import unicode_literals +import boto3 +import sure # noqa + +from moto import mock_opsworks +from moto import mock_ec2 + + +@mock_opsworks +def test_create_instance(): + client = boto3.client("opsworks", region_name="us-east-1") + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + layer_id = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName", + )["LayerId"] + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName", + )["LayerId"] + + response = client.create_instance( + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + ) + + response.should.contain("InstanceId") + + client.create_instance.when.called_with( + StackId="nothere", 
LayerIds=[layer_id], InstanceType="t2.micro"
+    ).should.throw(Exception, "Unable to find stack with ID nothere")
+
+    client.create_instance.when.called_with(
+        StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro"
+    ).should.throw(Exception, "nothere")
+    # ClientError
+    client.create_instance.when.called_with(
+        StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro"
+    ).should.throw(Exception, "Please only provide layer IDs from the same stack")
+    # ClientError
+    client.start_instance.when.called_with(InstanceId="nothere").should.throw(
+        Exception, "Unable to find instance with ID nothere"
+    )
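test_describe_instances below exercises DescribeInstances through its three mutually exclusive filters. A sketch of the selection rule its assertions imply (record shapes and exact wording assumed; the real logic lives in moto's OpsWorks backend):

def select_instances(instances, instance_ids=None, layer_id=None, stack_id=None):
    # Exactly one filter may be supplied, matching the ValidationException
    # ("Please provide either one or more ...") asserted at the end of the test.
    if sum(arg is not None for arg in (instance_ids, layer_id, stack_id)) != 1:
        raise ValueError(
            "Please provide either one or more instance IDs or one stack ID or one layer ID"
        )
    if instance_ids is not None:
        return [i for i in instances if i["InstanceId"] in instance_ids]
    if layer_id is not None:
        return [i for i in instances if layer_id in i["LayerIds"]]  # assumed record shape
    return [i for i in instances if i["StackId"] == stack_id]

+
+
+@mock_opsworks
+def test_describe_instances():
+    """
+    create two stacks, with 1 layer and 2 layers (S1L1, S2L1, S2L2)
+
+    populate S1L1 with 2 instances (S1L1_i1, S1L1_i2)
+    populate S2L1 with 1 instance (S2L1_i1)
+    populate S2L2 with 2 instances (S2L2_i1, S2L2_i2)
+    """
+
+    client = boto3.client("opsworks", region_name="us-east-1")
+    S1 = client.create_stack(
+        Name="S1",
+        Region="us-east-1",
+        ServiceRoleArn="service_arn",
+        DefaultInstanceProfileArn="profile_arn",
+    )["StackId"]
+    S1L1 = client.create_layer(
+        StackId=S1, Type="custom", Name="S1L1", Shortname="S1L1"
+    )["LayerId"]
+    S2 = client.create_stack(
+        Name="S2",
+        Region="us-east-1",
+        ServiceRoleArn="service_arn",
+        DefaultInstanceProfileArn="profile_arn",
+    )["StackId"]
+    S2L1 = client.create_layer(
+        StackId=S2, Type="custom", Name="S2L1", Shortname="S2L1"
+    )["LayerId"]
+    S2L2 = client.create_layer(
+        StackId=S2, Type="custom", Name="S2L2", Shortname="S2L2"
+    )["LayerId"]
+
+    S1L1_i1 = client.create_instance(
+        StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro"
+    )["InstanceId"]
+    S1L1_i2 = client.create_instance(
+        StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro"
+    )["InstanceId"]
+    S2L1_i1 = client.create_instance(
+        StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro"
+    )["InstanceId"]
+    S2L2_i1 = client.create_instance(
+        StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro"
+    )["InstanceId"]
+    S2L2_i2 = client.create_instance(
+        StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro"
+    )["InstanceId"]
+
+    # instances in Stack 1
+    response = client.describe_instances(StackId=S1)["Instances"]
+    response.should.have.length_of(2)
+    S1L1_i1.should.be.within([i["InstanceId"] for i in response])
+    S1L1_i2.should.be.within([i["InstanceId"] for i in response])
+
+    response2 = client.describe_instances(InstanceIds=[S1L1_i1, S1L1_i2])["Instances"]
+    sorted(response2, key=lambda d: d["InstanceId"]).should.equal(
+        sorted(response, key=lambda d: d["InstanceId"])
+    )
+
+    response3 = client.describe_instances(LayerId=S1L1)["Instances"]
+    sorted(response3, key=lambda d: d["InstanceId"]).should.equal(
+        sorted(response, key=lambda d: d["InstanceId"])
+    )
+
+    response = client.describe_instances(StackId=S1)["Instances"]
+    response.should.have.length_of(2)
+    S1L1_i1.should.be.within([i["InstanceId"] for i in response])
+    S1L1_i2.should.be.within([i["InstanceId"] for i in response])
+
+    # instances in Stack 2
+    response = client.describe_instances(StackId=S2)["Instances"]
+    response.should.have.length_of(3)
+    S2L1_i1.should.be.within([i["InstanceId"] for i in response])
+    S2L2_i1.should.be.within([i["InstanceId"] for i in response])
+    S2L2_i2.should.be.within([i["InstanceId"] for i in response])
+
+    response = client.describe_instances(LayerId=S2L1)["Instances"]
+    response.should.have.length_of(1)
+    S2L1_i1.should.be.within([i["InstanceId"] for i in response])
+
+    response = 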
client.describe_instances(LayerId=S2L2)["Instances"] + response.should.have.length_of(2) + S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + + # ClientError + client.describe_instances.when.called_with(StackId=S1, LayerId=S1L1).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with(StackId="nothere").should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with(LayerId="nothere").should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with(InstanceIds=["nothere"]).should.throw( + Exception, "nothere" + ) + + +@mock_opsworks +@mock_ec2 +def test_ec2_integration(): + """ + instances created via OpsWorks should be discoverable via ec2 + """ + + opsworks = boto3.client("opsworks", region_name="us-east-1") + stack_id = opsworks.create_stack( + Name="S1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + layer_id = opsworks.create_layer( + StackId=stack_id, Type="custom", Name="S1L1", Shortname="S1L1" + )["LayerId"] + + instance_id = opsworks.create_instance( + StackId=stack_id, + LayerIds=[layer_id], + InstanceType="t2.micro", + SshKeyName="testSSH", + )["InstanceId"] + + ec2 = boto3.client("ec2", region_name="us-east-1") + + # Before starting the instance, it shouldn't be discoverable via ec2 + reservations = ec2.describe_instances()["Reservations"] + assert reservations.should.be.empty + + # Before starting the instance, its status should be "stopped" + opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] + opsworks_instance["Status"].should.equal("stopped") + + # After starting the instance, it should be discoverable via ec2 + opsworks.start_instance(InstanceId=instance_id) + reservations = ec2.describe_instances()["Reservations"] + reservations[0]["Instances"].should.have.length_of(1) + instance = reservations[0]["Instances"][0] + opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] + + instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) + instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) + # After starting the instance, its status should be "online" + opsworks_instance["Status"].should.equal("online") diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 850666381..08d5a1ce4 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -1,96 +1,96 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_layer_response(): - client = boto3.client("opsworks", region_name="us-east-1") - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - response = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName", - ) - - response.should.contain("LayerId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - - response = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName", - ) - - 
response.should.contain("LayerId") - - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, Type="custom", Name="TestLayer", Shortname="_" - ).should.throw(Exception, re.compile(r'already a layer named "TestLayer"')) - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, Type="custom", Name="_", Shortname="TestLayerShortName" - ).should.throw( - Exception, re.compile(r'already a layer with shortname "TestLayerShortName"') - ) - # ClientError - client.create_layer.when.called_with( - StackId="nothere", Type="custom", Name="TestLayer", Shortname="_" - ).should.throw(Exception, "nothere") - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_layers(): - client = boto3.client("opsworks", region_name="us-east-1") - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn", - )["StackId"] - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName", - )["LayerId"] - - rv1 = client.describe_layers(StackId=stack_id) - rv2 = client.describe_layers(LayerIds=[layer_id]) - rv1["Layers"].should.equal(rv2["Layers"]) - - rv1["Layers"][0]["Name"].should.equal("TestLayer") - - # ClientError - client.describe_layers.when.called_with( - StackId=stack_id, LayerIds=[layer_id] - ).should.throw(Exception, "Please provide one or more layer IDs or a stack ID") - # ClientError - client.describe_layers.when.called_with(StackId="nothere").should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - client.describe_layers.when.called_with(LayerIds=["nothere"]).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_layer_response(): + client = boto3.client("opsworks", region_name="us-east-1") + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + response = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName", + ) + + response.should.contain("LayerId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn", + )["StackId"] + + response = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName", + ) + + response.should.contain("LayerId") + + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, Type="custom", Name="TestLayer", Shortname="_" + ).should.throw(Exception, re.compile(r'already a layer named "TestLayer"')) + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, Type="custom", Name="_", Shortname="TestLayerShortName" + ).should.throw( + Exception, re.compile(r'already a layer with shortname "TestLayerShortName"') + ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", Type="custom", Name="TestLayer", Shortname="_" + ).should.throw(Exception, "nothere") + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_layers(): + client = boto3.client("opsworks", region_name="us-east-1") + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + 
ServiceRoleArn="service_arn",
+        DefaultInstanceProfileArn="profile_arn",
+    )["StackId"]
+    layer_id = client.create_layer(
+        StackId=stack_id,
+        Type="custom",
+        Name="TestLayer",
+        Shortname="TestLayerShortName",
+    )["LayerId"]
+
+    rv1 = client.describe_layers(StackId=stack_id)
+    rv2 = client.describe_layers(LayerIds=[layer_id])
+    rv1["Layers"].should.equal(rv2["Layers"])
+
+    rv1["Layers"][0]["Name"].should.equal("TestLayer")
+
+    # ClientError
+    client.describe_layers.when.called_with(
+        StackId=stack_id, LayerIds=[layer_id]
+    ).should.throw(Exception, "Please provide one or more layer IDs or a stack ID")
+    # ClientError
+    client.describe_layers.when.called_with(StackId="nothere").should.throw(
+        Exception, "Unable to find stack with ID nothere"
+    )
+    # ClientError
+    client.describe_layers.when.called_with(LayerIds=["nothere"]).should.throw(
+        Exception, "nothere"
+    )
diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py
index 12189c530..4c26d788d 100644
--- a/tests/test_organizations/organizations_test_utils.py
+++ b/tests/test_organizations/organizations_test_utils.py
@@ -31,9 +31,9 @@ def test_make_random_create_account_status_id():
     create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
 
 
-def test_make_random_service_control_policy_id():
-    service_control_policy_id = utils.make_random_service_control_policy_id()
-    service_control_policy_id.should.match(utils.SCP_ID_REGEX)
+def test_make_random_policy_id():
+    policy_id = utils.make_random_policy_id()
+    policy_id.should.match(utils.POLICY_ID_REGEX)
 
 
 def validate_organization(response):
@@ -128,7 +128,7 @@ def validate_create_account_status(create_status):
 
 def validate_policy_summary(org, summary):
     summary.should.be.a(dict)
-    summary.should.have.key("Id").should.match(utils.SCP_ID_REGEX)
+    summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX)
     summary.should.have.key("Arn").should.equal(
         utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"])
     )
diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py
index 876e83712..07cd3afa6 100644
--- a/tests/test_organizations/test_organizations_boto3.py
+++ b/tests/test_organizations/test_organizations_boto3.py
@@ -1,13 +1,16 @@
 from __future__ import unicode_literals
+from datetime import datetime
+
 import boto3
 import json
 import six
 import sure  # noqa
 from botocore.exceptions import ClientError
-from nose.tools import assert_raises
+import pytest
 from moto import mock_organizations
+from moto.core import ACCOUNT_ID
 from moto.organizations import utils
 from .organizations_test_utils import (
     validate_organization,
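This file's conversion from nose to pytest follows one pattern throughout the hunks below: assert_raises(ClientError) becomes pytest.raises(ClientError), and the caught exception moves from e.exception to e.value. In brief (client here is a stand-in for any boto3 client):

import pytest
from botocore.exceptions import ClientError

with pytest.raises(ClientError) as e:  # nose: with assert_raises(ClientError) as e:
    client.describe_organization()
ex = e.value                           # nose: ex = e.exception
assert ex.response["ResponseMetadata"]["HTTPStatusCode"] == 400

@@ -58,12 +61,15 @@ def test_describe_organization():
 @mock_organizations
 def test_describe_organization_exception():
     client = boto3.client("organizations", region_name="us-east-1")
-    with assert_raises(ClientError) as e:
+    with pytest.raises(ClientError) as e:
         response = client.describe_organization()
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DescribeOrganization")
-    ex.response["Error"]["Code"].should.equal("400")
-    ex.response["Error"]["Message"].should.contain("AWSOrganizationsNotInUseException")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException")
+    ex.response["Error"]["Message"].should.equal(
+        "Your account is not a member of an organization." 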
+ ) # Organizational Units @@ -104,11 +110,11 @@ def test_describe_organizational_unit(): def test_describe_organizational_unit_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_organizational_unit( OrganizationalUnitId=utils.make_random_root_id() ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeOrganizationalUnit") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( @@ -133,11 +139,11 @@ def test_list_organizational_units_for_parent(): @mock_organizations def test_list_organizational_units_for_parent_exception(): client = boto3.client("organizations", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_organizational_units_for_parent( ParentId=utils.make_random_root_id() ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListOrganizationalUnitsForParent") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("ParentNotFoundException") @@ -187,12 +193,15 @@ def test_describe_account(): @mock_organizations def test_describe_account_exception(): client = boto3.client("organizations", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_account(AccountId=utils.make_random_account_id()) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeAccount") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." 
+ ) @mock_organizations @@ -326,20 +335,21 @@ def test_list_children_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] root_id = client.list_roots()["Roots"][0]["Id"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_children( ParentId=utils.make_random_root_id(), ChildType="ACCOUNT" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListChildren") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("ParentNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_children(ParentId=root_id, ChildType="BLEE") - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListChildren") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") # Service Control Policies @@ -369,6 +379,30 @@ def test_create_policy(): policy["Content"].should.equal(json.dumps(policy_doc01)) +@mock_organizations +def test_create_policy_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # invalid policy type + # when + with pytest.raises(ClientError) as e: + client.create_policy( + Content=json.dumps(policy_doc01), + Description="moto", + Name="moto", + Type="MOTO", + ) + + # then + ex = e.value + ex.operation_name.should.equal("CreatePolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + @mock_organizations def test_describe_policy(): client = boto3.client("organizations", region_name="us-east-1") @@ -393,18 +427,19 @@ def test_describe_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] policy_id = "p-47fhe9s3" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId=policy_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribePolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId="meaninglessstring") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribePolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -432,6 +467,189 @@ def test_attach_policy(): response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) +@mock_organizations +def test_detach_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = 
client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.detach_policy(PolicyId=policy_id, TargetId=ou_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.detach_policy(PolicyId=policy_id, TargetId=root_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.detach_policy(PolicyId=policy_id, TargetId=account_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_organizations +def test_detach_policy_root_ou_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + with pytest.raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="r-xy85") + ex = e.value + ex.operation_name.should.equal("DetachPolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain( + "OrganizationalUnitNotFoundException" + ) + + +@mock_organizations +def test_detach_policy_ou_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + with pytest.raises(ClientError) as e: + response = client.detach_policy( + PolicyId=policy_id, TargetId="ou-zx86-z3x4yr2t7" + ) + ex = e.value + ex.operation_name.should.equal("DetachPolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain( + "OrganizationalUnitNotFoundException" + ) + + +@mock_organizations +def test_detach_policy_account_id_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + account_id = 
client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + with pytest.raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="111619863336") + ex = e.value + ex.operation_name.should.equal("DetachPolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." + ) + + +@mock_organizations +def test_detach_policy_invalid_target_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + with pytest.raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="invalidtargetid") + ex = e.value + ex.operation_name.should.equal("DetachPolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_delete_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + base_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + base_policies.should.have.length_of(1) + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + new_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + new_policies.should.have.length_of(2) + response = client.delete_policy(PolicyId=policy_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + new_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + new_policies.should.equal(base_policies) + new_policies.should.have.length_of(1) + + +@mock_organizations +def test_delete_policy_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + non_existent_policy_id = utils.make_random_policy_id() + with pytest.raises(ClientError) as e: + response = client.delete_policy(PolicyId=non_existent_policy_id) + ex = e.value + ex.operation_name.should.equal("DeletePolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") + + # Attempt to delete an attached policy + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + 
Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + with pytest.raises(ClientError) as e: + response = client.delete_policy(PolicyId=policy_id) + ex = e.value + ex.operation_name.should.equal("DeletePolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain("PolicyInUseException") + + @mock_organizations def test_attach_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") @@ -445,36 +663,78 @@ def test_attach_policy_exception(): Name="MockServiceControlPolicy", Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") - with assert_raises(ClientError) as e: + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." 
+ ) + with pytest.raises(ClientError) as e: response = client.attach_policy( PolicyId=policy_id, TargetId="meaninglessstring" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_update_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + + policy_dict = dict( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + ) + policy_id = client.create_policy(**policy_dict)["Policy"]["PolicySummary"]["Id"] + + for key in ("Description", "Name"): + response = client.update_policy(**{"PolicyId": policy_id, key: "foobar"}) + policy = client.describe_policy(PolicyId=policy_id) + policy["Policy"]["PolicySummary"][key].should.equal("foobar") + validate_service_control_policy(org, response["Policy"]) + + response = client.update_policy(PolicyId=policy_id, Content="foobar") + policy = client.describe_policy(PolicyId=policy_id) + policy["Policy"]["Content"].should.equal("foobar") + validate_service_control_policy(org, response["Policy"]) + + +@mock_organizations +def test_update_policy_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + non_existent_policy_id = utils.make_random_policy_id() + with pytest.raises(ClientError) as e: + response = client.update_policy(PolicyId=non_existent_policy_id) + ex = e.value + ex.operation_name.should.equal("UpdatePolicy") ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") @mock_organizations @@ -528,34 +788,67 @@ def test_list_policies_for_target(): def test_list_policies_for_target_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] ou_id = "ou-gi99-i7r8eh2i2" account_id = "126644886543" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId=ou_id, Filter="SERVICE_CONTROL_POLICY" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListPoliciesForTarget") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId=account_id, Filter="SERVICE_CONTROL_POLICY" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListPoliciesForTarget") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") - with assert_raises(ClientError) as e: + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." 
+ ) + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId="meaninglessstring", Filter="SERVICE_CONTROL_POLICY" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListPoliciesForTarget") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + # not existing root + # when + with pytest.raises(ClientError) as e: + client.list_policies_for_target( + TargetId="r-0000", Filter="SERVICE_CONTROL_POLICY" + ) + + # then + ex = e.value + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TargetNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a target that doesn't exist." + ) + + # invalid policy type + # when + with pytest.raises(ClientError) as e: + client.list_policies_for_target(TargetId=root_id, Filter="MOTO") + + # then + ex = e.value + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -594,18 +887,19 @@ def test_list_targets_for_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] policy_id = "p-47fhe9s3" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_targets_for_policy(PolicyId=policy_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListTargetsForPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_targets_for_policy(PolicyId="meaninglessstring") - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListTargetsForPolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -635,11 +929,11 @@ def test_tag_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( - ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},] + ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -667,9 +961,9 @@ def test_list_tags_for_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with 
assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_tags_for_resource(ResourceId="000000000000") - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListTagsForResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -704,9 +998,9 @@ def test_untag_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.untag_resource(ResourceId="000000000000", TagKeys=["key"]) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UntagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -741,13 +1035,667 @@ def test_update_organizational_unit_duplicate_error(): response = client.create_organizational_unit(ParentId=root_id, Name=ou_name) validate_organizational_unit(org, response) response["OrganizationalUnit"]["Name"].should.equal(ou_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.update_organizational_unit( OrganizationalUnitId=response["OrganizationalUnit"]["Id"], Name=ou_name ) - exc = e.exception + exc = e.value exc.operation_name.should.equal("UpdateOrganizationalUnit") exc.response["Error"]["Code"].should.contain("DuplicateOrganizationalUnitException") exc.response["Error"]["Message"].should.equal( "An OU with the same name already exists." ) + + +@mock_organizations +def test_enable_aws_service_access(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # when + client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com") + + # then + response = client.list_aws_service_access_for_organization() + response["EnabledServicePrincipals"].should.have.length_of(1) + service = response["EnabledServicePrincipals"][0] + service["ServicePrincipal"].should.equal("config.amazonaws.com") + date_enabled = service["DateEnabled"] + date_enabled.should.be.a(datetime) + + # enabling the same service again should not result in any error or change + # when + client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com") + + # then + response = client.list_aws_service_access_for_organization() + response["EnabledServicePrincipals"].should.have.length_of(1) + service = response["EnabledServicePrincipals"][0] + service["ServicePrincipal"].should.equal("config.amazonaws.com") + service["DateEnabled"].should.equal(date_enabled) + + +@mock_organizations +def test_enable_aws_service_access_errors(): + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + with pytest.raises(ClientError) as e: + client.enable_aws_service_access(ServicePrincipal="moto.amazonaws.com") + ex = e.value + ex.operation_name.should.equal("EnableAWSServiceAccess") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal( + "You specified an unrecognized service principal."
+ ) + + +@mock_organizations +def test_list_aws_service_access_for_organization(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com") + client.enable_aws_service_access(ServicePrincipal="ram.amazonaws.com") + + # when + response = client.list_aws_service_access_for_organization() + + # then + response["EnabledServicePrincipals"].should.have.length_of(2) + services = sorted( + response["EnabledServicePrincipals"], key=lambda i: i["ServicePrincipal"] + ) + services[0]["ServicePrincipal"].should.equal("config.amazonaws.com") + services[0]["DateEnabled"].should.be.a(datetime) + services[1]["ServicePrincipal"].should.equal("ram.amazonaws.com") + services[1]["DateEnabled"].should.be.a(datetime) + + +@mock_organizations +def test_disable_aws_service_access(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com") + + # when + client.disable_aws_service_access(ServicePrincipal="config.amazonaws.com") + + # then + response = client.list_aws_service_access_for_organization() + response["EnabledServicePrincipals"].should.have.length_of(0) + + # disabling the same service again should not result in any error + # when + client.disable_aws_service_access(ServicePrincipal="config.amazonaws.com") + + # then + response = client.list_aws_service_access_for_organization() + response["EnabledServicePrincipals"].should.have.length_of(0) + + +@mock_organizations +def test_disable_aws_service_access_errors(): + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + with pytest.raises(ClientError) as e: + client.disable_aws_service_access(ServicePrincipal="moto.amazonaws.com") + ex = e.value + ex.operation_name.should.equal("DisableAWSServiceAccess") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal( + "You specified an unrecognized service principal."
+ ) + + +@mock_organizations +def test_register_delegated_administrator(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org_id = client.create_organization(FeatureSet="ALL")["Organization"]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + + # when + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + response = client.list_delegated_administrators() + response["DelegatedAdministrators"].should.have.length_of(1) + admin = response["DelegatedAdministrators"][0] + admin["Id"].should.equal(account_id) + admin["Arn"].should.equal( + "arn:aws:organizations::{0}:account/{1}/{2}".format( + ACCOUNT_ID, org_id, account_id + ) + ) + admin["Email"].should.equal(mockemail) + admin["Name"].should.equal(mockname) + admin["Status"].should.equal("ACTIVE") + admin["JoinedMethod"].should.equal("CREATED") + admin["JoinedTimestamp"].should.be.a(datetime) + admin["DelegationEnabledDate"].should.be.a(datetime) + + +@mock_organizations +def test_register_delegated_administrator_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # register master Account + # when + with pytest.raises(ClientError) as e: + client.register_delegated_administrator( + AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("RegisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ConstraintViolationException") + ex.response["Error"]["Message"].should.equal( + "You cannot register master account/yourself as delegated administrator for your organization." + ) + + # register not existing Account + # when + with pytest.raises(ClientError) as e: + client.register_delegated_administrator( + AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("RegisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." + ) + + # register not supported service + # when + with pytest.raises(ClientError) as e: + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="moto.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("RegisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal( + "You specified an unrecognized service principal." 
+ ) + + # register service again + # when + with pytest.raises(ClientError) as e: + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("RegisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountAlreadyRegisteredException") + ex.response["Error"]["Message"].should.equal( + "The provided account is already a delegated administrator for your organization." + ) + + +@mock_organizations +def test_list_delegated_administrators(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org_id = client.create_organization(FeatureSet="ALL")["Organization"]["Id"] + account_id_1 = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + account_id_2 = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + client.register_delegated_administrator( + AccountId=account_id_1, ServicePrincipal="ssm.amazonaws.com" + ) + client.register_delegated_administrator( + AccountId=account_id_2, ServicePrincipal="guardduty.amazonaws.com" + ) + + # when + response = client.list_delegated_administrators() + + # then + response["DelegatedAdministrators"].should.have.length_of(2) + sorted([admin["Id"] for admin in response["DelegatedAdministrators"]]).should.equal( + sorted([account_id_1, account_id_2]) + ) + + # when + response = client.list_delegated_administrators( + ServicePrincipal="ssm.amazonaws.com" + ) + + # then + response["DelegatedAdministrators"].should.have.length_of(1) + admin = response["DelegatedAdministrators"][0] + admin["Id"].should.equal(account_id_1) + admin["Arn"].should.equal( + "arn:aws:organizations::{0}:account/{1}/{2}".format( + ACCOUNT_ID, org_id, account_id_1 + ) + ) + admin["Email"].should.equal(mockemail) + admin["Name"].should.equal(mockname) + admin["Status"].should.equal("ACTIVE") + admin["JoinedMethod"].should.equal("CREATED") + admin["JoinedTimestamp"].should.be.a(datetime) + admin["DelegationEnabledDate"].should.be.a(datetime) + + +@mock_organizations +def test_list_delegated_administrators_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # list not supported service + # when + with pytest.raises(ClientError) as e: + client.list_delegated_administrators(ServicePrincipal="moto.amazonaws.com") + + # then + ex = e.value + ex.operation_name.should.equal("ListDelegatedAdministrators") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal( + "You specified an unrecognized service principal."
+ ) + + +@mock_organizations +def test_list_delegated_services_for_account(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="guardduty.amazonaws.com" + ) + + # when + response = client.list_delegated_services_for_account(AccountId=account_id) + + # then + response["DelegatedServices"].should.have.length_of(2) + sorted( + [service["ServicePrincipal"] for service in response["DelegatedServices"]] + ).should.equal(["guardduty.amazonaws.com", "ssm.amazonaws.com"]) + + +@mock_organizations +def test_list_delegated_services_for_account_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # list services for not existing Account + # when + with pytest.raises(ClientError) as e: + client.list_delegated_services_for_account(AccountId="000000000000") + + # then + ex = e.value + ex.operation_name.should.equal("ListDelegatedServicesForAccount") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException") + ex.response["Error"]["Message"].should.equal( + "Your account is not a member of an organization." + ) + + # list services for not registered Account + # when + with pytest.raises(ClientError) as e: + client.list_delegated_services_for_account(AccountId=ACCOUNT_ID) + + # then + ex = e.value + ex.operation_name.should.equal("ListDelegatedServicesForAccount") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException") + ex.response["Error"]["Message"].should.equal( + "The provided account is not a registered delegated administrator for your organization."
+ ) + + +@mock_organizations +def test_deregister_delegated_administrator(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # when + client.deregister_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + response = client.list_delegated_administrators() + response["DelegatedAdministrators"].should.have.length_of(0) + + +@mock_organizations +def test_deregister_delegated_administrator_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + + # deregister master Account + # when + with pytest.raises(ClientError) as e: + client.deregister_delegated_administrator( + AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeregisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ConstraintViolationException") + ex.response["Error"]["Message"].should.equal( + "You cannot register master account/yourself as delegated administrator for your organization." + ) + + # deregister not existing Account + # when + with pytest.raises(ClientError) as e: + client.deregister_delegated_administrator( + AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeregisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." + ) + + # deregister not registered Account + # when + with pytest.raises(ClientError) as e: + client.deregister_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeregisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException") + ex.response["Error"]["Message"].should.equal( + "The provided account is not a registered delegated administrator for your organization." + ) + + # given + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # deregister not registered service + # when + with pytest.raises(ClientError) as e: + client.deregister_delegated_administrator( + AccountId=account_id, ServicePrincipal="guardduty.amazonaws.com" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DeregisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal( + "You specified an unrecognized service principal."
+ ) + + +@mock_organizations +def test_enable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + + # when + response = client.enable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + sorted(root["PolicyTypes"], key=lambda x: x["Type"]).should.equal( + [ + {"Type": "AISERVICES_OPT_OUT_POLICY", "Status": "ENABLED"}, + {"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}, + ] + ) + + +@mock_organizations +def test_enable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with pytest.raises(ClientError) as e: + client.enable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.value + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." + ) + + # enable policy again ('SERVICE_CONTROL_POLICY' is enabled by default) + # when + with pytest.raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY") + + # then + ex = e.value + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeAlreadyEnabledException") + ex.response["Error"]["Message"].should.equal( + "The specified policy type is already enabled." 
+ ) + + # invalid policy type + # when + with pytest.raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.value + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_disable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + + # when + response = client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + root["PolicyTypes"].should.equal( + [{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}] + ) + + +@mock_organizations +def test_disable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with pytest.raises(ClientError) as e: + client.disable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." + ) + + # disable not enabled policy + # when + with pytest.raises(ClientError) as e: + client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.value + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeNotEnabledException") + ex.response["Error"]["Message"].should.equal( + "This operation can be performed only for enabled policy types." 
+ ) + + # invalid policy type + # when + with pytest.raises(ClientError) as e: + client.disable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.value + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_aiservices_opt_out_policy(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + ai_policy = { + "services": { + "@@operators_allowed_for_child_policies": ["@@none"], + "default": { + "@@operators_allowed_for_child_policies": ["@@none"], + "opt_out_policy": { + "@@operators_allowed_for_child_policies": ["@@none"], + "@@assign": "optOut", + }, + }, + } + } + + # when + response = client.create_policy( + Content=json.dumps(ai_policy), + Description="Opt out of all AI services", + Name="ai-opt-out", + Type="AISERVICES_OPT_OUT_POLICY", + ) + + # then + summary = response["Policy"]["PolicySummary"] + policy_id = summary["Id"] + summary["Id"].should.match(utils.POLICY_ID_REGEX) + summary["Arn"].should.equal( + utils.AI_POLICY_ARN_FORMAT.format( + org["MasterAccountId"], org["Id"], summary["Id"] + ) + ) + summary["Name"].should.equal("ai-opt-out") + summary["Description"].should.equal("Opt out of all AI services") + summary["Type"].should.equal("AISERVICES_OPT_OUT_POLICY") + summary["AwsManaged"].should_not.be.ok + json.loads(response["Policy"]["Content"]).should.equal(ai_policy) + + # when + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + + # then + response = client.list_policies_for_target( + TargetId=root_id, Filter="AISERVICES_OPT_OUT_POLICY" + ) + response["Policies"].should.have.length_of(1) + response["Policies"][0]["Id"].should.equal(policy_id) diff --git a/tests/test_packages/__init__.py b/tests/test_packages/__init__.py index 05b1d476b..01fe5ab1f 100644 --- a/tests/test_packages/__init__.py +++ b/tests/test_packages/__init__.py @@ -6,4 +6,3 @@ import logging logging.getLogger("boto").setLevel(logging.CRITICAL) logging.getLogger("boto3").setLevel(logging.CRITICAL) logging.getLogger("botocore").setLevel(logging.CRITICAL) -logging.getLogger("nose").setLevel(logging.CRITICAL) diff --git a/tests/test_polly/__init__.py b/tests/test_polly/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_polly/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py index e172b98d0..6c99d0538 100644 --- a/tests/test_polly/test_polly.py +++ b/tests/test_polly/test_polly.py @@ -1,263 +1,263 @@ -from __future__ import unicode_literals - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa -from nose.tools import assert_raises -from moto import mock_polly - -# Polly only available in a few regions -DEFAULT_REGION = "eu-west-1" - -LEXICON_XML = """ - - - W3C - World Wide Web Consortium - -""" - - -@mock_polly -def test_describe_voices(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - - resp = client.describe_voices() - len(resp["Voices"]).should.be.greater_than(1) - - resp = client.describe_voices(LanguageCode="en-GB") - len(resp["Voices"]).should.equal(3) - - try: - client.describe_voices(LanguageCode="SOME_LANGUAGE") - except ClientError as err: - err.response["Error"]["Code"].should.equal("400") - else: - raise RuntimeError("Should of raised an exception") - - -@mock_polly -def test_put_list_lexicon(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon(Name="test", Content=LEXICON_XML) - - resp = client.list_lexicons() - len(resp["Lexicons"]).should.equal(1) - - -@mock_polly -def test_put_get_lexicon(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon(Name="test", Content=LEXICON_XML) - - resp = client.get_lexicon(Name="test") - resp.should.contain("Lexicon") - resp.should.contain("LexiconAttributes") - - -@mock_polly -def test_put_lexicon_bad_name(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - - try: - client.put_lexicon(Name="test-invalid", Content=LEXICON_XML) - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameterValue") - else: - raise RuntimeError("Should of raised an exception") - - -@mock_polly -def test_synthesize_speech(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon(Name="test", Content=LEXICON_XML) - - tests = (("pcm", "audio/pcm"), ("mp3", "audio/mpeg"), ("ogg_vorbis", "audio/ogg")) - for output_format, content_type in tests: - resp = client.synthesize_speech( - LexiconNames=["test"], - OutputFormat=output_format, - SampleRate="16000", - Text="test1234", - TextType="text", - VoiceId="Astrid", - ) - resp["ContentType"].should.equal(content_type) - - -@mock_polly -def test_synthesize_speech_bad_lexicon(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test2"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234", - TextType="text", - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("LexiconNotFoundException") - else: - raise RuntimeError("Should of raised LexiconNotFoundException") - - -@mock_polly -def test_synthesize_speech_bad_output_format(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="invalid", - SampleRate="16000", - Text="test1234", - TextType="text", - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameterValue") - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def 
test_synthesize_speech_bad_sample_rate(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="18000", - Text="test1234", - TextType="text", - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidSampleRateException") - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def test_synthesize_speech_bad_text_type(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234", - TextType="invalid", - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameterValue") - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def test_synthesize_speech_bad_voice_id(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234", - TextType="text", - VoiceId="Luke", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameterValue") - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def test_synthesize_speech_text_too_long(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234" * 376, # = 3008 characters - TextType="text", - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal("TextLengthExceededException") - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def test_synthesize_speech_bad_speech_marks1(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234", - TextType="text", - SpeechMarkTypes=["word"], - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal( - "MarksNotSupportedForFormatException" - ) - else: - raise RuntimeError("Should of raised ") - - -@mock_polly -def test_synthesize_speech_bad_speech_marks2(): - client = boto3.client("polly", region_name=DEFAULT_REGION) - client.put_lexicon(Name="test", Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=["test"], - OutputFormat="pcm", - SampleRate="16000", - Text="test1234", - TextType="ssml", - SpeechMarkTypes=["word"], - VoiceId="Astrid", - ) - except ClientError as err: - err.response["Error"]["Code"].should.equal( - "MarksNotSupportedForFormatException" - ) - else: - raise RuntimeError("Should of raised ") +from __future__ import unicode_literals + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa +import pytest +from moto import mock_polly + +# Polly only available in a few regions +DEFAULT_REGION = "eu-west-1" + +LEXICON_XML = """ + + + W3C + World Wide Web Consortium + +""" + + +@mock_polly +def test_describe_voices(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + + resp = client.describe_voices() + 
len(resp["Voices"]).should.be.greater_than(1) + + resp = client.describe_voices(LanguageCode="en-GB") + len(resp["Voices"]).should.equal(3) + + try: + client.describe_voices(LanguageCode="SOME_LANGUAGE") + except ClientError as err: + err.response["Error"]["Code"].should.equal("400") + else: + raise RuntimeError("Should have raised an exception") + + +@mock_polly +def test_put_list_lexicon(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon(Name="test", Content=LEXICON_XML) + + resp = client.list_lexicons() + len(resp["Lexicons"]).should.equal(1) + + +@mock_polly +def test_put_get_lexicon(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon(Name="test", Content=LEXICON_XML) + + resp = client.get_lexicon(Name="test") + resp.should.contain("Lexicon") + resp.should.contain("LexiconAttributes") + + +@mock_polly +def test_put_lexicon_bad_name(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + + try: + client.put_lexicon(Name="test-invalid", Content=LEXICON_XML) + except ClientError as err: + err.response["Error"]["Code"].should.equal("InvalidParameterValue") + else: + raise RuntimeError("Should have raised an exception") + + +@mock_polly +def test_synthesize_speech(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon(Name="test", Content=LEXICON_XML) + + tests = (("pcm", "audio/pcm"), ("mp3", "audio/mpeg"), ("ogg_vorbis", "audio/ogg")) + for output_format, content_type in tests: + resp = client.synthesize_speech( + LexiconNames=["test"], + OutputFormat=output_format, + SampleRate="16000", + Text="test1234", + TextType="text", + VoiceId="Astrid", + ) + resp["ContentType"].should.equal(content_type) + + +@mock_polly +def test_synthesize_speech_bad_lexicon(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test2"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234", + TextType="text", + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("LexiconNotFoundException") + else: + raise RuntimeError("Should have raised LexiconNotFoundException") + + +@mock_polly +def test_synthesize_speech_bad_output_format(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="invalid", + SampleRate="16000", + Text="test1234", + TextType="text", + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("InvalidParameterValue") + else: + raise RuntimeError("Should have raised InvalidParameterValue") + + +@mock_polly +def test_synthesize_speech_bad_sample_rate(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="18000", + Text="test1234", + TextType="text", + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("InvalidSampleRateException") + else: + raise RuntimeError("Should have raised InvalidSampleRateException") + + +@mock_polly +def test_synthesize_speech_bad_text_type(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( +
LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234", + TextType="invalid", + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("InvalidParameterValue") + else: + raise RuntimeError("Should have raised InvalidParameterValue") + + +@mock_polly +def test_synthesize_speech_bad_voice_id(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234", + TextType="text", + VoiceId="Luke", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("InvalidParameterValue") + else: + raise RuntimeError("Should have raised InvalidParameterValue") + + +@mock_polly +def test_synthesize_speech_text_too_long(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234" * 376, # = 3008 characters + TextType="text", + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal("TextLengthExceededException") + else: + raise RuntimeError("Should have raised TextLengthExceededException") + + +@mock_polly +def test_synthesize_speech_bad_speech_marks1(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234", + TextType="text", + SpeechMarkTypes=["word"], + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal( + "MarksNotSupportedForFormatException" + ) + else: + raise RuntimeError("Should have raised MarksNotSupportedForFormatException") + + +@mock_polly +def test_synthesize_speech_bad_speech_marks2(): + client = boto3.client("polly", region_name=DEFAULT_REGION) + client.put_lexicon(Name="test", Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=["test"], + OutputFormat="pcm", + SampleRate="16000", + Text="test1234", + TextType="ssml", + SpeechMarkTypes=["word"], + VoiceId="Astrid", + ) + except ClientError as err: + err.response["Error"]["Code"].should.equal( + "MarksNotSupportedForFormatException" + ) + else: + raise RuntimeError("Should have raised MarksNotSupportedForFormatException") diff --git a/tests/test_ram/test_ram.py b/tests/test_ram/test_ram.py new file mode 100644 index 000000000..73a23331b --- /dev/null +++ b/tests/test_ram/test_ram.py @@ -0,0 +1,381 @@ +import time +from datetime import datetime + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +import pytest + +from moto import mock_ram, mock_organizations +from moto.core import ACCOUNT_ID + + +@mock_ram +def test_create_resource_share(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # when + response = client.create_resource_share(name="test") + + # then + resource = response["resourceShare"] + resource["allowExternalPrincipals"].should.be.ok + resource["creationTime"].should.be.a(datetime) + resource["lastUpdatedTime"].should.be.a(datetime) + resource["name"].should.equal("test") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + resource.should_not.have.key("featureSet") + + # creating a resource
share with the same name should result in a second one, + # not overwrite/update the old one + # when + response = client.create_resource_share( + name="test", + allowExternalPrincipals=False, + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + resource = response["resourceShare"] + resource["allowExternalPrincipals"].should_not.be.ok + resource["creationTime"].should.be.a(datetime) + resource["lastUpdatedTime"].should.be.a(datetime) + resource["name"].should.equal("test") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + + response = client.get_resource_shares(resourceOwner="SELF") + response["resourceShares"].should.have.length_of(2) + + +@mock_ram +def test_create_resource_share_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # invalid ARN + # when + with pytest.raises(ClientError) as e: + client.create_resource_share(name="test", resourceArns=["invalid-arn"]) + ex = e.value + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("MalformedArnException") + ex.response["Error"]["Message"].should.equal( + "The specified resource ARN invalid-arn is not valid. " + "Verify the ARN and try again." + ) + + # valid ARN, but not shareable resource type + # when + with pytest.raises(ClientError) as e: + client.create_resource_share( + name="test", resourceArns=["arn:aws:iam::{}:role/test".format(ACCOUNT_ID)] + ) + ex = e.value + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("MalformedArnException") + ex.response["Error"]["Message"].should.equal( + "You cannot share the selected resource type." + ) + + # invalid principal ID + # when + with pytest.raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=["invalid"], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.value + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Principal ID invalid is malformed. Verify the ID and try again."
+ ) + + +@mock_ram +@mock_organizations +def test_create_resource_share_with_organization(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org_arn = client.create_organization(FeatureSet="ALL")["Organization"]["Arn"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_arn = client.create_organizational_unit(ParentId=root_id, Name="test")[ + "OrganizationalUnit" + ]["Arn"] + client = boto3.client("ram", region_name="us-east-1") + + # share in whole Organization + # when + response = client.create_resource_share( + name="test", + principals=[org_arn], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + response["resourceShare"]["name"].should.equal("test") + + # share in an OU + # when + response = client.create_resource_share( + name="test", + principals=[ou_arn], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + response["resourceShare"]["name"].should.equal("test") + + +@mock_ram +@mock_organizations +def test_create_resource_share_with_organization_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + client.create_organizational_unit(ParentId=root_id, Name="test") + client = boto3.client("ram", region_name="us-east-1") + + # unknown Organization + # when + with pytest.raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=[ + "arn:aws:organizations::{}:organization/o-unknown".format(ACCOUNT_ID) + ], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.value + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "Organization o-unknown could not be found." + ) + + # unknown OU + # when + with pytest.raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=[ + "arn:aws:organizations::{}:ou/o-unknown/ou-unknown".format(ACCOUNT_ID) + ], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.value + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "OrganizationalUnit ou-unknown in unknown organization could not be found." 
+ ) + + +@mock_ram +def test_get_resource_shares(): + # given + client = boto3.client("ram", region_name="us-east-1") + client.create_resource_share(name="test") + + # when + response = client.get_resource_shares(resourceOwner="SELF") + + # then + response["resourceShares"].should.have.length_of(1) + resource = response["resourceShares"][0] + resource["allowExternalPrincipals"].should.be.ok + resource["creationTime"].should.be.a(datetime) + resource["featureSet"].should.equal("STANDARD") + resource["lastUpdatedTime"].should.be.a(datetime) + resource["name"].should.equal("test") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + + +@mock_ram +def test_get_resource_shares_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # invalid resource owner + # when + with pytest.raises(ClientError) as e: + client.get_resource_shares(resourceOwner="invalid") + ex = e.value + ex.operation_name.should.equal("GetResourceShares") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "invalid is not a valid resource owner. " + "Specify either SELF or OTHER-ACCOUNTS and try again." + ) + + +@mock_ram +def test_update_resource_share(): + # given + client = boto3.client("ram", region_name="us-east-1") + arn = client.create_resource_share(name="test")["resourceShare"]["resourceShareArn"] + + # when + time.sleep(0.1) + response = client.update_resource_share(resourceShareArn=arn, name="test-update") + + # then + resource = response["resourceShare"] + resource["allowExternalPrincipals"].should.be.ok + resource["name"].should.equal("test-update") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + resource.should_not.have.key("featureSet") + creation_time = resource["creationTime"] + resource["lastUpdatedTime"].should.be.greater_than(creation_time) + + response = client.get_resource_shares(resourceOwner="SELF") + response["resourceShares"].should.have.length_of(1) + + +@mock_ram +def test_update_resource_share_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # invalid resource owner + # when + with pytest.raises(ClientError) as e: + client.update_resource_share( + resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( + ACCOUNT_ID + ), + name="test-update", + ) + ex = e.value + ex.operation_name.should.equal("UpdateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format( + ACCOUNT_ID + ) + ) + + +@mock_ram +def test_delete_resource_share(): + # given + client = boto3.client("ram", region_name="us-east-1") + arn = client.create_resource_share(name="test")["resourceShare"]["resourceShareArn"] + + # when + time.sleep(0.1) + response = client.delete_resource_share(resourceShareArn=arn) + + # then + response["returnValue"].should.be.ok + + 
response = client.get_resource_shares(resourceOwner="SELF") + response["resourceShares"].should.have.length_of(1) + resource = response["resourceShares"][0] + resource["status"].should.equal("DELETED") + creation_time = resource["creationTime"] + resource["lastUpdatedTime"].should.be.greater_than(creation_time) + + +@mock_ram +def test_delete_resource_share_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # invalid resource owner + # when + with pytest.raises(ClientError) as e: + client.delete_resource_share( + resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( + ACCOUNT_ID + ) + ) + ex = e.value + ex.operation_name.should.equal("DeleteResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format( + ACCOUNT_ID + ) + ) + + +@mock_ram +@mock_organizations +def test_enable_sharing_with_aws_organization(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + client = boto3.client("ram", region_name="us-east-1") + + # when + response = client.enable_sharing_with_aws_organization() + + # then + response["returnValue"].should.be.ok + + +@mock_ram +@mock_organizations +def test_enable_sharing_with_aws_organization_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # no Organization defined + # when + with pytest.raises(ClientError) as e: + client.enable_sharing_with_aws_organization() + ex = e.value + ex.operation_name.should.equal("EnableSharingWithAwsOrganization") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("OperationNotPermittedException") + ex.response["Error"]["Message"].should.equal( + "Unable to enable sharing with AWS Organizations. " + "Received AccessDeniedException from AWSOrganizations with the following error message: " + "You don't have permissions to access this resource." + ) diff --git a/tests/test_rds/__init__.py b/tests/test_rds/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_rds/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_rds2/__init__.py b/tests/test_rds2/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_rds2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index e93ff43e9..96ec378db 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -4,6 +4,7 @@ from botocore.exceptions import ClientError, ParamValidationError import boto3 import sure # noqa from moto import mock_ec2, mock_kms, mock_rds2 +from moto.core import ACCOUNT_ID @mock_rds2 @@ -183,12 +184,12 @@ def test_start_database(): @mock_rds2 -def test_fail_to_stop_multi_az(): +def test_fail_to_stop_multi_az_and_sqlserver(): conn = boto3.client("rds", region_name="us-west-2") database = conn.create_db_instance( DBInstanceIdentifier="db-master-1", AllocatedStorage=10, - Engine="postgres", + Engine="sqlserver-ee", DBName="staging-postgres", DBInstanceClass="db.m1.small", LicenseModel="license-included", @@ -213,6 +214,33 @@ def test_fail_to_stop_multi_az(): ).should.throw(ClientError) +@mock_rds2 +def test_stop_multi_az_postgres(): + conn = boto3.client("rds", region_name="us-west-2") + database = conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + LicenseModel="license-included", + MasterUsername="root", + MasterUserPassword="hunter2", + Port=1234, + DBSecurityGroups=["my_sg"], + MultiAZ=True, + ) + + mydb = conn.describe_db_instances( + DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"] + )["DBInstances"][0] + mydb["DBInstanceStatus"].should.equal("available") + + response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"]) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["DBInstance"]["DBInstanceStatus"].should.equal("stopped") + + @mock_rds2 def test_fail_to_stop_readreplica(): conn = boto3.client("rds", region_name="us-west-2") @@ -1477,7 +1505,9 @@ def test_create_database_with_encrypted_storage(): @mock_rds2 def test_create_db_parameter_group(): - conn = boto3.client("rds", region_name="us-west-2") + region = "us-west-2" + pg_name = "test" + conn = boto3.client("rds", region_name=region) db_parameter_group = conn.create_db_parameter_group( DBParameterGroupName="test", DBParameterGroupFamily="mysql5.6", @@ -1491,6 +1521,9 @@ def test_create_db_parameter_group(): db_parameter_group["DBParameterGroup"]["Description"].should.equal( "test parameter group" ) + db_parameter_group["DBParameterGroup"]["DBParameterGroupArn"].should.equal( + "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + ) @mock_rds2 @@ -1602,9 +1635,11 @@ def test_create_db_parameter_group_duplicate(): @mock_rds2 def test_describe_db_parameter_group(): - conn = boto3.client("rds", region_name="us-west-2") + region = "us-west-2" + pg_name = "test" + conn = boto3.client("rds", region_name=region) conn.create_db_parameter_group( - DBParameterGroupName="test", + DBParameterGroupName=pg_name, DBParameterGroupFamily="mysql5.6", Description="test parameter group", ) @@ -1612,6 +1647,9 @@ def test_describe_db_parameter_group(): db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal( "test" ) + db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupArn"].should.equal( + "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + ) @mock_rds2 @@ -1722,3 +1760,21 @@ def test_create_db_snapshot_with_iam_authentication(): ).get("DBSnapshot") snapshot.get("IAMDatabaseAuthenticationEnabled").should.equal(True) + + +@mock_rds2 +def test_create_db_instance_with_tags(): + client = boto3.client("rds", 
region_name="us-west-2") + tags = [{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}] + db_instance_identifier = "test-db-instance" + resp = client.create_db_instance( + DBInstanceIdentifier=db_instance_identifier, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + Tags=tags, + ) + resp["DBInstance"]["TagList"].should.equal(tags) + + resp = client.describe_db_instances(DBInstanceIdentifier=db_instance_identifier) + resp["DBInstances"][0]["TagList"].should.equal(tags) diff --git a/tests/test_redshift/__init__.py b/tests/test_redshift/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_redshift/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 6bb3b1396..f2acf4d00 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -12,6 +12,7 @@ from boto.redshift.exceptions import ( InvalidSubnet, ) from botocore.exceptions import ClientError +import pytest import sure # noqa from moto import mock_ec2 @@ -42,7 +43,7 @@ def test_create_cluster_boto3(): @mock_redshift -def test_create_cluster_boto3(): +def test_create_cluster_with_enhanced_vpc_routing_enabled(): client = boto3.client("redshift", region_name="us-east-1") response = client.create_cluster( DBName="test", @@ -75,7 +76,7 @@ def test_create_snapshot_copy_grant(): client.describe_snapshot_copy_grants.when.called_with( SnapshotCopyGrantName="test-us-east-1" - ).should.throw(Exception) + ).should.throw(ClientError) @mock_redshift @@ -423,7 +424,7 @@ def test_delete_cluster(): ) conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw( - AttributeError + boto.exception.JSONResponseError ) clusters = conn.describe_clusters()["DescribeClustersResponse"][ @@ -825,12 +826,11 @@ def test_describe_cluster_snapshots(): @mock_redshift def test_describe_cluster_snapshots_not_found_error(): client = boto3.client("redshift", region_name="us-east-1") - cluster_identifier = "my_cluster" - snapshot_identifier = "my_snapshot" + cluster_identifier = "non-existent-cluster-id" + snapshot_identifier = "non-existent-snapshot-id" - client.describe_cluster_snapshots.when.called_with( - ClusterIdentifier=cluster_identifier - ).should.throw(ClientError, "Cluster {} not found.".format(cluster_identifier)) + resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp["Snapshots"].should.have.length_of(0) client.describe_cluster_snapshots.when.called_with( SnapshotIdentifier=snapshot_identifier @@ -866,8 +866,8 @@ def test_delete_cluster_snapshot(): # Delete invalid id client.delete_cluster_snapshot.when.called_with( - SnapshotIdentifier="not-a-snapshot" - ).should.throw(ClientError) + SnapshotIdentifier="non-existent" + ).should.throw(ClientError, "Snapshot non-existent not found.") @mock_redshift @@ -891,7 +891,7 @@ def test_cluster_snapshot_already_exists(): client.create_cluster_snapshot.when.called_with( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier - ).should.throw(ClientError) + ).should.throw(ClientError, "{} already exists".format(snapshot_identifier)) @mock_redshift @@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot(): ClusterIdentifier=original_cluster_identifier, ) + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier=original_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + 
).should.throw(ClientError, "ClusterAlreadyExists") + response = client.restore_from_cluster_snapshot( ClusterIdentifier=new_cluster_identifier, SnapshotIdentifier=original_snapshot_identifier, @@ -1255,6 +1260,23 @@ def test_enable_snapshot_copy(): MasterUserPassword="password", NodeType="ds2.xlarge", ) + with pytest.raises(ClientError) as ex: + client.enable_snapshot_copy( + ClusterIdentifier="test", DestinationRegion="us-west-2", RetentionPeriod=3, + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.value.response["Error"]["Message"].should.contain( + "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." + ) + with pytest.raises(ClientError) as ex: + client.enable_snapshot_copy( + ClusterIdentifier="test", + DestinationRegion="us-east-1", + RetentionPeriod=3, + SnapshotCopyGrantName="invalid-us-east-1-to-us-east-1", + ) + ex.value.response["Error"]["Code"].should.equal("UnknownSnapshotCopyRegionFault") + ex.value.response["Error"]["Message"].should.contain("Invalid region us-east-1") client.enable_snapshot_copy( ClusterIdentifier="test", DestinationRegion="us-west-2", @@ -1333,3 +1355,91 @@ def test_modify_snapshot_copy_retention_period(): response = client.describe_clusters(ClusterIdentifier="test") cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) + + +@mock_redshift +def test_create_duplicate_cluster_fails(): + kwargs = { + "ClusterIdentifier": "test", + "ClusterType": "single-node", + "DBName": "test", + "MasterUsername": "user", + "MasterUserPassword": "password", + "NodeType": "ds2.xlarge", + } + client = boto3.client("redshift", region_name="us-east-1") + client.create_cluster(**kwargs) + client.create_cluster.when.called_with(**kwargs).should.throw( + ClientError, "ClusterAlreadyExists" + ) + + +@mock_redshift +def test_delete_cluster_with_final_snapshot(): + client = boto3.client("redshift", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.delete_cluster(ClusterIdentifier="non-existent") + ex.value.response["Error"]["Code"].should.equal("ClusterNotFound") + ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.") + + cluster_identifier = "my_cluster" + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType="single-node", + DBName="test", + MasterUsername="user", + MasterUserPassword="password", + NodeType="ds2.xlarge", + ) + + with pytest.raises(ClientError) as ex: + client.delete_cluster( + ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=False + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination") + ex.value.response["Error"]["Message"].should.contain( + "FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified." 
+    )
+
+    snapshot_identifier = "my_snapshot"
+    client.delete_cluster(
+        ClusterIdentifier=cluster_identifier,
+        SkipFinalClusterSnapshot=False,
+        FinalClusterSnapshotIdentifier=snapshot_identifier,
+    )
+
+    resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
+    resp["Snapshots"].should.have.length_of(1)
+    resp["Snapshots"][0]["SnapshotIdentifier"].should.equal(snapshot_identifier)
+    resp["Snapshots"][0]["SnapshotType"].should.equal("manual")
+
+    with pytest.raises(ClientError) as ex:
+        client.describe_clusters(ClusterIdentifier=cluster_identifier)
+    ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
+    ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
+
+
+@mock_redshift
+def test_delete_cluster_without_final_snapshot():
+    client = boto3.client("redshift", region_name="us-east-1")
+    cluster_identifier = "my_cluster"
+    client.create_cluster(
+        ClusterIdentifier=cluster_identifier,
+        ClusterType="single-node",
+        DBName="test",
+        MasterUsername="user",
+        MasterUserPassword="password",
+        NodeType="ds2.xlarge",
+    )
+    client.delete_cluster(
+        ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True
+    )
+
+    resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
+    resp["Snapshots"].should.have.length_of(0)
+
+    with pytest.raises(ClientError) as ex:
+        client.describe_clusters(ClusterIdentifier=cluster_identifier)
+    ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
+    ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py
index f4eee85e8..e3ba6d9d4 100644
--- a/tests/test_redshift/test_server.py
+++ b/tests/test_redshift/test_server.py
@@ -1,6 +1,5 @@
 from __future__ import unicode_literals
 
-import json
 import sure  # noqa
 
 import moto.server as server
@@ -20,3 +19,14 @@ def test_describe_clusters():
 
     result = res.data.decode("utf-8")
     result.should.contain("<Clusters></Clusters>")
+
+
+@mock_redshift
+def test_describe_clusters_with_json_content_type():
+    backend = server.create_backend_app("redshift")
+    test_client = backend.test_client()
+
+    res = test_client.get("/?Action=DescribeClusters&ContentType=JSON")
+
+    result = res.data.decode("utf-8")
+    result.should.contain('{"Clusters": []}')
diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
index 84f7a8b86..f5a934b9b 100644
--- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
+++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
@@ -9,41 +9,6 @@ from moto import mock_resourcegroupstaggingapi
 from moto import mock_s3
 
 
-@mock_s3
-@mock_resourcegroupstaggingapi
-def test_get_resources_s3():
-    # Tests pagination
-    s3_client = boto3.client("s3", region_name="eu-central-1")
-
-    # Will end up having key1,key2,key3,key4
-    response_keys = set()
-
-    # Create 4 buckets
-    for i in range(1, 5):
-        i_str = str(i)
-        s3_client.create_bucket(Bucket="test_bucket" + i_str)
-        s3_client.put_bucket_tagging(
-            Bucket="test_bucket" + i_str,
-            Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]},
-        )
-        response_keys.add("key" + i_str)
-
-    rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")
-    resp = rtapi.get_resources(ResourcesPerPage=2)
-    for resource in resp["ResourceTagMappingList"]:
-        response_keys.remove(resource["Tags"][0]["Key"])
-
-    response_keys.should.have.length_of(2)
-
-    resp = 
rtapi.get_resources( - ResourcesPerPage=2, PaginationToken=resp["PaginationToken"] - ) - for resource in resp["ResourceTagMappingList"]: - response_keys.remove(resource["Tags"][0]["Key"]) - - response_keys.should.have.length_of(0) - - @mock_ec2 @mock_resourcegroupstaggingapi def test_get_resources_ec2(): @@ -96,6 +61,27 @@ def test_get_resources_ec2(): resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("instance/") +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2_vpc(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.create_tags(Resources=[vpc.id], Tags=[{"Key": "test", "Value": "test"}]) + + def assert_response(resp): + results = resp.get("ResourceTagMappingList", []) + results.should.have.length_of(1) + vpc.id.should.be.within(results[0]["ResourceARN"]) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-west-1") + resp = rtapi.get_resources(ResourceTypeFilters=["ec2"]) + assert_response(resp) + resp = rtapi.get_resources(ResourceTypeFilters=["ec2:vpc"]) + assert_response(resp) + resp = rtapi.get_resources(TagFilters=[{"Key": "test", "Values": ["test"]}]) + assert_response(resp) + + @mock_ec2 @mock_resourcegroupstaggingapi def test_get_tag_keys_ec2(): @@ -230,12 +216,14 @@ def test_get_many_resources(): rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1") - resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancer:loadbalancer"]) + resp = rtapi.get_resources( + ResourceTypeFilters=["elasticloadbalancing:loadbalancer"] + ) resp["ResourceTagMappingList"].should.have.length_of(2) resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("loadbalancer/") resp = rtapi.get_resources( - ResourceTypeFilters=["elasticloadbalancer:loadbalancer"], + ResourceTypeFilters=["elasticloadbalancing:loadbalancer"], TagFilters=[{"Key": "key_name"}], ) @@ -244,4 +232,144 @@ def test_get_many_resources(): {"Key": "key_name", "Value": "a_value"} ) - # TODO test pagenation + # TODO test pagination + + +@mock_ec2 +@mock_elbv2 +@mock_resourcegroupstaggingapi +def test_get_resources_target_group(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + elbv2 = boto3.client("elbv2", region_name="eu-central-1") + + vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default") + + # Create two tagged target groups + for i in range(1, 3): + i_str = str(i) + + target_group = elbv2.create_target_group( + Name="test" + i_str, + Protocol="HTTP", + Port=8080, + VpcId=vpc.id, + TargetType="instance", + )["TargetGroups"][0] + + elbv2.add_tags( + ResourceArns=[target_group["TargetGroupArn"]], + Tags=[{"Key": "Test", "Value": i_str}], + ) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + + # Basic test + resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancing:targetgroup"]) + resp["ResourceTagMappingList"].should.have.length_of(2) + + # Test tag filtering + resp = rtapi.get_resources( + ResourceTypeFilters=["elasticloadbalancing:targetgroup"], + TagFilters=[{"Key": "Test", "Values": ["1"]}], + ) + resp["ResourceTagMappingList"].should.have.length_of(1) + resp["ResourceTagMappingList"][0]["Tags"].should.contain( + {"Key": "Test", "Value": "1"} + ) + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client("s3", region_name="eu-central-1") + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): 
+ i_str = str(i) + s3_client.create_bucket( + Bucket="test_bucket" + i_str, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) + s3_client.put_bucket_tagging( + Bucket="test_bucket" + i_str, + Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]}, + ) + response_keys.add("key" + i_str) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp["ResourceTagMappingList"]: + response_keys.remove(resource["Tags"][0]["Key"]) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, PaginationToken=resp["PaginationToken"] + ) + for resource in resp["ResourceTagMappingList"]: + response_keys.remove(resource["Tags"][0]["Key"]) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_multiple_tag_filters(): + client = boto3.client("ec2", region_name="eu-central-1") + + resp = client.run_instances( + ImageId="ami-123", + MinCount=1, + MaxCount=1, + InstanceType="t2.micro", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "MY_TAG1", "Value": "MY_UNIQUE_VALUE"}, + {"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"}, + ], + }, + { + "ResourceType": "instance", + "Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}], + }, + ], + ) + instance_1_id = resp["Instances"][0]["InstanceId"] + + resp = client.run_instances( + ImageId="ami-456", + MinCount=1, + MaxCount=1, + InstanceType="t2.micro", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "MY_TAG1", "Value": "MY_ALT_UNIQUE_VALUE"}, + {"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"}, + ], + }, + { + "ResourceType": "instance", + "Tags": [{"Key": "MY_ALT_TAG3", "Value": "MY_VALUE3"}], + }, + ], + ) + instance_2_id = resp["Instances"][0]["InstanceId"] + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + results = rtapi.get_resources( + TagFilters=[ + {"Key": "MY_TAG1", "Values": ["MY_UNIQUE_VALUE"]}, + {"Key": "MY_TAG2", "Values": ["MY_SHARED_VALUE"]}, + ] + ).get("ResourceTagMappingList", []) + results.should.have.length_of(1) + instance_1_id.should.be.within(results[0]["ResourceARN"]) + instance_2_id.shouldnt.be.within(results[0]["ResourceARN"]) diff --git a/tests/test_route53/__init__.py b/tests/test_route53/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_route53/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 746c78719..8ce5272ef 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -10,7 +10,7 @@ import sure # noqa import uuid import botocore -from nose.tools import assert_raises +import pytest from moto import mock_route53, mock_route53_deprecated @@ -644,6 +644,94 @@ def test_change_resource_record_sets_crud_valid(): len(response["ResourceRecordSets"]).should.equal(0) +@mock_route53 +def test_change_resource_record_sets_crud_valid_with_special_xml_chars(): + conn = boto3.client("route53", region_name="us-east-1") + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash("foo")), + HostedZoneConfig=dict(PrivateZone=True, Comment="db"), + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create TXT Record. 
+    txt_record_endpoint_payload = {
+        "Comment": "Create TXT record prod.redis.db",
+        "Changes": [
+            {
+                "Action": "CREATE",
+                "ResourceRecordSet": {
+                    "Name": "prod.redis.db.",
+                    "Type": "TXT",
+                    "TTL": 10,
+                    "ResourceRecords": [{"Value": "SomeInitialValue"}],
+                },
+            }
+        ],
+    }
+    conn.change_resource_record_sets(
+        HostedZoneId=hosted_zone_id, ChangeBatch=txt_record_endpoint_payload
+    )
+
+    response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id)
+    len(response["ResourceRecordSets"]).should.equal(1)
+    a_record_detail = response["ResourceRecordSets"][0]
+    a_record_detail["Name"].should.equal("prod.redis.db.")
+    a_record_detail["Type"].should.equal("TXT")
+    a_record_detail["TTL"].should.equal(10)
+    a_record_detail["ResourceRecords"].should.equal([{"Value": "SomeInitialValue"}])
+
+    # Update TXT Record with XML Special Character &.
+    txt_record_with_special_char_endpoint_payload = {
+        "Comment": "Update TXT record prod.redis.db",
+        "Changes": [
+            {
+                "Action": "UPSERT",
+                "ResourceRecordSet": {
+                    "Name": "prod.redis.db.",
+                    "Type": "TXT",
+                    "TTL": 60,
+                    "ResourceRecords": [{"Value": "SomeInitialValue&NewValue"}],
+                },
+            }
+        ],
+    }
+    conn.change_resource_record_sets(
+        HostedZoneId=hosted_zone_id,
+        ChangeBatch=txt_record_with_special_char_endpoint_payload,
+    )
+
+    response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id)
+    len(response["ResourceRecordSets"]).should.equal(1)
+    cname_record_detail = response["ResourceRecordSets"][0]
+    cname_record_detail["Name"].should.equal("prod.redis.db.")
+    cname_record_detail["Type"].should.equal("TXT")
+    cname_record_detail["TTL"].should.equal(60)
+    cname_record_detail["ResourceRecords"].should.equal(
+        [{"Value": "SomeInitialValue&NewValue"}]
+    )
+
+    # Delete record.
+    delete_payload = {
+        "Comment": "delete prod.redis.db",
+        "Changes": [
+            {
+                "Action": "DELETE",
+                "ResourceRecordSet": {"Name": "prod.redis.db", "Type": "TXT"},
+            }
+        ],
+    }
+    conn.change_resource_record_sets(
+        HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload
+    )
+    response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id)
+    len(response["ResourceRecordSets"]).should.equal(0)
+
+
 @mock_route53
 def test_change_weighted_resource_record_sets():
     conn = boto3.client("route53", region_name="us-east-2")
@@ -753,6 +841,79 @@ def test_change_weighted_resource_record_sets():
     record["Weight"].should.equal(10)
 
 
+@mock_route53
+def test_failover_record_sets():
+    conn = boto3.client("route53", region_name="us-east-2")
+    conn.create_hosted_zone(Name="test.zone.", CallerReference=str(hash("test")))
+    zones = conn.list_hosted_zones_by_name(DNSName="test.zone.")
+    hosted_zone_id = zones["HostedZones"][0]["Id"]
+
+    # Create failover record
+    conn.change_resource_record_sets(
+        HostedZoneId=hosted_zone_id,
+        ChangeBatch={
+            "Changes": [
+                {
+                    "Action": "CREATE",
+                    "ResourceRecordSet": {
+                        "Name": "failover.test.zone.",
+                        "Type": "A",
+                        "TTL": 10,
+                        "ResourceRecords": [{"Value": "127.0.0.1"}],
+                        "Failover": "PRIMARY",
+                    },
+                }
+            ]
+        },
+    )
+
+    response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id)
+    record = response["ResourceRecordSets"][0]
+    record["Failover"].should.equal("PRIMARY")
+
+
+@mock_route53
+def test_geolocation_record_sets():
+    conn = boto3.client("route53", region_name="us-east-2")
+    conn.create_hosted_zone(Name="test.zone.", CallerReference=str(hash("test")))
+    zones = conn.list_hosted_zones_by_name(DNSName="test.zone.")
+    hosted_zone_id = zones["HostedZones"][0]["Id"]
+
+    # Create geolocation record
+    
conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + "Changes": [ + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "georecord1.test.zone.", + "Type": "A", + "TTL": 10, + "ResourceRecords": [{"Value": "127.0.0.1"}], + "GeoLocation": {"ContinentCode": "EU"}, + }, + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "georecord2.test.zone.", + "Type": "A", + "TTL": 10, + "ResourceRecords": [{"Value": "127.0.0.2"}], + "GeoLocation": {"CountryCode": "US", "SubdivisionCode": "NY"}, + }, + }, + ] + }, + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + rrs = response["ResourceRecordSets"] + rrs[0]["GeoLocation"].should.equal({"ContinentCode": "EU"}) + rrs[1]["GeoLocation"].should.equal({"CountryCode": "US", "SubdivisionCode": "NY"}) + + @mock_route53 def test_change_resource_record_invalid(): conn = boto3.client("route53", region_name="us-east-1") @@ -782,7 +943,7 @@ def test_change_resource_record_invalid(): ], } - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): conn.change_resource_record_sets( HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload ) @@ -805,7 +966,7 @@ def test_change_resource_record_invalid(): ], } - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): conn.change_resource_record_sets( HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload ) @@ -870,3 +1031,14 @@ def test_list_resource_record_sets_name_type_filters(): len(returned_records).should.equal(len(all_records) - start_with) for desired_record in all_records[start_with:]: returned_records.should.contain(desired_record) + + +@mock_route53 +def test_get_change(): + conn = boto3.client("route53", region_name="us-east-2") + + change_id = "123456" + response = conn.get_change(Id=change_id) + + response["ChangeInfo"]["Id"].should.equal(change_id) + response["ChangeInfo"]["Status"].should.equal("INSYNC") diff --git a/tests/test_s3/red.jpg b/tests/test_s3/red.jpg new file mode 100644 index 000000000..6fb9aed7c Binary files /dev/null and b/tests/test_s3/red.jpg differ diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 294beca87..bac03ed6a 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2,9 +2,9 @@ from __future__ import unicode_literals import datetime -import os import sys - +import os +from boto3 import Session from six.moves.urllib.request import urlopen from six.moves.urllib.error import HTTPError from functools import wraps @@ -12,6 +12,7 @@ from gzip import GzipFile from io import BytesIO import zlib import pickle +import uuid import json import boto @@ -23,12 +24,11 @@ from botocore.handlers import disable_signing from boto.s3.connection import S3Connection from boto.s3.key import Key from freezegun import freeze_time -from parameterized import parameterized import six import requests -import tests.backport_assert_raises # noqa -from nose import SkipTest -from nose.tools import assert_raises +from moto.s3.responses import DEFAULT_REGION_NAME +from unittest import SkipTest +import pytest import sure # noqa @@ -37,6 +37,7 @@ import moto.s3.models as s3model from moto.core.exceptions import InvalidNextTokenException from moto.core.utils import py2_strip_unicode_keys + if settings.TEST_SERVER_MODE: REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"' @@ -46,8 +47,8 @@ else: def reduced_min_part_size(f): - """ 
speed up tests by temporarily making the multipart minimum part size - small + """speed up tests by temporarily making the multipart minimum part size + small """ orig_size = s3model.UPLOAD_PART_MIN_SIZE @@ -68,7 +69,7 @@ class MyModel(object): self.value = value def save(self): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.put_object(Bucket="mybucket", Key=self.name, Body=self.value) @@ -119,7 +120,7 @@ def test_append_to_value__empty_key(): @mock_s3 def test_my_model_save(): # Create Bucket so that test can run - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") #################################### @@ -133,7 +134,7 @@ def test_my_model_save(): @mock_s3 def test_key_etag(): - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") model_instance = MyModel("steve", "is awesome") @@ -424,7 +425,7 @@ def test_copy_key(): bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") -@parameterized([("the-unicode-💩-key",), ("key-with?question-mark",)]) +@pytest.mark.parametrize("key_name", ["the-unicode-💩-key", "key-with?question-mark"]) @mock_s3_deprecated def test_copy_key_with_special_chars(key_name): conn = boto.connect_s3("the_key", "the_secret") @@ -519,9 +520,9 @@ def test_bucket_with_dash(): def test_create_existing_bucket(): "Trying to create a bucket that already exists should raise an Error" conn = boto.s3.connect_to_region("us-west-2") - conn.create_bucket("foobar") - with assert_raises(S3CreateError): - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="us-west-2") + with pytest.raises(S3CreateError): + conn.create_bucket("foobar", location="us-west-2") @mock_s3_deprecated @@ -535,7 +536,7 @@ def test_create_existing_bucket_in_us_east_1(): us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists it Amazon S3 will not do anything). 
""" - conn = boto.s3.connect_to_region("us-east-1") + conn = boto.s3.connect_to_region(DEFAULT_REGION_NAME) conn.create_bucket("foobar") bucket = conn.create_bucket("foobar") bucket.name.should.equal("foobar") @@ -544,7 +545,7 @@ def test_create_existing_bucket_in_us_east_1(): @mock_s3_deprecated def test_other_region(): conn = S3Connection("key", "secret", host="s3-website-ap-southeast-2.amazonaws.com") - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="ap-southeast-2") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) @@ -662,9 +663,9 @@ def test_delete_keys_invalid(): @mock_s3 def test_boto3_delete_empty_keys_list(): - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: boto3.client("s3").delete_objects(Bucket="foobar", Delete={"Objects": []}) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" @mock_s3_deprecated @@ -995,7 +996,9 @@ def test_bucket_acl_switching(): def test_s3_object_in_public_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="public-read") + bucket.create( + ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") @@ -1010,31 +1013,83 @@ def test_s3_object_in_public_bucket(): bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt") - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get() - exc.exception.response["Error"]["Code"].should.equal("403") + exc.value.response["Error"]["Code"].should.equal("403") + + +@mock_s3 +def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): + s3 = boto3.resource("s3") + bucket = s3.Bucket("test-bucket") + bucket.create( + ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) + bucket.put_object(Body=b"ABCD", Key="file.txt") params = {"Bucket": "test-bucket", "Key": "file.txt"} presigned_url = boto3.client("s3").generate_presigned_url( "get_object", params, ExpiresIn=900 ) - response = requests.get(presigned_url) + for i in range(1, 10): + response = requests.get(presigned_url) + assert response.status_code == 200, "Failed on req number {}".format(i) + + +@mock_s3 +def test_streaming_upload_from_file_to_presigned_url(): + s3 = boto3.resource("s3", region_name="us-east-1") + bucket = s3.Bucket("test-bucket") + bucket.create() + bucket.put_object(Body=b"ABCD", Key="file.txt") + + params = {"Bucket": "test-bucket", "Key": "file.txt"} + presigned_url = boto3.client("s3").generate_presigned_url( + "put_object", params, ExpiresIn=900 + ) + with open(__file__, "rb") as f: + response = requests.get(presigned_url, data=f) assert response.status_code == 200 +@mock_s3 +def test_multipart_upload_from_file_to_presigned_url(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket="mybucket") + + params = {"Bucket": "mybucket", "Key": "file_upload"} + presigned_url = boto3.client("s3").generate_presigned_url( + "put_object", params, ExpiresIn=900 + ) + + file = open("text.txt", "w") + file.write("test") + file.close() + files = {"upload_file": open("text.txt", "rb")} + + requests.put(presigned_url, files=files) + resp = s3.get_object(Bucket="mybucket", Key="file_upload") + data = resp["Body"].read() + assert data == b"test" + # cleanup + os.remove("text.txt") + + @mock_s3 def 
test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="private") + bucket.create( + ACL="private", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") s3_anonymous.meta.client.meta.events.register("choose-signer.s3.*", disable_signing) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get() - exc.exception.response["Error"]["Code"].should.equal("403") + exc.value.response["Error"]["Code"].should.equal("403") bucket.put_object(ACL="public-read", Body=b"ABCD", Key="file.txt") contents = ( @@ -1086,19 +1141,421 @@ def test_setting_content_encoding(): @mock_s3_deprecated def test_bucket_location(): conn = boto.s3.connect_to_region("us-west-2") - bucket = conn.create_bucket("mybucket") + bucket = conn.create_bucket("mybucket", location="us-west-2") bucket.get_location().should.equal("us-west-2") @mock_s3 -def test_bucket_location_us_east_1(): - cli = boto3.client("s3") +def test_bucket_location_default(): + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" # No LocationConstraint ==> us-east-1 cli.create_bucket(Bucket=bucket_name) cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal(None) +@mock_s3 +def test_bucket_location_nondefault(): + cli = boto3.client("s3", region_name="eu-central-1") + bucket_name = "mybucket" + # LocationConstraint set for non default regions + resp = cli.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) + cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal( + "eu-central-1" + ) + + +# Test uses current Region to determine whether to throw an error +# Region is retrieved based on current URL +# URL will always be localhost in Server Mode, so can't run it there +if not settings.TEST_SERVER_MODE: + + @mock_s3 + def test_s3_location_should_error_outside_useast1(): + s3 = boto3.client("s3", region_name="eu-west-1") + + bucket_name = "asdfasdfsdfdsfasda" + + with pytest.raises(ClientError) as e: + s3.create_bucket(Bucket=bucket_name) + e.value.response["Error"]["Message"].should.equal( + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." + ) + + # All tests for s3-control cannot be run under the server without a modification of the + # hosts file on your system. This is due to the fact that the URL to the host is in the form of: + # ACCOUNT_ID.s3-control.amazonaws.com <-- That Account ID part is the problem. If you want to + # make use of the moto server, update your hosts file for `THE_ACCOUNT_ID_FOR_MOTO.localhost` + # and this will work fine. 
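+    # For example, assuming moto's default ACCOUNT_ID of 123456789012, a
+    # hypothetical hosts entry such as:
+    #   127.0.0.1   123456789012.localhost
+    # lets the account-prefixed hostname resolve to the local moto server.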
+ + @mock_s3 + def test_get_public_access_block_for_account(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + + # With an invalid account ID: + with pytest.raises(ClientError) as ce: + client.get_public_access_block(AccountId="111111111111") + assert ce.value.response["Error"]["Code"] == "AccessDenied" + + # Without one defined: + with pytest.raises(ClientError) as ce: + client.get_public_access_block(AccountId=ACCOUNT_ID) + assert ( + ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" + ) + + # Put a with an invalid account ID: + with pytest.raises(ClientError) as ce: + client.put_public_access_block( + AccountId="111111111111", + PublicAccessBlockConfiguration={"BlockPublicAcls": True}, + ) + assert ce.value.response["Error"]["Code"] == "AccessDenied" + + # Put with an invalid PAB: + with pytest.raises(ClientError) as ce: + client.put_public_access_block( + AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={} + ) + assert ce.value.response["Error"]["Code"] == "InvalidRequest" + assert ( + "Must specify at least one configuration." + in ce.value.response["Error"]["Message"] + ) + + # Correct PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Get the correct PAB (for all regions): + for region in Session().get_available_regions("s3control"): + region_client = boto3.client("s3control", region_name=region) + assert region_client.get_public_access_block(AccountId=ACCOUNT_ID)[ + "PublicAccessBlockConfiguration" + ] == { + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + } + + # Delete with an invalid account ID: + with pytest.raises(ClientError) as ce: + client.delete_public_access_block(AccountId="111111111111") + assert ce.value.response["Error"]["Code"] == "AccessDenied" + + # Delete successfully: + client.delete_public_access_block(AccountId=ACCOUNT_ID) + + # Confirm that it's deleted: + with pytest.raises(ClientError) as ce: + client.get_public_access_block(AccountId=ACCOUNT_ID) + assert ( + ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" + ) + + @mock_s3 + @mock_config + def test_config_list_account_pab(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + # Create the aggregator: + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="testing", + AccountAggregationSources=[account_aggregation_source], + ) + + # Without a PAB in place: + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock" + ) + assert not result["resourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + ) + assert not result["ResourceIdentifiers"] + + # Create a PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Test that successful queries work (non-aggregated): + result = 
config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock" + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", + resourceIds=[ACCOUNT_ID, "nope"], + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="" + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + + # Test that successful queries work (aggregated): + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + ) + regions = {region for region in Session().get_available_regions("config")} + for r in result["ResourceIdentifiers"]: + regions.remove(r.pop("SourceRegion")) + assert r == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + } + + # Just check that the len is the same -- this should be reasonable + regions = {region for region in Session().get_available_regions("config")} + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": ""}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={ + "ResourceName": "", + "ResourceId": ACCOUNT_ID, + "Region": "us-west-2", + }, + ) + assert ( + result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2" + and len(result["ResourceIdentifiers"]) == 1 + ) + + # Test aggregator pagination: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + ) + regions = sorted( + [region for region in Session().get_available_regions("config")] + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[0], + } + assert result["NextToken"] == regions[1] + + # Get the next region: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + NextToken=regions[1], + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[1], + } + + # Non-aggregated with incorrect info: + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope" + ) + assert not result["resourceIdentifiers"] + result = 
config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"] + ) + assert not result["resourceIdentifiers"] + + # Aggregated with incorrect info: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceId": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"Region": "Nope"}, + ) + assert not result["ResourceIdentifiers"] + + @mock_s3 + @mock_config + def test_config_get_account_pab(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + # Create the aggregator: + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="testing", + AccountAggregationSources=[account_aggregation_source], + ) + + # Without a PAB in place: + with pytest.raises(ClientError) as ce: + config_client.get_resource_config_history( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID + ) + assert ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException" + # aggregate + result = config_client.batch_get_resource_config( + resourceKeys=[ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": "ACCOUNT_ID", + } + ] + ) + assert not result["baseConfigurationItems"] + result = config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="testing", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "us-west-2", + "ResourceId": ACCOUNT_ID, + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "ResourceName": "", + } + ], + ) + assert not result["BaseConfigurationItems"] + + # Create a PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Get the proper config: + proper_config = { + "blockPublicAcls": True, + "ignorePublicAcls": True, + "blockPublicPolicy": True, + "restrictPublicBuckets": True, + } + result = config_client.get_resource_config_history( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID + ) + assert ( + json.loads(result["configurationItems"][0]["configuration"]) + == proper_config + ) + assert ( + result["configurationItems"][0]["accountId"] + == result["configurationItems"][0]["resourceId"] + == ACCOUNT_ID + ) + result = config_client.batch_get_resource_config( + resourceKeys=[ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + ) + assert len(result["baseConfigurationItems"]) == 1 + assert ( + json.loads(result["baseConfigurationItems"][0]["configuration"]) + == proper_config + ) + assert ( + result["baseConfigurationItems"][0]["accountId"] + == result["baseConfigurationItems"][0]["resourceId"] + == ACCOUNT_ID + ) + + for region in 
Session().get_available_regions("s3control"): + result = config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="testing", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": region, + "ResourceId": ACCOUNT_ID, + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "ResourceName": "", + } + ], + ) + assert len(result["BaseConfigurationItems"]) == 1 + assert ( + json.loads(result["BaseConfigurationItems"][0]["configuration"]) + == proper_config + ) + + @mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() @@ -1170,10 +1627,10 @@ def test_policy(): } ) - with assert_raises(S3ResponseError) as err: + with pytest.raises(S3ResponseError) as err: bucket.get_policy() - ex = err.exception + ex = err.value ex.box_usage.should.be.none ex.error_code.should.equal("NoSuchBucketPolicy") ex.message.should.equal("The bucket policy does not exist") @@ -1191,7 +1648,7 @@ def test_policy(): bucket.delete_policy() - with assert_raises(S3ResponseError) as err: + with pytest.raises(S3ResponseError) as err: bucket.get_policy() @@ -1222,7 +1679,7 @@ def test_key_with_trailing_slash_in_ordinary_calling_format(): @mock_s3 def test_boto3_key_etag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") resp = s3.get_object(Bucket="mybucket", Key="steve") @@ -1231,7 +1688,7 @@ def test_boto3_key_etag(): @mock_s3 def test_website_redirect_location(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") @@ -1246,9 +1703,20 @@ def test_website_redirect_location(): resp["WebsiteRedirectLocation"].should.equal(url) +@mock_s3 +def test_delimiter_optional_in_response(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket="mybucket") + s3.put_object(Bucket="mybucket", Key="one", Body=b"1") + resp = s3.list_objects(Bucket="mybucket", MaxKeys=1) + assert resp.get("Delimiter") is None + resp = s3.list_objects(Bucket="mybucket", MaxKeys=1, Delimiter="/") + assert resp.get("Delimiter") == "/" + + @mock_s3 def test_boto3_list_objects_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1262,7 +1730,7 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == True assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" in resp next_marker = resp["NextMarker"] @@ -1275,7 +1743,7 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == True assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" in resp next_marker = resp["NextMarker"] @@ -1288,13 +1756,13 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == False assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" not in resp @mock_s3 def 
test_boto3_list_keys_xml_escaped(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") key_name = "Q&A.txt" s3.put_object(Bucket="mybucket", Key=key_name, Body=b"is awesome") @@ -1314,7 +1782,7 @@ def test_boto3_list_keys_xml_escaped(): @mock_s3 def test_boto3_list_objects_v2_common_prefix_pagination(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") max_keys = 1 @@ -1343,7 +1811,7 @@ def test_boto3_list_objects_v2_common_prefix_pagination(): @mock_s3 def test_boto3_list_objects_v2_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1400,7 +1868,7 @@ def test_boto3_list_objects_v2_truncated_response(): @mock_s3 def test_boto3_list_objects_v2_truncated_response_start_after(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1442,7 +1910,7 @@ def test_boto3_list_objects_v2_truncated_response_start_after(): @mock_s3 def test_boto3_list_objects_v2_fetch_owner(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"11") @@ -1456,7 +1924,7 @@ def test_boto3_list_objects_v2_fetch_owner(): @mock_s3 def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="1/2", Body="") s3.put_object(Bucket="mybucket", Key="2", Body="") @@ -1486,7 +1954,7 @@ def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): @mock_s3 def test_boto3_bucket_create(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1502,27 +1970,30 @@ def test_bucket_create_duplicate(): s3.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} ) - exc.exception.response["Error"]["Code"].should.equal("BucketAlreadyExists") + exc.value.response["Error"]["Code"].should.equal("BucketAlreadyExists") @mock_s3 def test_bucket_create_force_us_east_1(): - s3 = boto3.resource("s3", region_name="us-east-1") - with assert_raises(ClientError) as exc: + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) + with pytest.raises(ClientError) as exc: s3.create_bucket( - Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-east-1"} + Bucket="blah", + CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME}, ) - exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint") + exc.value.response["Error"]["Code"].should.equal("InvalidLocationConstraint") @mock_s3 def 
test_boto3_bucket_create_eu_central(): s3 = boto3.resource("s3", region_name="eu-central-1") - s3.create_bucket(Bucket="blah") + s3.create_bucket( + Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-central-1"} + ) s3.Object("blah", "hello.txt").put(Body="some text") @@ -1532,8 +2003,17 @@ def test_boto3_bucket_create_eu_central(): @mock_s3 -def test_boto3_head_object(): +def test_bucket_create_empty_bucket_configuration_should_return_malformed_xml_error(): s3 = boto3.resource("s3", region_name="us-east-1") + with pytest.raises(ClientError) as e: + s3.create_bucket(Bucket="whatever", CreateBucketConfiguration={}) + e.value.response["Error"]["Code"].should.equal("MalformedXML") + e.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_s3 +def test_boto3_head_object(): + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1542,16 +2022,16 @@ def test_boto3_head_object(): Bucket="blah", Key="hello.txt" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello2.txt").meta.client.head_object( Bucket="blah", Key="hello_bad.txt" ) - e.exception.response["Error"]["Code"].should.equal("404") + e.value.response["Error"]["Code"].should.equal("404") @mock_s3 def test_boto3_bucket_deletion(): - cli = boto3.client("s3", region_name="us-east-1") + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) cli.create_bucket(Bucket="foobar") cli.put_object(Bucket="foobar", Key="the-key", Body="some value") @@ -1582,7 +2062,7 @@ def test_boto3_bucket_deletion(): @mock_s3 def test_boto3_get_object(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1591,28 +2071,44 @@ def test_boto3_get_object(): Bucket="blah", Key="hello.txt" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello2.txt").get() - e.exception.response["Error"]["Code"].should.equal("NoSuchKey") + e.value.response["Error"]["Code"].should.equal("NoSuchKey") + + +@mock_s3 +def test_boto3_s3_content_type(): + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) + my_bucket = s3.Bucket("my-cool-bucket") + my_bucket.create() + s3_path = "test_s3.py" + s3 = boto3.resource("s3", verify=False) + + content_type = "text/python-x" + s3.Object(my_bucket.name, s3_path).put( + ContentType=content_type, Body=b"some python code", ACL="public-read" + ) + + s3.Object(my_bucket.name, s3_path).content_type.should.equal(content_type) @mock_s3 def test_boto3_get_missing_object_with_part_number(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello.txt").meta.client.head_object( Bucket="blah", Key="hello.txt", PartNumber=123 ) - e.exception.response["Error"]["Code"].should.equal("404") + e.value.response["Error"]["Code"].should.equal("404") @mock_s3 def test_boto3_head_object_with_versioning(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) bucket = s3.create_bucket(Bucket="blah") bucket.Versioning().enable() @@ -1642,7 +2138,7 @@ def test_boto3_head_object_with_versioning(): @mock_s3 def 
test_boto3_copy_object_with_versioning(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1674,13 +2170,13 @@ def test_boto3_copy_object_with_versioning(): obj3_version_new.should_not.equal(obj2_version_new) # Copy file that doesn't exist - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.copy_object( CopySource={"Bucket": "blah", "Key": "test4", "VersionId": obj2_version}, Bucket="blah", Key="test5", ) - e.exception.response["Error"]["Code"].should.equal("404") + e.value.response["Error"]["Code"].should.equal("404") response = client.create_multipart_upload(Bucket="blah", Key="test4") upload_id = response["UploadId"] @@ -1704,9 +2200,22 @@ def test_boto3_copy_object_with_versioning(): data.should.equal(b"test2") +@mock_s3 +def test_s3_abort_multipart_data_with_invalid_upload_and_key(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + + with pytest.raises(Exception) as err: + client.abort_multipart_upload( + Bucket="blah", Key="foobar", UploadId="dummy_upload_id" + ) + err.value.response["Error"]["Code"].should.equal("NoSuchUpload") + + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="src", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1728,9 +2237,37 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): obj2_version_new.should_not.equal(None) +@mock_s3 +def test_boto3_copy_object_with_replacement_tagging(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + client.create_bucket(Bucket="mybucket") + client.put_object( + Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old" + ) + + client.copy_object( + CopySource={"Bucket": "mybucket", "Key": "original"}, + Bucket="mybucket", + Key="copy1", + TaggingDirective="REPLACE", + Tagging="tag=new", + ) + client.copy_object( + CopySource={"Bucket": "mybucket", "Key": "original"}, + Bucket="mybucket", + Key="copy2", + TaggingDirective="COPY", + ) + + tags1 = client.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"] + tags1.should.equal([{"Key": "tag", "Value": "new"}]) + tags2 = client.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"] + tags2.should.equal([{"Key": "tag", "Value": "old"}]) + + @mock_s3 def test_boto3_deleted_versionings_list(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") client.put_bucket_versioning( @@ -1745,9 +2282,32 @@ def test_boto3_deleted_versionings_list(): assert len(listed["Contents"]) == 1 +@mock_s3 +def test_boto3_delete_objects_for_specific_version_id(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + client.create_bucket(Bucket="blah") + client.put_bucket_versioning( + Bucket="blah", VersioningConfiguration={"Status": "Enabled"} + ) + + client.put_object(Bucket="blah", Key="test1", Body=b"test1a") + client.put_object(Bucket="blah", Key="test1", Body=b"test1b") + + response = client.list_object_versions(Bucket="blah", Prefix="test1") + id_to_delete = [v["VersionId"] for v in response["Versions"] if v["IsLatest"]][0] + + response = client.delete_objects( + Bucket="blah", 
Delete={"Objects": [{"Key": "test1", "VersionId": id_to_delete}]} + ) + assert response["Deleted"] == [{"Key": "test1", "VersionId": id_to_delete}] + + listed = client.list_objects_v2(Bucket="blah") + assert len(listed["Contents"]) == 1 + + @mock_s3 def test_boto3_delete_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") client.put_bucket_versioning( @@ -1760,9 +2320,33 @@ def test_boto3_delete_versioned_bucket(): client.delete_bucket(Bucket="blah") +@mock_s3 +def test_boto3_delete_versioned_bucket_returns_meta(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + client.put_bucket_versioning( + Bucket="blah", VersioningConfiguration={"Status": "Enabled"} + ) + + put_resp = client.put_object(Bucket="blah", Key="test1", Body=b"test1") + + # Delete the object + del_resp = client.delete_object(Bucket="blah", Key="test1") + assert "DeleteMarker" not in del_resp + assert del_resp["VersionId"] is not None + + # Delete the delete marker + del_resp2 = client.delete_object( + Bucket="blah", Key="test1", VersionId=del_resp["VersionId"] + ) + assert del_resp2["DeleteMarker"] == True + assert "VersionId" not in del_resp2 + + @mock_s3 def test_boto3_get_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1770,19 +2354,19 @@ def test_boto3_get_object_if_modified_since(): s3.put_object(Bucket=bucket_name, Key=key, Body="test") - with assert_raises(botocore.exceptions.ClientError) as err: + with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( Bucket=bucket_name, Key=key, IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1), ) - e = err.exception + e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @mock_s3 -def test_boto3_head_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") +def test_boto3_get_object_if_unmodified_since(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1790,13 +2374,127 @@ def test_boto3_head_object_if_modified_since(): s3.put_object(Bucket=bucket_name, Key=key, Body="test") - with assert_raises(botocore.exceptions.ClientError) as err: + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, + Key=key, + IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1), + ) + e = err.value + e.response["Error"]["Code"].should.equal("PreconditionFailed") + e.response["Error"]["Condition"].should.equal("If-Unmodified-Since") + + +@mock_s3 +def test_boto3_get_object_if_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, Key=key, IfMatch='"hello"', + ) + e = err.value + e.response["Error"]["Code"].should.equal("PreconditionFailed") + e.response["Error"]["Condition"].should.equal("If-Match") + + +@mock_s3 +def test_boto3_get_object_if_none_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = 
"hello.txt" + + etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"] + + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, Key=key, IfNoneMatch=etag, + ) + e = err.value + e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) + + +@mock_s3 +def test_boto3_head_object_if_modified_since(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( Bucket=bucket_name, Key=key, IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1), ) - e = err.exception + e = err.value + e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) + + +@mock_s3 +def test_boto3_head_object_if_unmodified_since(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, + Key=key, + IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1), + ) + e = err.value + e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) + + +@mock_s3 +def test_boto3_head_object_if_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, Key=key, IfMatch='"hello"', + ) + e = err.value + e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) + + +@mock_s3 +def test_boto3_head_object_if_none_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"] + + with pytest.raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, Key=key, IfNoneMatch=etag, + ) + e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @@ -1804,7 +2502,7 @@ def test_boto3_head_object_if_modified_since(): @reduced_min_part_size def test_boto3_multipart_etag(): # Create Bucket so that test can run - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"] @@ -1848,7 +2546,7 @@ def test_boto3_multipart_etag(): @mock_s3 @reduced_min_part_size def test_boto3_multipart_part_size(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key") @@ -1883,21 +2581,25 @@ def test_boto3_multipart_part_size(): @mock_s3 def test_boto3_put_object_with_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) s3.put_object(Bucket=bucket_name, Key=key, Body="test", 
Tagging="foo=bar") - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.contain( + {"Key": "foo", "Value": "bar"} + ) - resp["TagSet"].should.contain({"Key": "foo", "Value": "bar"}) + s3.delete_object_tagging(Bucket=bucket_name, Key=key) + + s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.equal([]) @mock_s3 def test_boto3_put_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -1925,7 +2627,7 @@ def test_boto3_put_bucket_tagging(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) # With duplicate tag keys: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: resp = s3.put_bucket_tagging( Bucket=bucket_name, Tagging={ @@ -1935,16 +2637,34 @@ def test_boto3_put_bucket_tagging(): ] }, ) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("InvalidTag") e.response["Error"]["Message"].should.equal( "Cannot provide multiple Tags with the same key" ) + # Cannot put tags that are "system" tags - i.e. tags that start with "aws:" + with pytest.raises(ClientError) as ce: + s3.put_bucket_tagging( + Bucket=bucket_name, + Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]}, + ) + e = ce.value + e.response["Error"]["Code"].should.equal("InvalidTag") + e.response["Error"]["Message"].should.equal( + "System tags cannot be added/updated by requester" + ) + + # This is OK though: + s3.put_bucket_tagging( + Bucket=bucket_name, + Tagging={"TagSet": [{"Key": "something:aws:stuff", "Value": "this is fine"}]}, + ) + @mock_s3 def test_boto3_get_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_tagging( @@ -1965,17 +2685,17 @@ def test_boto3_get_bucket_tagging(): # With no tags: s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []}) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_tagging(Bucket=bucket_name) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("NoSuchTagSet") e.response["Error"]["Message"].should.equal("The TagSet does not exist") @mock_s3 def test_boto3_delete_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -1992,17 +2712,17 @@ def test_boto3_delete_bucket_tagging(): resp = s3.delete_bucket_tagging(Bucket=bucket_name) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_tagging(Bucket=bucket_name) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("NoSuchTagSet") e.response["Error"]["Message"].should.equal("The TagSet does not exist") @mock_s3 def test_boto3_put_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2030,7 +2750,7 @@ def test_boto3_put_bucket_cors(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors( Bucket=bucket_name, 
CORSConfiguration={ @@ -2039,38 +2759,38 @@ def test_boto3_put_bucket_cors(): ] }, ) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("InvalidRequest") e.response["Error"]["Message"].should.equal( "Found unsupported HTTP method in CORS config. " "Unsupported method is NOTREAL" ) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={"CORSRules": []}) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("MalformedXML") # And 101: many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors( Bucket=bucket_name, CORSConfiguration={"CORSRules": many_rules} ) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("MalformedXML") @mock_s3 def test_boto3_get_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) # Without CORS: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_cors(Bucket=bucket_name) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") @@ -2103,7 +2823,7 @@ def test_boto3_get_bucket_cors(): @mock_s3 def test_boto3_delete_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_cors( @@ -2117,17 +2837,17 @@ def test_boto3_delete_bucket_cors(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) # Verify deletion: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_cors(Bucket=bucket_name) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") @mock_s3 def test_put_bucket_acl_body(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] s3.put_bucket_acl( @@ -2180,7 +2900,7 @@ def test_put_bucket_acl_body(): assert len(result["Grants"]) == 1 # With no owner: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_acl( Bucket="bucket", AccessControlPolicy={ @@ -2195,10 +2915,10 @@ def test_put_bucket_acl_body(): ] }, ) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" + assert err.value.response["Error"]["Code"] == "MalformedACLError" # With incorrect permission: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_acl( Bucket="bucket", AccessControlPolicy={ @@ -2214,7 +2934,7 @@ def test_put_bucket_acl_body(): "Owner": bucket_owner, }, ) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" + assert err.value.response["Error"]["Code"] == "MalformedACLError" # Clear the ACLs: result = s3.put_bucket_acl( @@ -2223,9 +2943,42 @@ def test_put_bucket_acl_body(): assert not result.get("Grants") +@mock_s3 +def test_object_acl_with_presigned_post(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + 
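# For context on the presigned-POST flow exercised in this test: generate_presigned_post
# returns a URL plus form fields (policy, signature, acl, ...) that must be sent
# together with the file under the "file" form key. A minimal sketch, assuming a
# mocked S3 backend and the `requests` library; the helper name and bucket/key are
# illustrative, and against real S3 the "acl" field would also need a matching
# Conditions entry.
import boto3
import requests

def upload_via_presigned_post(bucket, key, data, acl="public-read"):
    s3 = boto3.client("s3", region_name="us-east-1")
    post = s3.generate_presigned_post(bucket, key, Fields={"acl": acl}, ExpiresIn=3600)
    # The generated fields travel as ordinary form data alongside the file.
    return requests.post(post["url"], data=post["fields"], files={"file": (key, data)})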
bucket_name = "imageS3Bucket" + object_name = "text.txt" + fields = {"acl": "public-read"} + file = open("text.txt", "w") + file.write("test") + file.close() + + s3.create_bucket(Bucket=bucket_name) + response = s3.generate_presigned_post( + bucket_name, object_name, Fields=fields, ExpiresIn=60000 + ) + + with open(object_name, "rb") as f: + files = {"file": (object_name, f)} + requests.post(response["url"], data=response["fields"], files=files) + + response = s3.get_object_acl(Bucket=bucket_name, Key=object_name) + + assert "Grants" in response + assert len(response["Grants"]) == 2 + assert response["Grants"][1]["Permission"] == "READ" + + response = s3.get_object(Bucket=bucket_name, Key=object_name) + + assert "ETag" in response + assert "Body" in response + os.remove("text.txt") + + @mock_s3 def test_put_bucket_notification(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With no configuration: @@ -2421,12 +3174,12 @@ def test_put_bucket_notification(): @mock_s3 def test_put_bucket_notification_errors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With incorrect ARNs: for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -2441,13 +3194,11 @@ def test_put_bucket_notification_errors(): }, ) - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert ( - err.exception.response["Error"]["Message"] == "The ARN is not well formed" - ) + assert err.value.response["Error"]["Code"] == "InvalidArgument" + assert err.value.response["Error"]["Message"] == "The ARN is not well formed" # Region not the same as the bucket: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -2460,14 +3211,14 @@ def test_put_bucket_notification_errors(): }, ) - assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.value.response["Error"]["Code"] == "InvalidArgument" assert ( - err.exception.response["Error"]["Message"] + err.value.response["Error"]["Message"] == "The notification destination service region is not valid for the bucket location constraint" ) # Invalid event name: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -2479,16 +3230,16 @@ def test_put_bucket_notification_errors(): ] }, ) - assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.value.response["Error"]["Code"] == "InvalidArgument" assert ( - err.exception.response["Error"]["Message"] + err.value.response["Error"]["Message"] == "The event is not supported for notifications" ) @mock_s3 def test_boto3_put_bucket_logging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" log_bucket = "logbucket" wrong_region_bucket = "wrongregionlogbucket" @@ -2504,25 +3255,25 @@ def test_boto3_put_bucket_logging(): assert not result.get("LoggingEnabled") # A log-bucket that doesn't exist: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as 
err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ "LoggingEnabled": {"TargetBucket": "IAMNOTREAL", "TargetPrefix": ""} }, ) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert err.value.response["Error"]["Code"] == "InvalidTargetBucketForLogging" # A log-bucket that's missing the proper ACLs for LogDelivery: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ "LoggingEnabled": {"TargetBucket": log_bucket, "TargetPrefix": ""} }, ) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" - assert "log-delivery" in err.exception.response["Error"]["Message"] + assert err.value.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.value.response["Error"]["Message"] # Add the proper "log-delivery" ACL to the log buckets: bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] @@ -2555,7 +3306,7 @@ def test_boto3_put_bucket_logging(): ) # A log-bucket that's in the wrong region: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -2565,7 +3316,7 @@ def test_boto3_put_bucket_logging(): } }, ) - assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + assert err.value.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" # Correct logging: s3.put_bucket_logging( @@ -2643,7 +3394,7 @@ def test_boto3_put_bucket_logging(): assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 # With an invalid grant: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -2662,17 +3413,17 @@ def test_boto3_put_bucket_logging(): } }, ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" @mock_s3 def test_boto3_put_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -2684,7 +3435,7 @@ def test_boto3_put_object_tagging(): }, ) - e = err.exception + e = err.value e.response["Error"].should.equal( { "Code": "NoSuchKey", @@ -2711,7 +3462,7 @@ def test_boto3_put_object_tagging(): @mock_s3 def test_boto3_put_object_tagging_on_earliest_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2720,7 +3471,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): bucket_versioning.enable() bucket_versioning.status.should.equal("Enabled") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -2732,7 +3483,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): }, ) - e = err.exception + e = err.value e.response["Error"].should.equal( { "Code": "NoSuchKey", @@ -2765,7 +3516,8 @@ def test_boto3_put_object_tagging_on_earliest_version(): # Older version has tags while the most recent does not resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, 
VersionId=first_object.id) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}] ) @@ -2778,7 +3530,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): @mock_s3 def test_boto3_put_object_tagging_on_both_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2787,7 +3539,7 @@ def test_boto3_put_object_tagging_on_both_version(): bucket_versioning.enable() bucket_versioning.status.should.equal("Enabled") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -2799,7 +3551,7 @@ def test_boto3_put_object_tagging_on_both_version(): }, ) - e = err.exception + e = err.value e.response["Error"].should.equal( { "Code": "NoSuchKey", @@ -2843,7 +3595,8 @@ def test_boto3_put_object_tagging_on_both_version(): resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}] ) @@ -2851,14 +3604,15 @@ def test_boto3_put_object_tagging_on_both_version(): Bucket=bucket_name, Key=key, VersionId=second_object.id ) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "baz"}, {"Key": "item2", "Value": "bin"}] ) @mock_s3 def test_boto3_put_object_tagging_with_single_tag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2876,7 +3630,7 @@ def test_boto3_put_object_tagging_with_single_tag(): @mock_s3 def test_boto3_get_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2905,7 +3659,7 @@ def test_boto3_get_object_tagging(): @mock_s3 def test_boto3_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2927,7 +3681,7 @@ def test_boto3_list_object_versions(): @mock_s3 def test_boto3_list_object_versions_with_versioning_disabled(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2950,7 +3704,7 @@ def test_boto3_list_object_versions_with_versioning_disabled(): @mock_s3 def test_boto3_list_object_versions_with_versioning_enabled_late(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2978,7 +3732,7 @@ def test_boto3_list_object_versions_with_versioning_enabled_late(): 
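# The sorted() comparisons introduced above guard against S3 returning TagSet
# entries in an unspecified order. The same idea as a small reusable helper, as a
# sketch (the helper name is illustrative):
def assert_tagset_equal(actual, expected):
    """Compare two TagSets ([{"Key": ..., "Value": ...}, ...]) ignoring order."""
    by_key = lambda tag: tag["Key"]
    assert sorted(actual, key=by_key) == sorted(expected, key=by_key)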
@mock_s3 def test_boto3_bad_prefix_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" bad_prefix = "key-that-does-not-exist" @@ -2997,7 +3751,7 @@ def test_boto3_bad_prefix_list_object_versions(): @mock_s3 def test_boto3_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3010,9 +3764,9 @@ def test_boto3_delete_markers(): s3.delete_objects(Bucket=bucket_name, Delete={"Objects": [{"Key": key}]}) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) - e.exception.response["Error"]["Code"].should.equal("NoSuchKey") + e.value.response["Error"]["Code"].should.equal("NoSuchKey") response = s3.list_object_versions(Bucket=bucket_name) response["Versions"].should.have.length_of(2) @@ -3040,7 +3794,7 @@ def test_boto3_delete_markers(): @mock_s3 def test_boto3_multiple_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3058,7 +3812,7 @@ def test_boto3_multiple_delete_markers(): response = s3.list_object_versions(Bucket=bucket_name) response["DeleteMarkers"].should.have.length_of(2) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) e.value.response["Error"]["Code"].should.equal("404") @@ -3091,7 +3845,7 @@ def test_boto3_multiple_delete_markers(): def test_get_stream_gzipped(): payload = b"this is some stuff here" - s3_client = boto3.client("s3", region_name="us-east-1") + s3_client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3_client.create_bucket(Bucket="moto-tests") buffer_ = BytesIO() with GzipFile(fileobj=buffer_, mode="w") as f: @@ -3129,24 +3883,24 @@ TEST_XML = """\ @mock_s3 def test_boto3_bucket_name_too_long(): - s3 = boto3.client("s3", region_name="us-east-1") - with assert_raises(ClientError) as exc: + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + with pytest.raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 64) - exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") + exc.value.response["Error"]["Code"].should.equal("InvalidBucketName") @mock_s3 def test_boto3_bucket_name_too_short(): - s3 = boto3.client("s3", region_name="us-east-1") - with assert_raises(ClientError) as exc: + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + with pytest.raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 2) - exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") + exc.value.response["Error"]["Code"].should.equal("InvalidBucketName") @mock_s3 def test_accelerated_none_when_unspecified(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.shouldnt.have.key("Status") @@ -3155,7 +3909,7 @@ def test_accelerated_none_when_unspecified(): @mock_s3 def test_can_enable_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) 
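# For context on the delete-marker tests above: in a versioned bucket, a plain
# DeleteObject inserts a delete marker rather than removing data; the older
# versions survive, and deleting the marker itself restores the key. A minimal
# sketch, assuming boto3 (the helper name is illustrative):
def undelete(s3, bucket, key):
    """Remove the newest delete marker for `key`, restoring the previous version."""
    versions = s3.list_object_versions(Bucket=bucket, Prefix=key)
    for marker in versions.get("DeleteMarkers", []):
        if marker["IsLatest"] and marker["Key"] == key:
            s3.delete_object(Bucket=bucket, Key=key, VersionId=marker["VersionId"])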
resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} @@ -3171,7 +3925,7 @@ def test_can_enable_bucket_acceleration(): @mock_s3 def test_can_suspend_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} @@ -3191,7 +3945,10 @@ def test_can_suspend_bucket_acceleration(): def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): bucket_name = "some_bucket" s3 = boto3.client("s3") - s3.create_bucket(Bucket=bucket_name) + s3.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Suspended"} ) @@ -3205,29 +3962,29 @@ def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): @mock_s3 def test_accelerate_configuration_status_validation(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "bad_status"} ) - exc.exception.response["Error"]["Code"].should.equal("MalformedXML") + exc.value.response["Error"]["Code"].should.equal("MalformedXML") @mock_s3 def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): bucket_name = "some.bucket.with.dots" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} ) - exc.exception.response["Error"]["Code"].should.equal("InvalidRequest") + exc.value.response["Error"]["Code"].should.equal("InvalidRequest") def store_and_read_back_a_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3245,17 +4002,39 @@ def test_paths_with_leading_slashes_work(): @mock_s3 def test_root_dir_with_empty_name_works(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Does not work in server mode due to error in Werkzeug") store_and_read_back_a_key("/") -@parameterized( - [("foo/bar/baz",), ("foo",), ("foo/run_dt%3D2019-01-01%252012%253A30%253A00",)] +@pytest.mark.parametrize("bucket_name", ["mybucket", "my.bucket"]) +@mock_s3 +def test_leading_slashes_not_removed(bucket_name): + """Make sure that leading slashes are not removed internally.""" + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket=bucket_name) + + uploaded_key = "/key" + invalid_key_1 = "key" + invalid_key_2 = "//key" + + s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b"Some body") + + with pytest.raises(ClientError) as e: + s3.get_object(Bucket=bucket_name, Key=invalid_key_1) + e.value.response["Error"]["Code"].should.equal("NoSuchKey") + + with pytest.raises(ClientError) as e: + s3.get_object(Bucket=bucket_name, Key=invalid_key_2) + 
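# The nose-style @parameterized decorator is replaced by pytest.mark.parametrize
# throughout this file; each listed value becomes an independent test invocation.
# A minimal self-contained sketch (names and values are illustrative):
import pytest

@pytest.mark.parametrize("key,expected_length", [("a", 1), ("foo/bar", 7)])
def test_key_length(key, expected_length):
    # pytest generates one test per (key, expected_length) tuple.
    assert len(key) == expected_length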
e.value.response["Error"]["Code"].should.equal("NoSuchKey") + + +@pytest.mark.parametrize( + "key", ["foo/bar/baz", "foo", "foo/run_dt%3D2019-01-01%252012%253A30%253A00"] ) @mock_s3 def test_delete_objects_with_url_encoded_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3265,10 +4044,10 @@ def test_delete_objects_with_url_encoded_key(key): s3.put_object(Bucket=bucket_name, Key=key, Body=body) def assert_deleted(): - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) - e.exception.response["Error"]["Code"].should.equal("NoSuchKey") + e.value.response["Error"]["Code"].should.equal("NoSuchKey") put_object() s3.delete_object(Bucket=bucket_name, Key=key) @@ -3282,21 +4061,19 @@ def test_delete_objects_with_url_encoded_key(key): @mock_s3 @mock_config def test_public_access_block(): - client = boto3.client("s3") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="mybucket") # Try to get the public access block (should not exist by default) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") + assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" assert ( - ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" - ) - assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "The public access block configuration was not found" ) - assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 404 + assert ce.value.response["ResponseMetadata"]["HTTPStatusCode"] == 404 # Put a public block in place: test_map = { @@ -3336,20 +4113,20 @@ def test_public_access_block(): } # Test with a blank PublicAccessBlockConfiguration: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_public_access_block( Bucket="mybucket", PublicAccessBlockConfiguration={} ) - assert ce.exception.response["Error"]["Code"] == "InvalidRequest" + assert ce.value.response["Error"]["Code"] == "InvalidRequest" assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Must specify at least one configuration." 
) - assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 400 + assert ce.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400 # Test that things work with AWS Config: - config_client = boto3.client("config", region_name="us-east-1") + config_client = boto3.client("config", region_name=DEFAULT_REGION_NAME) result = config_client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="mybucket" ) @@ -3369,11 +4146,9 @@ def test_public_access_block(): # Delete: client.delete_public_access_block(Bucket="mybucket") - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") - assert ( - ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" @mock_s3 @@ -3514,10 +4289,10 @@ def test_list_config_discovered_resources(): ) # With an invalid page: - with assert_raises(InvalidNextTokenException) as inte: + with pytest.raises(InvalidNextTokenException) as inte: s3_config_query.list_config_service_resources(None, None, 1, "notabucket") - assert "The nextToken provided is invalid" in inte.exception.message + assert "The nextToken provided is invalid" in inte.value.message @mock_s3 @@ -3799,20 +4574,13 @@ def test_s3_config_dict(): FakeAcl, FakeGrant, FakeGrantee, - FakeTag, - FakeTagging, - FakeTagSet, OWNER, ) # Without any buckets: assert not s3_config_query.get_config_resource("some_bucket") - tags = FakeTagging( - FakeTagSet( - [FakeTag("someTag", "someValue"), FakeTag("someOtherTag", "someOtherValue")] - ) - ) + tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"} # With 1 bucket in us-west-2: s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2") @@ -3935,3 +4703,211 @@ def test_s3_config_dict(): assert not logging_bucket["supplementaryConfiguration"].get( "BucketTaggingConfiguration" ) + + +@mock_s3 +def test_creating_presigned_post(): + bucket = "presigned-test" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket=bucket) + success_url = "http://localhost/completed" + fdata = b"test data\n" + file_uid = uuid.uuid4() + conditions = [ + {"Content-Type": "text/plain"}, + {"x-amz-server-side-encryption": "AES256"}, + {"success_action_redirect": success_url}, + ] + conditions.append(["content-length-range", 1, 30]) + data = s3.generate_presigned_post( + Bucket=bucket, + Key="{file_uid}.txt".format(file_uid=file_uid), + Fields={ + "content-type": "text/plain", + "success_action_redirect": success_url, + "x-amz-server-side-encryption": "AES256", + }, + Conditions=conditions, + ExpiresIn=1000, + ) + resp = requests.post( + data["url"], data=data["fields"], files={"file": fdata}, allow_redirects=False + ) + assert resp.headers["Location"] == success_url + assert resp.status_code == 303 + assert ( + s3.get_object(Bucket=bucket, Key="{file_uid}.txt".format(file_uid=file_uid))[ + "Body" + ].read() + == fdata + ) + + +@mock_s3 +def test_encryption(): + # Create Bucket so that test can run + conn = boto3.client("s3", region_name="us-east-1") + conn.create_bucket(Bucket="mybucket") + + with pytest.raises(ClientError) as exc: + conn.get_bucket_encryption(Bucket="mybucket") + + sse_config = { + "Rules": [ + { + "ApplyServerSideEncryptionByDefault": { + "SSEAlgorithm": "aws:kms", + "KMSMasterKeyID": "12345678", + } + } + ] + } + + conn.put_bucket_encryption( + Bucket="mybucket", ServerSideEncryptionConfiguration=sse_config + ) + + 
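# The bare pytest.raises(ClientError) checks in this test could pin down the error
# code as well; on real S3 an unencrypted bucket reports
# "ServerSideEncryptionConfigurationNotFoundError" (an assumption worth verifying
# against moto's backend). A sketch, with an illustrative helper name:
import boto3
import pytest
from botocore.exceptions import ClientError

def assert_bucket_has_no_encryption(bucket):
    conn = boto3.client("s3", region_name="us-east-1")
    with pytest.raises(ClientError) as exc:
        conn.get_bucket_encryption(Bucket=bucket)
    assert (
        exc.value.response["Error"]["Code"]
        == "ServerSideEncryptionConfigurationNotFoundError"
    )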
resp = conn.get_bucket_encryption(Bucket="mybucket") + assert "ServerSideEncryptionConfiguration" in resp + assert resp["ServerSideEncryptionConfiguration"] == sse_config + + conn.delete_bucket_encryption(Bucket="mybucket") + with pytest.raises(ClientError) as exc: + conn.get_bucket_encryption(Bucket="mybucket") + + +@mock_s3 +def test_presigned_url_restrict_parameters(): + # Only specific params can be set + # Ensure error is thrown when adding custom metadata this way + bucket = str(uuid.uuid4()) + key = "file.txt" + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + s3 = boto3.client("s3", region_name="us-east-1") + + # Create a pre-signed url with some metadata. + with pytest.raises(botocore.exceptions.ParamValidationError) as err: + s3.generate_presigned_url( + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": key, "Unknown": "metadata"}, + ) + assert str(err.value).should.match( + r'Parameter validation failed:\nUnknown parameter in input: "Unknown", must be one of:.*' + ) + + s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +def test_presigned_put_url_with_approved_headers(): + bucket = str(uuid.uuid4()) + key = "file.txt" + content = b"filecontent" + expected_contenttype = "app/sth" + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + s3 = boto3.client("s3", region_name="us-east-1") + + # Create a pre-signed url with some metadata. + url = s3.generate_presigned_url( + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": key, "ContentType": expected_contenttype}, + ) + + # Verify S3 throws an error when the header is not provided + response = requests.put(url, data=content) + response.status_code.should.equal(403) + str(response.content).should.contain("SignatureDoesNotMatch") + str(response.content).should.contain( + "The request signature we calculated does not match the signature you provided. Check your key and signing method." + ) + + # Verify S3 throws an error when the header has the wrong value + response = requests.put( + url, data=content, headers={"Content-Type": "application/unknown"} + ) + response.status_code.should.equal(403) + str(response.content).should.contain("SignatureDoesNotMatch") + str(response.content).should.contain( + "The request signature we calculated does not match the signature you provided. Check your key and signing method." + ) + + # Verify S3 uploads correctly when providing the meta data + response = requests.put( + url, data=content, headers={"Content-Type": expected_contenttype} + ) + response.status_code.should.equal(200) + + # Assert the object exists + obj = s3.get_object(Bucket=bucket, Key=key) + obj["ContentType"].should.equal(expected_contenttype) + obj["ContentLength"].should.equal(11) + obj["Body"].read().should.equal(content) + obj["Metadata"].should.equal({}) + + s3.delete_object(Bucket=bucket, Key=key) + s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +def test_presigned_put_url_with_custom_headers(): + bucket = str(uuid.uuid4()) + key = "file.txt" + content = b"filecontent" + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + s3 = boto3.client("s3", region_name="us-east-1") + + # Create a pre-signed url with some metadata. 
+ url = s3.generate_presigned_url( + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": key, "Metadata": {"venue": "123"}}, + ) + + # Verify S3 uploads correctly when providing the meta data + response = requests.put(url, data=content) + response.status_code.should.equal(200) + + # Assert the object exists + obj = s3.get_object(Bucket=bucket, Key=key) + obj["ContentLength"].should.equal(11) + obj["Body"].read().should.equal(content) + obj["Metadata"].should.equal({"venue": "123"}) + + s3.delete_object(Bucket=bucket, Key=key) + s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +def test_request_partial_content_should_contain_content_length(): + bucket = "bucket" + object_key = "key" + s3 = boto3.resource("s3") + s3.create_bucket(Bucket=bucket) + s3.Object(bucket, object_key).put(Body="some text") + + file = s3.Object(bucket, object_key) + response = file.get(Range="bytes=0-1024") + response["ContentLength"].should.equal(9) + + +@mock_s3 +def test_request_partial_content_should_contain_actual_content_length(): + bucket = "bucket" + object_key = "key" + s3 = boto3.resource("s3") + s3.create_bucket(Bucket=bucket) + s3.Object(bucket, object_key).put(Body="some text") + + file = s3.Object(bucket, object_key) + requested_range = "bytes=1024-" + try: + file.get(Range=requested_range) + except botocore.client.ClientError as e: + e.response["Error"]["Code"].should.equal("InvalidRange") + e.response["Error"]["Message"].should.equal( + "The requested range is not satisfiable" + ) + e.response["Error"]["ActualObjectSize"].should.equal("9") + e.response["Error"]["RangeRequested"].should.equal(requested_range) diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py new file mode 100644 index 000000000..ebaa03b78 --- /dev/null +++ b/tests/test_s3/test_s3_cloudformation.py @@ -0,0 +1,218 @@ +import json +import boto3 + +import sure # noqa + +from moto import mock_s3, mock_cloudformation + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_basic(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_with_properties(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + bucket_name = "MyBucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": bucket_name, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + }, + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=bucket_name) + + encryption = 
s3.get_bucket_encryption(Bucket=bucket_name) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_no_interruption(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + } + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.update_stack(StackName="test_stack", TemplateBody=template_json) + encryption = s3.get_bucket_encryption( + Bucket=stack_description["Outputs"][0]["OutputValue"] + ) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_replacement(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "MyNewBucketName"}, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.update_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_outputs(): + region_name = "us-east-1" + s3 = boto3.client("s3", region_name=region_name) + cf = boto3.resource("cloudformation", region_name=region_name) + stack_name = "test-stack" + bucket_name = "test-bucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "TestBucket": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": bucket_name}, + } + }, + "Outputs": { + "BucketARN": { + "Value": {"Fn::GetAtt": ["TestBucket", "Arn"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketARN"}}, + }, + "BucketDomainName": { + "Value": {"Fn::GetAtt": ["TestBucket", "DomainName"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketDomainName"}}, + }, + "BucketDualStackDomainName": { + 
"Value": {"Fn::GetAtt": ["TestBucket", "DualStackDomainName"]}, + "Export": { + "Name": {"Fn::Sub": "${AWS::StackName}:BucketDualStackDomainName"} + }, + }, + "BucketRegionalDomainName": { + "Value": {"Fn::GetAtt": ["TestBucket", "RegionalDomainName"]}, + "Export": { + "Name": {"Fn::Sub": "${AWS::StackName}:BucketRegionalDomainName"} + }, + }, + "BucketWebsiteURL": { + "Value": {"Fn::GetAtt": ["TestBucket", "WebsiteURL"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketWebsiteURL"}}, + }, + "BucketName": { + "Value": {"Ref": "TestBucket"}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketName"}}, + }, + }, + } + cf.create_stack(StackName=stack_name, TemplateBody=json.dumps(template)) + outputs_list = cf.Stack(stack_name).outputs + output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list} + s3.head_bucket(Bucket=output["BucketName"]) + output["BucketARN"].should.match("arn:aws:s3.+{bucket}".format(bucket=bucket_name)) + output["BucketDomainName"].should.equal( + "{bucket}.s3.amazonaws.com".format(bucket=bucket_name) + ) + output["BucketDualStackDomainName"].should.equal( + "{bucket}.s3.dualstack.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) + output["BucketRegionalDomainName"].should.equal( + "{bucket}.s3.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) + output["BucketWebsiteURL"].should.equal( + "http://{bucket}.s3-website.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) + output["BucketName"].should.equal(bucket_name) diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 260b248f1..d3d9344ef 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -8,7 +8,7 @@ from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa from botocore.exceptions import ClientError from datetime import datetime -from nose.tools import assert_raises +import pytest from moto import mock_s3_deprecated, mock_s3 @@ -16,7 +16,7 @@ from moto import mock_s3_deprecated, mock_s3 @mock_s3_deprecated def test_lifecycle_create(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule("myid", "", "Enabled", 30) @@ -33,7 +33,9 @@ def test_lifecycle_create(): @mock_s3 def test_lifecycle_with_filters(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Create a lifecycle rule with a Filter (no tags): lfc = { @@ -54,7 +56,7 @@ def test_lifecycle_with_filters(): assert result["Rules"][0]["Filter"]["Prefix"] == "" assert not result["Rules"][0]["Filter"].get("And") assert not result["Rules"][0]["Filter"].get("Tag") - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # Without any prefixes and an empty filter (this is by default a prefix for the whole bucket): @@ -73,16 +75,16 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # If we remove the filter -- and don't specify a Prefix, then this is bad: lfc["Rules"][0].pop("Filter") - with assert_raises(ClientError) as err: + with 
pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" # With a tag: lfc["Rules"][0]["Filter"] = {"Tag": {"Key": "mytag", "Value": "mytagvalue"}} @@ -91,12 +93,12 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Filter"]["Prefix"] assert not result["Rules"][0]["Filter"].get("And") assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # With And (single tag): @@ -116,7 +118,7 @@ def test_lifecycle_with_filters(): assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # With multiple And tags: @@ -139,7 +141,7 @@ def test_lifecycle_with_filters(): assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # And filter without Prefix but multiple Tags: @@ -154,30 +156,30 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Filter"]["And"]["Prefix"] assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # Can't have both filter and prefix: lfc["Rules"][0]["Prefix"] = "" - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" lfc["Rules"][0]["Prefix"] = "some/path" - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" # No filters -- just a prefix: del lfc["Rules"][0]["Filter"] @@ -194,11 +196,11 @@ def test_lifecycle_with_filters(): "Prefix": "some/prefix", "Tag": {"Key": "mytag", "Value": "mytagvalue"}, } - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", 
LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" lfc["Rules"][0]["Filter"] = { "Tag": {"Key": "mytag", "Value": "mytagvalue"}, @@ -210,11 +212,11 @@ def test_lifecycle_with_filters(): ], }, } - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" # Make sure multiple rules work: lfc = { @@ -245,7 +247,9 @@ def test_lifecycle_with_filters(): @mock_s3 def test_lifecycle_with_eodm(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -275,25 +279,27 @@ def test_lifecycle_with_eodm(): # With failure: lfc["Rules"][0]["Expiration"]["Days"] = 7 - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" del lfc["Rules"][0]["Expiration"]["Days"] lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" @mock_s3 def test_lifecycle_with_nve(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -327,7 +333,9 @@ def test_lifecycle_with_nve(): @mock_s3 def test_lifecycle_with_nvt(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -375,25 +383,27 @@ def test_lifecycle_with_nvt(): # With failures for missing children: del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) - assert err.exception.response["Error"]["Code"] == "MalformedXML" + assert err.value.response["Error"]["Code"] == "MalformedXML" @mock_s3 def test_lifecycle_with_aimu(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -432,7 +442,7 @@ def test_lifecycle_with_aimu(): @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): 
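# Context for this legacy boto 2 test (hence @mock_s3_deprecated): the Transition(days=30, storage_class="GLACIER") constructed below models an S3 lifecycle transition, i.e. "move objects to GLACIER 30 days after creation".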
conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() transition = Transition(days=30, storage_class="GLACIER") @@ -451,7 +461,7 @@ def test_lifecycle_with_glacier_transition(): @mock_s3_deprecated def test_lifecycle_multi(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") date = "2022-10-12T00:00:00.000Z" sc = "GLACIER" @@ -493,7 +503,7 @@ def test_lifecycle_multi(): @mock_s3_deprecated def test_lifecycle_delete(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule(expiration=30) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index dbdc85c42..ec7090369 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -4,14 +4,14 @@ import boto3 import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_s3 @mock_s3 def test_s3_storage_class_standard(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") # add an object to the bucket with standard storage @@ -26,7 +26,9 @@ def test_s3_storage_class_standard(): @mock_s3 def test_s3_storage_class_infrequent_access(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} + ) # add an object to the bucket with standard storage @@ -46,7 +48,9 @@ def test_s3_storage_class_infrequent_access(): def test_s3_storage_class_intelligent_tiering(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-east-2"} + ) s3.put_object( Bucket="Bucket", Key="my_key_infrequent", @@ -61,7 +65,7 @@ def test_s3_storage_class_intelligent_tiering(): @mock_s3 def test_s3_storage_class_copy(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -86,7 +90,7 @@ def test_s3_storage_class_copy(): @mock_s3 def test_s3_invalid_copied_storage_class(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -101,7 +105,7 @@ def test_s3_invalid_copied_storage_class(): ) # Try to copy an object with an invalid storage class - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.copy_object( CopySource={"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", @@ -109,7 +113,7 @@ def test_s3_invalid_copied_storage_class(): StorageClass="STANDARD2", ) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("InvalidStorageClass") e.response["Error"]["Message"].should.equal( "The storage class you specified is not valid" @@ -119,15 +123,17 @@ def test_s3_invalid_copied_storage_class(): @mock_s3 def test_s3_invalid_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", 
CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Try to add an object with an invalid storage class - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD" ) - e = err.exception + e = err.value e.response["Error"]["Code"].should.equal("InvalidStorageClass") e.response["Error"]["Message"].should.equal( "The storage class you specified is not valid" @@ -137,7 +143,9 @@ def test_s3_invalid_storage_class(): @mock_s3 def test_s3_default_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") @@ -148,38 +156,96 @@ def test_s3_default_storage_class(): @mock_s3 -def test_s3_copy_object_error_for_glacier_storage_class(): +def test_s3_copy_object_error_for_glacier_storage_class_not_restored(): s3 = boto3.client("s3") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) + + s3.put_object( + Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" + ) + + with pytest.raises(ClientError) as ex: + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + Bucket="Bucket", + Key="Second_Object", + ) + + ex.value.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError") + + +@mock_s3 +def test_s3_copy_object_error_for_deep_archive_storage_class_not_restored(): + s3 = boto3.client("s3") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) + + s3.put_object( + Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" + ) + + with pytest.raises(ClientError) as exc: + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + Bucket="Bucket", + Key="Second_Object", + ) + + exc.value.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError") + + +@mock_s3 +def test_s3_copy_object_for_glacier_storage_class_restored(): + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" ) - with assert_raises(ClientError) as exc: - s3.copy_object( - CopySource={"Bucket": "Bucket", "Key": "First_Object"}, - Bucket="Bucket", - Key="Second_Object", - ) + s3.create_bucket(Bucket="Bucket2") + s3.restore_object(Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}) - exc.exception.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError") + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + Bucket="Bucket2", + Key="Second_Object", + ) + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + # checks that copy of restored Glacier object has STANDARD storage class + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + # checks that metadata of copy has no Restore property + s3.head_object(Bucket="Bucket2", Key="Second_Object").should.not_have.property( + "Restore" + ) @mock_s3 -def test_s3_copy_object_error_for_deep_archive_storage_class(): - s3 = boto3.client("s3") +def test_s3_copy_object_for_deep_archive_storage_class_restored(): + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" ) - with 
assert_raises(ClientError) as exc: - s3.copy_object( - CopySource={"Bucket": "Bucket", "Key": "First_Object"}, - Bucket="Bucket", - Key="Second_Object", - ) + s3.create_bucket(Bucket="Bucket2") + s3.restore_object(Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}) - exc.exception.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError") + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + Bucket="Bucket2", + Key="Second_Object", + ) + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + # checks that copy of restored Deep Archive object has STANDARD storage class + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + # checks that metadata of copy has no Restore property + s3.head_object(Bucket="Bucket2", Key="Second_Object").should.not_have.property( + "Restore" + ) diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index b90225597..64d1c2ca8 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import os +import pytest from sure import expect from moto.s3.utils import ( bucket_name_from_url, @@ -8,7 +9,6 @@ from moto.s3.utils import ( clean_key_name, undo_clean_key_name, ) -from parameterized import parameterized def test_base_url(): @@ -93,7 +93,8 @@ def test_parse_region_from_url(): parse_region_from_url(url).should.equal(expected) -@parameterized( +@pytest.mark.parametrize( + "key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), ( "foo/run_dt%3D2019-01-01%252012%253A30%253A00", "foo/run_dt=2019-01-01%2012%3A30%3A00", ), - ] + ], ) def test_clean_key_name(key, expected): clean_key_name(key).should.equal(expected) -@parameterized( +@pytest.mark.parametrize( + "key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), ( "foo/run_dt%3D2019-01-01%252012%253A30%253A00", "foo/run_dt%253D2019-01-01%25252012%25253A30%25253A00", ), - ] + ], ) def test_undo_clean_key_name(key, expected): undo_clean_key_name(key).should.equal(expected) diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 56d46de09..9ef1acb11 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -108,3 +108,31 @@ def test_s3_server_post_unicode_bucket_key(): } ) assert backend_app + + +def test_s3_server_post_cors(): + test_client = authenticated_client() + + preflight_headers = { + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "origin, x-requested-with", + "Origin": "https://localhost:9000", + } + + res = test_client.options( + "/", "http://tester.localhost:5000/", headers=preflight_headers + ) + assert res.status_code in [200, 204] + + expected_methods = set(["DELETE", "PATCH", "PUT", "GET", "HEAD", "POST", "OPTIONS"]) + assert set(res.headers["Allow"].split(", ")) == expected_methods + assert ( + set(res.headers["Access-Control-Allow-Methods"].split(", ")) == expected_methods + ) + + res.headers.should.have.key("Access-Control-Allow-Origin").which.should.equal( + "https://localhost:9000" + ) + res.headers.should.have.key("Access-Control-Allow-Headers").which.should.equal( + "origin, x-requested-with" + )
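A minimal sketch of the nose-to-pytest pattern these hunks apply throughout: assert_raises becomes pytest.raises, and the caught exception is read from err.value instead of err.exception. The helper name below is illustrative only, not part of the patch.

import pytest
from botocore.exceptions import ClientError

def expect_malformed_xml(client, lfc):
    # pytest-style equivalent of the old nose blocks:
    #   with assert_raises(ClientError) as err: ...
    #   assert err.exception.response["Error"]["Code"] == "MalformedXML"
    with pytest.raises(ClientError) as err:
        client.put_bucket_lifecycle_configuration(
            Bucket="bucket", LifecycleConfiguration=lfc
        )
    assert err.value.response["Error"]["Code"] == "MalformedXML"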
diff --git a/tests/test_s3bucket_path/__init__.py b/tests/test_s3bucket_path/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_s3bucket_path/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_server.py similarity index 100% rename from tests/test_s3bucket_path/test_bucket_path_server.py rename to tests/test_s3bucket_path/test_server.py diff --git a/tests/test_sagemaker/__init__.py b/tests/test_sagemaker/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py new file mode 100644 index 000000000..45ae96b12 --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_endpoint.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import datetime +import boto3 +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID +import pytest + +TEST_REGION_NAME = "us-east-1" +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +GENERIC_TAGS_PARAM = [ + {"Key": "newkey1", "Value": "newval1"}, + {"Key": "newkey2", "Value": "newval2"}, +] + + +@mock_sagemaker +def test_create_endpoint_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + + endpoint_config_name = "MyEndpointConfig" + with pytest.raises(ClientError) as e: + sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + assert e.value.response["Error"]["Message"].startswith("Could not find model") + + _create_model(sagemaker, model_name) + resp = sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + resp["EndpointConfigName"].should.equal(endpoint_config_name) + resp["ProductionVariants"].should.equal(production_variants) + + +@mock_sagemaker +def test_delete_endpoint_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + + resp = sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) + with pytest.raises(ClientError) as e: + sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) + + with
pytest.raises(ClientError) as e: + sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) + + +@mock_sagemaker +def test_create_endpoint_invalid_instance_type(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + instance_type = "InvalidInstanceType" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": instance_type, + }, + ] + + endpoint_config_name = "MyEndpointConfig" + with pytest.raises(ClientError) as e: + sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + assert e.value.response["Error"]["Code"] == "ValidationException" + expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( + instance_type + ) + assert expected_message in e.value.response["Error"]["Message"] + + +@mock_sagemaker +def test_create_endpoint(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + endpoint_name = "MyEndpoint" + with pytest.raises(ClientError) as e: + sagemaker.create_endpoint( + EndpointName=endpoint_name, EndpointConfigName="NonexistentEndpointConfig" + ) + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + _create_endpoint_config(sagemaker, endpoint_config_name, model_name) + + resp = sagemaker.create_endpoint( + EndpointName=endpoint_name, + EndpointConfigName=endpoint_config_name, + Tags=GENERIC_TAGS_PARAM, + ) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) + + resp = sagemaker.describe_endpoint(EndpointName=endpoint_name) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) + resp["EndpointName"].should.equal(endpoint_name) + resp["EndpointConfigName"].should.equal(endpoint_config_name) + resp["EndpointStatus"].should.equal("InService") + assert isinstance(resp["CreationTime"], datetime.datetime) + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + resp["ProductionVariants"][0]["VariantName"].should.equal("MyProductionVariant") + + resp = sagemaker.list_tags(ResourceArn=resp["EndpointArn"]) + assert resp["Tags"] == GENERIC_TAGS_PARAM + + +@mock_sagemaker +def test_delete_endpoint(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + _create_endpoint_config(sagemaker, endpoint_config_name, model_name) + + endpoint_name = "MyEndpoint" + _create_endpoint(sagemaker, endpoint_name, endpoint_config_name) + + sagemaker.delete_endpoint(EndpointName=endpoint_name) + with pytest.raises(ClientError) as e: + sagemaker.describe_endpoint(EndpointName=endpoint_name) + assert e.value.response["Error"]["Message"].startswith("Could not find endpoint") + + with pytest.raises(ClientError) as e: + sagemaker.delete_endpoint(EndpointName=endpoint_name) + assert e.value.response["Error"]["Message"].startswith("Could not find endpoint") + + +def _create_model(boto_client, model_name): + resp = boto_client.create_model( + ModelName=model_name, + 
PrimaryContainer={ + "Image": "382416733822.dkr.ecr.us-east-1.amazonaws.com/factorization-machines:1", + "ModelDataUrl": "s3://MyBucket/model.tar.gz", + }, + ExecutionRoleArn=FAKE_ROLE_ARN, + ) + assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 + + +def _create_endpoint_config(boto_client, endpoint_config_name, model_name): + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + resp = boto_client.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + +def _create_endpoint(boto_client, endpoint_name, endpoint_config_name): + resp = boto_client.create_endpoint( + EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name + ) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) diff --git a/tests/test_sagemaker/test_sagemaker_models.py b/tests/test_sagemaker/test_sagemaker_models.py new file mode 100644 index 000000000..91fc3bb5b --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_models.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +import pytest +from moto import mock_sagemaker + +import sure # noqa + +from moto.sagemaker.models import VpcConfig + + +class MySageMakerModel(object): + def __init__(self, name, arn, container=None, vpc_config=None): + self.name = name + self.arn = arn + self.container = container if container else {} + self.vpc_config = ( + vpc_config if vpc_config else {"sg-groups": ["sg-123"], "subnets": ["123"]} + ) + + def save(self): + client = boto3.client("sagemaker", region_name="us-east-1") + vpc_config = VpcConfig( + self.vpc_config.get("sg-groups"), self.vpc_config.get("subnets") + ) + client.create_model( + ModelName=self.name, + ExecutionRoleArn=self.arn, + VpcConfig=vpc_config.response_object, + ) + + +@mock_sagemaker +def test_describe_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + test_model = MySageMakerModel( + name="blah", + arn="arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar", + vpc_config={"sg-groups": ["sg-123"], "subnets": ["123"]}, + ) + test_model.save() + model = client.describe_model(ModelName="blah") + assert model.get("ModelName").should.equal("blah") + + +@mock_sagemaker +def test_create_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + vpc_config = VpcConfig(["sg-foobar"], ["subnet-xxx"]) + exec_role_arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + name = "blah" + model = client.create_model( + ModelName=name, + ExecutionRoleArn=exec_role_arn, + VpcConfig=vpc_config.response_object, + ) + + model["ModelArn"].should.match(r"^arn:aws:sagemaker:.*:.*:model/{}$".format(name)) + + +@mock_sagemaker +def test_delete_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + name = "blah" + arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + test_model = MySageMakerModel(name=name, arn=arn) + test_model.save() + + assert len(client.list_models()["Models"]).should.equal(1) + client.delete_model(ModelName=name) + assert len(client.list_models()["Models"]).should.equal(0) + + +@mock_sagemaker +def test_delete_model_not_found(): + with pytest.raises(ClientError) as err: + boto3.client("sagemaker", 
region_name="us-east-1").delete_model( + ModelName="blah" + ) + assert err.value.response["Error"]["Code"].should.equal("404") + + +@mock_sagemaker +def test_list_models(): + client = boto3.client("sagemaker", region_name="us-east-1") + name = "blah" + arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + test_model = MySageMakerModel(name=name, arn=arn) + test_model.save() + models = client.list_models() + assert len(models["Models"]).should.equal(1) + assert models["Models"][0]["ModelName"].should.equal(name) + assert models["Models"][0]["ModelArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:model/{}$".format(name) + ) + + +@mock_sagemaker +def test_list_models_multiple(): + client = boto3.client("sagemaker", region_name="us-east-1") + + name_model_1 = "blah" + arn_model_1 = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + test_model_1 = MySageMakerModel(name=name_model_1, arn=arn_model_1) + test_model_1.save() + + name_model_2 = "blah2" + arn_model_2 = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar2" + test_model_2 = MySageMakerModel(name=name_model_2, arn=arn_model_2) + test_model_2.save() + models = client.list_models() + assert len(models["Models"]).should.equal(2) + + +@mock_sagemaker +def test_list_models_none(): + client = boto3.client("sagemaker", region_name="us-east-1") + models = client.list_models() + assert len(models["Models"]).should.equal(0) diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py new file mode 100644 index 000000000..3a3137dec --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import datetime +import boto3 +from botocore.exceptions import ClientError, ParamValidationError +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID +import pytest + +TEST_REGION_NAME = "us-east-1" +FAKE_SUBNET_ID = "subnet-012345678" +FAKE_SECURITY_GROUP_IDS = ["sg-0123456789abcdef0", "sg-0123456789abcdef1"] +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +FAKE_KMS_KEY_ID = "62d4509a-9f96-446c-a9ba-6b1c353c8c58" +GENERIC_TAGS_PARAM = [ + {"Key": "newkey1", "Value": "newval1"}, + {"Key": "newkey2", "Value": "newval2"}, +] +FAKE_LIFECYCLE_CONFIG_NAME = "FakeLifecycleConfigName" +FAKE_DEFAULT_CODE_REPO = "https://github.com/user/repo1" +FAKE_ADDL_CODE_REPOS = [ + "https://github.com/user/repo2", + "https://github.com/user/repo2", +] + + +@mock_sagemaker +def test_create_notebook_instance_minimal_params(): + + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "RoleArn": FAKE_ROLE_ARN, + } + resp = sagemaker.create_notebook_instance(**args) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + assert resp["NotebookInstanceName"] == NAME_PARAM + assert resp["NotebookInstanceStatus"] == "InService" + assert resp["Url"] == "{}.notebook.{}.sagemaker.aws".format( + NAME_PARAM, TEST_REGION_NAME + ) + assert resp["InstanceType"] == INSTANCE_TYPE_PARAM + 
assert resp["RoleArn"] == FAKE_ROLE_ARN + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) + assert resp["DirectInternetAccess"] == "Enabled" + assert resp["VolumeSizeInGB"] == 5 + + +# assert resp["RootAccess"] == True # ToDo: Not sure if this defaults... + + +@mock_sagemaker +def test_create_notebook_instance_params(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + DIRECT_INTERNET_ACCESS_PARAM = "Enabled" + VOLUME_SIZE_IN_GB_PARAM = 7 + ACCELERATOR_TYPES_PARAM = ["ml.eia1.medium", "ml.eia2.medium"] + ROOT_ACCESS_PARAM = "Disabled" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "SubnetId": FAKE_SUBNET_ID, + "SecurityGroupIds": FAKE_SECURITY_GROUP_IDS, + "RoleArn": FAKE_ROLE_ARN, + "KmsKeyId": FAKE_KMS_KEY_ID, + "Tags": GENERIC_TAGS_PARAM, + "LifecycleConfigName": FAKE_LIFECYCLE_CONFIG_NAME, + "DirectInternetAccess": DIRECT_INTERNET_ACCESS_PARAM, + "VolumeSizeInGB": VOLUME_SIZE_IN_GB_PARAM, + "AcceleratorTypes": ACCELERATOR_TYPES_PARAM, + "DefaultCodeRepository": FAKE_DEFAULT_CODE_REPO, + "AdditionalCodeRepositories": FAKE_ADDL_CODE_REPOS, + "RootAccess": ROOT_ACCESS_PARAM, + } + resp = sagemaker.create_notebook_instance(**args) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + assert resp["NotebookInstanceName"] == NAME_PARAM + assert resp["NotebookInstanceStatus"] == "InService" + assert resp["Url"] == "{}.notebook.{}.sagemaker.aws".format( + NAME_PARAM, TEST_REGION_NAME + ) + assert resp["InstanceType"] == INSTANCE_TYPE_PARAM + assert resp["RoleArn"] == FAKE_ROLE_ARN + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) + assert resp["DirectInternetAccess"] == "Enabled" + assert resp["VolumeSizeInGB"] == VOLUME_SIZE_IN_GB_PARAM + # assert resp["RootAccess"] == True # ToDo: Not sure if this defaults... 
+ assert resp["SubnetId"] == FAKE_SUBNET_ID + assert resp["SecurityGroups"] == FAKE_SECURITY_GROUP_IDS + assert resp["KmsKeyId"] == FAKE_KMS_KEY_ID + assert resp["NotebookInstanceLifecycleConfigName"] == FAKE_LIFECYCLE_CONFIG_NAME + assert resp["AcceleratorTypes"] == ACCELERATOR_TYPES_PARAM + assert resp["DefaultCodeRepository"] == FAKE_DEFAULT_CODE_REPO + assert resp["AdditionalCodeRepositories"] == FAKE_ADDL_CODE_REPOS + + resp = sagemaker.list_tags(ResourceArn=resp["NotebookInstanceArn"]) + assert resp["Tags"] == GENERIC_TAGS_PARAM + + +@mock_sagemaker +def test_create_notebook_instance_bad_volume_size(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + vol_size = 2 + args = { + "NotebookInstanceName": "MyNotebookInstance", + "InstanceType": "ml.t2.medium", + "RoleArn": FAKE_ROLE_ARN, + "VolumeSizeInGB": vol_size, + } + with pytest.raises(ParamValidationError) as ex: + sagemaker.create_notebook_instance(**args) + assert ex.value.args[ + 0 + ] == "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format( + vol_size + ) + + +@mock_sagemaker +def test_create_notebook_instance_invalid_instance_type(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + instance_type = "undefined_instance_type" + args = { + "NotebookInstanceName": "MyNotebookInstance", + "InstanceType": instance_type, + "RoleArn": FAKE_ROLE_ARN, + } + with pytest.raises(ClientError) as ex: + sagemaker.create_notebook_instance(**args) + assert ex.value.response["Error"]["Code"] == "ValidationException" + expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( + instance_type + ) + + assert expected_message in ex.value.response["Error"]["Message"] + + +@mock_sagemaker +def test_notebook_instance_lifecycle(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "RoleArn": FAKE_ROLE_ARN, + } + resp = sagemaker.create_notebook_instance(**args) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + notebook_instance_arn = resp["NotebookInstanceArn"] + + with pytest.raises(ClientError) as ex: + sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert ex.value.response["Error"]["Code"] == "ValidationException" + expected_message = "Status (InService) not in ([Stopped, Failed]). 
Unable to transition to (Deleting) for Notebook Instance ({})".format( + notebook_instance_arn + ) + assert expected_message in ex.value.response["Error"]["Message"] + + sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert resp["NotebookInstanceStatus"] == "Stopped" + + sagemaker.start_notebook_instance(NotebookInstanceName=NAME_PARAM) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert resp["NotebookInstanceStatus"] == "InService" + + sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert resp["NotebookInstanceStatus"] == "Stopped" + + sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM) + + with pytest.raises(ClientError) as ex: + sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert ex.value.response["Error"]["Message"] == "RecordNotFound" + + +@mock_sagemaker +def test_describe_nonexistent_model(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + with pytest.raises(ClientError) as e: + sagemaker.describe_model(ModelName="Nonexistent") + assert e.value.response["Error"]["Message"].startswith("Could not find model") + + +@mock_sagemaker +def test_notebook_instance_lifecycle_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + name = "MyLifeCycleConfig" + on_create = [{"Content": "Create Script Line 1"}] + on_start = [{"Content": "Start Script Line 1"}] + resp = sagemaker.create_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, OnCreate=on_create, OnStart=on_start + ) + assert resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceLifecycleConfigArn"].endswith(name) + + with pytest.raises(ClientError) as e: + resp = sagemaker.create_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + OnCreate=on_create, + OnStart=on_start, + ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config already exists.)" + ) + + resp = sagemaker.describe_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert resp["NotebookInstanceLifecycleConfigName"] == name + assert resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceLifecycleConfigArn"].endswith(name) + assert resp["OnStart"] == on_start + assert resp["OnCreate"] == on_create + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) + + sagemaker.delete_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + + with pytest.raises(ClientError) as e: + sagemaker.describe_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) + + with pytest.raises(ClientError) as e: + sagemaker.delete_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py new file mode 100644 index 000000000..c7b631ae3 --- /dev/null +++ 
b/tests/test_sagemaker/test_sagemaker_training.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +import datetime +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID + +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +TEST_REGION_NAME = "us-east-1" + + +@mock_sagemaker +def test_create_training_job(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + training_job_name = "MyTrainingJob" + container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1" + bucket = "my-bucket" + prefix = "sagemaker/DEMO-breast-cancer-prediction/" + + params = { + "RoleArn": FAKE_ROLE_ARN, + "TrainingJobName": training_job_name, + "AlgorithmSpecification": { + "TrainingImage": container, + "TrainingInputMode": "File", + }, + "ResourceConfig": { + "InstanceCount": 1, + "InstanceType": "ml.c4.2xlarge", + "VolumeSizeInGB": 10, + }, + "InputDataConfig": [ + { + "ChannelName": "train", + "DataSource": { + "S3DataSource": { + "S3DataType": "S3Prefix", + "S3Uri": "s3://{}/{}/train/".format(bucket, prefix), + "S3DataDistributionType": "ShardedByS3Key", + } + }, + "CompressionType": "None", + "RecordWrapperType": "None", + }, + { + "ChannelName": "validation", + "DataSource": { + "S3DataSource": { + "S3DataType": "S3Prefix", + "S3Uri": "s3://{}/{}/validation/".format(bucket, prefix), + "S3DataDistributionType": "FullyReplicated", + } + }, + "CompressionType": "None", + "RecordWrapperType": "None", + }, + ], + "OutputDataConfig": {"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)}, + "HyperParameters": { + "feature_dim": "30", + "mini_batch_size": "100", + "predictor_type": "regressor", + "epochs": "10", + "num_models": "32", + "loss": "absolute_loss", + }, + "StoppingCondition": {"MaxRuntimeInSeconds": 60 * 60}, + } + + resp = sagemaker.create_training_job(**params) + resp["TrainingJobArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) + ) + + resp = sagemaker.describe_training_job(TrainingJobName=training_job_name) + resp["TrainingJobName"].should.equal(training_job_name) + resp["TrainingJobArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) + ) + assert resp["ModelArtifacts"]["S3ModelArtifacts"].startswith( + params["OutputDataConfig"]["S3OutputPath"] + ) + assert training_job_name in (resp["ModelArtifacts"]["S3ModelArtifacts"]) + assert resp["ModelArtifacts"]["S3ModelArtifacts"].endswith("output/model.tar.gz") + assert resp["TrainingJobStatus"] == "Completed" + assert resp["SecondaryStatus"] == "Completed" + assert resp["HyperParameters"] == params["HyperParameters"] + assert ( + resp["AlgorithmSpecification"]["TrainingImage"] + == params["AlgorithmSpecification"]["TrainingImage"] + ) + assert ( + resp["AlgorithmSpecification"]["TrainingInputMode"] + == params["AlgorithmSpecification"]["TrainingInputMode"] + ) + assert "MetricDefinitions" in resp["AlgorithmSpecification"] + assert "Name" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] + assert "Regex" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] + assert resp["RoleArn"] == FAKE_ROLE_ARN + assert resp["InputDataConfig"] == params["InputDataConfig"] + assert resp["OutputDataConfig"] == params["OutputDataConfig"] + assert resp["ResourceConfig"] == params["ResourceConfig"] + assert resp["StoppingCondition"] == params["StoppingCondition"] + assert isinstance(resp["CreationTime"], datetime.datetime) + assert 
isinstance(resp["TrainingStartTime"], datetime.datetime) + assert isinstance(resp["TrainingEndTime"], datetime.datetime) + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert "SecondaryStatusTransitions" in resp + assert "Status" in resp["SecondaryStatusTransitions"][0] + assert "StartTime" in resp["SecondaryStatusTransitions"][0] + assert "EndTime" in resp["SecondaryStatusTransitions"][0] + assert "StatusMessage" in resp["SecondaryStatusTransitions"][0] + assert "FinalMetricDataList" in resp + assert "MetricName" in resp["FinalMetricDataList"][0] + assert "Value" in resp["FinalMetricDataList"][0] + assert "Timestamp" in resp["FinalMetricDataList"][0] + + pass diff --git a/tests/test_secretsmanager/__init__.py b/tests/test_secretsmanager/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_secretsmanager/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py new file mode 100644 index 000000000..8d8ddbd64 --- /dev/null +++ b/tests/test_secretsmanager/test_list_secrets.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 + +from moto import mock_secretsmanager +from botocore.exceptions import ClientError +import sure # noqa +import pytest + + +def boto_client(): + return boto3.client("secretsmanager", region_name="us-west-2") + + +@mock_secretsmanager +def test_empty(): + conn = boto_client() + + secrets = conn.list_secrets() + + assert secrets["SecretList"] == [] + + +@mock_secretsmanager +def test_list_secrets(): + conn = boto_client() + + conn.create_secret(Name="test-secret", SecretString="foosecret") + + conn.create_secret( + Name="test-secret-2", + SecretString="barsecret", + Tags=[{"Key": "a", "Value": "1"}], + ) + + secrets = conn.list_secrets() + + assert secrets["SecretList"][0]["ARN"] is not None + assert secrets["SecretList"][0]["Name"] == "test-secret" + assert secrets["SecretList"][0]["SecretVersionsToStages"] is not None + assert secrets["SecretList"][1]["ARN"] is not None + assert secrets["SecretList"][1]["Name"] == "test-secret-2" + assert secrets["SecretList"][1]["Tags"] == [{"Key": "a", "Value": "1"}] + assert secrets["SecretList"][1]["SecretVersionsToStages"] is not None + + +@mock_secretsmanager +def test_with_name_filter(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_tag_key_filter(): + conn = boto_client() + + conn.create_secret( + Name="foo", SecretString="secret", Tags=[{"Key": "baz", "Value": "1"}] + ) + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "tag-key", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_tag_value_filter(): + conn = boto_client() + + conn.create_secret( + Name="foo", SecretString="secret", Tags=[{"Key": "1", "Value": "baz"}] + ) + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "tag-value", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], 
secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_description_filter(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="baz qux") + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_all_filter(): + # The 'all' filter will match a secret that contains ANY field with the criteria. In other words an implicit OR. + + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret", Description="foo") + conn.create_secret( + Name="baz", SecretString="secret", Tags=[{"Key": "foo", "Value": "1"}] + ) + conn.create_secret( + Name="qux", SecretString="secret", Tags=[{"Key": "1", "Value": "foo"}] + ) + conn.create_secret( + Name="multi", SecretString="secret", Tags=[{"Key": "foo", "Value": "foo"}] + ) + conn.create_secret(Name="none", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert sorted(secret_names) == ["bar", "baz", "foo", "multi", "qux"] + + +@mock_secretsmanager +def test_with_no_filter_key(): + conn = boto_client() + + with pytest.raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Values": ["foo"]}]) + + ire.value.response["Error"]["Code"].should.equal("InvalidParameterException") + ire.value.response["Error"]["Message"].should.equal("Invalid filter key") + + +@mock_secretsmanager +def test_with_no_filter_values(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="hello") + + with pytest.raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Key": "description"}]) + + ire.value.response["Error"]["Code"].should.equal("InvalidParameterException") + ire.value.response["Error"]["Message"].should.equal( + "Invalid filter values for key: description" + ) + + +@mock_secretsmanager +def test_with_invalid_filter_key(): + conn = boto_client() + + with pytest.raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Key": "invalid", "Values": ["foo"]}]) + + ire.value.response["Error"]["Code"].should.equal("ValidationException") + ire.value.response["Error"]["Message"].should.equal( + "1 validation error detected: Value 'invalid' at 'filters.1.member.key' failed to satisfy constraint: Member " + "must satisfy enum value set: [all, name, tag-key, description, tag-value]" + ) + + +@mock_secretsmanager +def test_with_duplicate_filter_keys(): + # Multiple filters with the same key combine with an implicit AND operator + + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="one two") + conn.create_secret(Name="bar", SecretString="secret", Description="one") + conn.create_secret(Name="baz", SecretString="secret", Description="two") + conn.create_secret(Name="qux", SecretString="secret", Description="unrelated") + + secrets = conn.list_secrets( + Filters=[ + {"Key": "description", "Values": ["one"]}, + {"Key": "description", "Values": ["two"]}, + ] + ) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_multiple_filters(): + # Multiple filters combine with an implicit AND operator + + conn = 
boto_client() + + conn.create_secret( + Name="foo", SecretString="secret", Tags=[{"Key": "right", "Value": "right"}] + ) + conn.create_secret( + Name="bar", SecretString="secret", Tags=[{"Key": "right", "Value": "wrong"}] + ) + conn.create_secret( + Name="baz", SecretString="secret", Tags=[{"Key": "wrong", "Value": "right"}] + ) + conn.create_secret( + Name="qux", SecretString="secret", Tags=[{"Key": "wrong", "Value": "wrong"}] + ) + + secrets = conn.list_secrets( + Filters=[ + {"Key": "tag-key", "Values": ["right"]}, + {"Key": "tag-value", "Values": ["right"]}, + ] + ) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo"] + + +@mock_secretsmanager +def test_with_filter_with_multiple_values(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret") + conn.create_secret(Name="baz", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo", "bar"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo", "bar"] + + +@mock_secretsmanager +def test_with_filter_with_value_with_multiple_words(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="one two") + conn.create_secret(Name="bar", SecretString="secret", Description="one and two") + conn.create_secret(Name="baz", SecretString="secret", Description="one") + conn.create_secret(Name="qux", SecretString="secret", Description="two") + conn.create_secret(Name="none", SecretString="secret", Description="unrelated") + + secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["one two"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert secret_names == ["foo", "bar"] diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 3b8c74e81..539b878f9 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -9,7 +9,7 @@ import string import pytz from datetime import datetime import sure # noqa -from nose.tools import assert_raises, assert_equal +import pytest from six import b DEFAULT_SECRET_NAME = "test-secret" @@ -53,12 +53,12 @@ def test_get_secret_value_binary(): def test_get_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-exist") - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] ) @@ -69,12 +69,12 @@ def test_get_secret_that_does_not_match(): Name="java-util-test-password", SecretString="foosecret" ) - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-match") - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], + assert ( + "Secrets Manager can't find the specified secret." 
+ == cm.value.response["Error"]["Message"] ) @@ -86,7 +86,7 @@ def test_get_secret_value_that_is_marked_deleted(): conn.delete_secret(SecretId="test-secret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="test-secret") @@ -96,15 +96,32 @@ def test_get_secret_that_has_no_value(): create_secret = conn.create_secret(Name="java-util-test-password") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="java-util-test-password") - assert_equal( - "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT", - cm.exception.response["Error"]["Message"], + assert ( + "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" + == cm.value.response["Error"]["Message"] ) +@mock_secretsmanager +def test_get_secret_version_that_does_not_exist(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + result = conn.create_secret(Name="java-util-test-password") + secret_arn = result["ARN"] + missing_version_id = "00000000-0000-0000-0000-000000000000" + + with pytest.raises(ClientError) as cm: + conn.get_secret_value(SecretId=secret_arn, VersionId=missing_version_id) + + assert ( + "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " + "Manager can't find the specified secret value for VersionId: 00000000-0000-0000-0000-000000000000" + ) == cm.value.response["Error"]["Message"] + + @mock_secretsmanager def test_create_secret(): conn = boto3.client("secretsmanager", region_name="us-east-1") @@ -137,6 +154,45 @@ def test_create_secret_with_tags(): ] +@mock_secretsmanager +def test_create_secret_with_description(): + conn = boto3.client("secretsmanager", region_name="us-east-1") + secret_name = "test-secret-with-tags" + + result = conn.create_secret( + Name=secret_name, SecretString="foosecret", Description="desc" + ) + assert result["ARN"] + assert result["Name"] == secret_name + secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value["SecretString"] == "foosecret" + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details["Description"] == "desc" + + +@mock_secretsmanager +def test_create_secret_with_tags_and_description(): + conn = boto3.client("secretsmanager", region_name="us-east-1") + secret_name = "test-secret-with-tags" + + result = conn.create_secret( + Name=secret_name, + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + assert result["ARN"] + assert result["Name"] == secret_name + secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value["SecretString"] == "foosecret" + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + + @mock_secretsmanager def test_delete_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") @@ -168,7 +224,25 @@ def test_delete_secret_force(): assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc) assert result["Name"] == "test-secret" - with assert_raises(ClientError): + with pytest.raises(ClientError): + result = conn.get_secret_value(SecretId="test-secret") + + +@mock_secretsmanager +def test_delete_secret_force_with_arn(): + conn = boto3.client("secretsmanager", 
region_name="us-west-2") + + create_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + + result = conn.delete_secret( + SecretId=create_secret["ARN"], ForceDeleteWithoutRecovery=True + ) + + assert result["ARN"] + assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc) + assert result["Name"] == "test-secret" + + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="test-secret") @@ -176,7 +250,7 @@ def test_delete_secret_force(): def test_delete_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.delete_secret( SecretId="i-dont-exist", ForceDeleteWithoutRecovery=True ) @@ -188,7 +262,7 @@ def test_delete_secret_fails_with_both_force_delete_flag_and_recovery_window_fla conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.delete_secret( SecretId="test-secret", RecoveryWindowInDays=1, @@ -202,7 +276,7 @@ def test_delete_secret_recovery_window_too_short(): conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.delete_secret(SecretId="test-secret", RecoveryWindowInDays=6) @@ -212,7 +286,7 @@ def test_delete_secret_recovery_window_too_long(): conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.delete_secret(SecretId="test-secret", RecoveryWindowInDays=31) @@ -224,7 +298,7 @@ def test_delete_secret_that_is_marked_deleted(): deleted_secret = conn.delete_secret(SecretId="test-secret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.delete_secret(SecretId="test-secret") @@ -262,7 +336,7 @@ def test_get_random_exclude_lowercase(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=55, ExcludeLowercase=True) - assert any(c.islower() for c in random_password["RandomPassword"]) == False + assert not any(c.islower() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -270,7 +344,7 @@ def test_get_random_exclude_uppercase(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=55, ExcludeUppercase=True) - assert any(c.isupper() for c in random_password["RandomPassword"]) == False + assert not any(c.isupper() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -280,7 +354,8 @@ def test_get_random_exclude_characters_and_symbols(): random_password = conn.get_random_password( PasswordLength=20, ExcludeCharacters="xyzDje@?!." ) - assert any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) == False + assert not any(c in "xyzDje@?!." 
for c in random_password["RandomPassword"]) + assert len(random_password["RandomPassword"]) == 20 @mock_secretsmanager @@ -288,7 +363,7 @@ def test_get_random_exclude_numbers(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=100, ExcludeNumbers=True) - assert any(c.isdigit() for c in random_password["RandomPassword"]) == False + assert not any(c.isdigit() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -298,9 +373,7 @@ def test_get_random_exclude_punctuation(): random_password = conn.get_random_password( PasswordLength=100, ExcludePunctuation=True ) - assert ( - any(c in string.punctuation for c in random_password["RandomPassword"]) == False - ) + assert not any(c in string.punctuation for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -308,7 +381,7 @@ def test_get_random_include_space_false(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=300) - assert any(c.isspace() for c in random_password["RandomPassword"]) == False + assert not any(c.isspace() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -316,7 +389,7 @@ def test_get_random_include_space_true(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=4, IncludeSpace=True) - assert any(c.isspace() for c in random_password["RandomPassword"]) == True + assert any(c.isspace() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -326,25 +399,17 @@ def test_get_random_require_each_included_type(): random_password = conn.get_random_password( PasswordLength=4, RequireEachIncludedType=True ) - assert ( - any(c in string.punctuation for c in random_password["RandomPassword"]) == True - ) - assert ( - any(c in string.ascii_lowercase for c in random_password["RandomPassword"]) - == True - ) - assert ( - any(c in string.ascii_uppercase for c in random_password["RandomPassword"]) - == True - ) - assert any(c in string.digits for c in random_password["RandomPassword"]) == True + assert any(c in string.punctuation for c in random_password["RandomPassword"]) + assert any(c in string.ascii_lowercase for c in random_password["RandomPassword"]) + assert any(c in string.ascii_uppercase for c in random_password["RandomPassword"]) + assert any(c in string.digits for c in random_password["RandomPassword"]) @mock_secretsmanager def test_get_random_too_short_password(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): random_password = conn.get_random_password(PasswordLength=3) @@ -352,7 +417,7 @@ def test_get_random_too_short_password(): def test_get_random_too_long_password(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(Exception): + with pytest.raises(Exception): random_password = conn.get_random_password(PasswordLength=5555) @@ -381,15 +446,16 @@ def test_describe_secret_with_arn(): secret_description = conn.describe_secret(SecretId=results["ARN"]) assert secret_description # Returned dict is not empty - assert secret_description["Name"] == ("test-secret") - assert secret_description["ARN"] != results["ARN"] + secret_description["Name"].should.equal("test-secret") + secret_description["ARN"].should.equal(results["ARN"]) + conn.list_secrets()["SecretList"][0]["ARN"].should.equal(results["ARN"]) @mock_secretsmanager def 
test_describe_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="i-dont-exist") @@ -398,40 +464,10 @@ def test_describe_secret_that_does_not_match(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="i-dont-match") -@mock_secretsmanager -def test_list_secrets_empty(): - conn = boto3.client("secretsmanager", region_name="us-west-2") - - secrets = conn.list_secrets() - - assert secrets["SecretList"] == [] - - -@mock_secretsmanager -def test_list_secrets(): - conn = boto3.client("secretsmanager", region_name="us-west-2") - - conn.create_secret(Name="test-secret", SecretString="foosecret") - - conn.create_secret( - Name="test-secret-2", - SecretString="barsecret", - Tags=[{"Key": "a", "Value": "1"}], - ) - - secrets = conn.list_secrets() - - assert secrets["SecretList"][0]["ARN"] is not None - assert secrets["SecretList"][0]["Name"] == "test-secret" - assert secrets["SecretList"][1]["ARN"] is not None - assert secrets["SecretList"][1]["Name"] == "test-secret-2" - assert secrets["SecretList"][1]["Tags"] == [{"Key": "a", "Value": "1"}] - - @mock_secretsmanager def test_restore_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") @@ -466,14 +502,16 @@ def test_restore_secret_that_is_not_deleted(): def test_restore_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.restore_secret(SecretId="i-dont-exist") @mock_secretsmanager def test_rotate_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") - conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") + conn.create_secret( + Name=DEFAULT_SECRET_NAME, SecretString="foosecret", Description="foodescription" + ) rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) @@ -482,6 +520,10 @@ def test_rotate_secret(): assert rotated_secret["Name"] == DEFAULT_SECRET_NAME assert rotated_secret["VersionId"] != "" + describe_secret = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + + assert describe_secret["Description"] == "foodescription" + @mock_secretsmanager def test_rotate_secret_enable_rotation(): @@ -511,7 +553,7 @@ def test_rotate_secret_that_is_marked_deleted(): conn.delete_secret(SecretId="test-secret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="test-secret") @@ -519,7 +561,7 @@ def test_rotate_secret_that_is_marked_deleted(): def test_rotate_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", "us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="i-dont-exist") @@ -528,7 +570,7 @@ def test_rotate_secret_that_does_not_match(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="i-dont-match") @@ -548,7 +590,7 @@ def test_rotate_secret_client_request_token_too_long(): client_request_token = ( "ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-" "ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C" ) - with 
assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, ClientRequestToken=client_request_token ) @@ -560,7 +602,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") rotation_lambda_arn = "85B7-446A-B7E4" * 147 # == 2058 characters - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, RotationLambdaARN=rotation_lambda_arn ) @@ -580,15 +622,31 @@ def test_rotate_secret_rotation_period_too_long(): conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") rotation_rules = {"AutomaticallyAfterDays": 1001} - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, RotationRules=rotation_rules ) +@mock_secretsmanager +def test_put_secret_value_on_non_existing_secret(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + with pytest.raises(ClientError) as cm: + conn.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="foosecret", + VersionStages=["AWSCURRENT"], + ) + + cm.value.response["Error"]["Message"].should.equal( + "Secrets Manager can't find the specified secret." + ) + + @mock_secretsmanager def test_put_secret_value_puts_new_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="foosecret", @@ -607,6 +665,7 @@ def test_put_secret_value_puts_new_secret(): @mock_secretsmanager def test_put_secret_binary_value_puts_new_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret"), @@ -639,11 +698,11 @@ def test_create_and_put_secret_binary_value_puts_new_secret(): @mock_secretsmanager def test_put_secret_binary_requires_either_string_or_binary(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as ire: + with pytest.raises(ClientError) as ire: conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME) - ire.exception.response["Error"]["Code"].should.equal("InvalidRequestException") - ire.exception.response["Error"]["Message"].should.equal( + ire.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ire.value.response["Error"]["Message"].should.equal( "You must provide either SecretString or SecretBinary." 
) @@ -651,6 +710,7 @@ def test_put_secret_binary_requires_either_string_or_binary(): @mock_secretsmanager def test_put_secret_value_can_get_first_version_if_put_twice(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="first_secret", @@ -674,6 +734,7 @@ def test_put_secret_value_can_get_first_version_if_put_twice(): @mock_secretsmanager def test_put_secret_value_versions_differ_if_same_secret_put_twice(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary="foosecret") put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="dupe_secret", @@ -690,9 +751,43 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice(): assert first_version_id != second_version_id +@mock_secretsmanager +def test_put_secret_value_maintains_description_and_tags(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + previous_response = conn.create_secret( + Name=DEFAULT_SECRET_NAME, + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + previous_version_id = previous_response["VersionId"] + + conn = boto3.client("secretsmanager", region_name="us-west-2") + current_response = conn.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="dupe_secret", + VersionStages=["AWSCURRENT"], + ) + current_version_id = current_response["VersionId"] + + secret_details = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + assert secret_details["VersionIdsToStages"] is not None + assert previous_version_id in secret_details["VersionIdsToStages"] + assert current_version_id in secret_details["VersionIdsToStages"] + assert secret_details["VersionIdsToStages"][previous_version_id] == ["AWSPREVIOUS"] + assert secret_details["VersionIdsToStages"][current_version_id] == ["AWSCURRENT"] + + @mock_secretsmanager def test_can_list_secret_version_ids(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary="foosecret") put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="dupe_secret", @@ -711,3 +806,174 @@ def test_can_list_secret_version_ids(): returned_version_ids = [v["VersionId"] for v in versions_list["Versions"]] assert [first_version_id, second_version_id].sort() == returned_version_ids.sort() + + +@mock_secretsmanager +def test_update_secret(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + + assert created_secret["ARN"] + assert created_secret["Name"] == "test-secret" + assert created_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "foosecret" + + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert updated_secret["ARN"] + assert updated_secret["Name"] == "test-secret" + assert updated_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "barsecret" + assert 
created_secret["VersionId"] != updated_secret["VersionId"] + + +@mock_secretsmanager +def test_update_secret_with_tags_and_description(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret( + Name="test-secret", + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + + assert created_secret["ARN"] + assert created_secret["Name"] == "test-secret" + assert created_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "foosecret" + + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert updated_secret["ARN"] + assert updated_secret["Name"] == "test-secret" + assert updated_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "barsecret" + assert created_secret["VersionId"] != updated_secret["VersionId"] + secret_details = conn.describe_secret(SecretId="test-secret") + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + + +@mock_secretsmanager +def test_update_secret_which_does_not_exit(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + with pytest.raises(ClientError) as cm: + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] + ) + + +@mock_secretsmanager +def test_update_secret_marked_as_deleted(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + deleted_secret = conn.delete_secret(SecretId="test-secret") + + with pytest.raises(ClientError) as cm: + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert ( + "because it was marked for deletion." in cm.value.response["Error"]["Message"] + ) + + +@mock_secretsmanager +def test_update_secret_marked_as_deleted_after_restoring(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + deleted_secret = conn.delete_secret(SecretId="test-secret") + restored_secret = conn.restore_secret(SecretId="test-secret") + + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert updated_secret["ARN"] + assert updated_secret["Name"] == "test-secret" + assert updated_secret["VersionId"] != "" + + +@mock_secretsmanager +def test_tag_resource(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name="test-secret", SecretString="foosecret") + conn.tag_resource( + SecretId="test-secret", Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], + ) + + conn.tag_resource( + SecretId="test-secret", Tags=[{"Key": "SecondTag", "Value": "AnotherValue"},], + ) + + secrets = conn.list_secrets() + assert secrets["SecretList"][0].get("Tags") == [ + {"Key": "FirstTag", "Value": "SomeValue"}, + {"Key": "SecondTag", "Value": "AnotherValue"}, + ] + + with pytest.raises(ClientError) as cm: + conn.tag_resource( + SecretId="dummy-test-secret", + Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], + ) + + assert ( + "Secrets Manager can't find the specified secret." 
+ == cm.value.response["Error"]["Message"] + ) + + +@mock_secretsmanager +def test_secret_versions_to_stages_attribute_discrepancy(): + client = boto3.client("secretsmanager", region_name="us-west-2") + + resp = client.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") + previous_version_id = resp["VersionId"] + + resp = client.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="dupe_secret", + VersionStages=["AWSCURRENT"], + ) + current_version_id = resp["VersionId"] + + secret = client.describe_secret(SecretId=DEFAULT_SECRET_NAME) + describe_vtos = secret["VersionIdsToStages"] + assert describe_vtos[current_version_id] == ["AWSCURRENT"] + assert describe_vtos[previous_version_id] == ["AWSPREVIOUS"] + + secret = client.list_secrets( + Filters=[{"Key": "name", "Values": [DEFAULT_SECRET_NAME]}] + ).get("SecretList")[0] + list_vtos = secret["SecretVersionsToStages"] + assert list_vtos[current_version_id] == ["AWSCURRENT"] + assert list_vtos[previous_version_id] == ["AWSPREVIOUS"] + + assert describe_vtos == list_vtos diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index 81cb641bd..da41eb5fb 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -408,7 +408,11 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): def test_put_secret_value_puts_new_secret(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() - + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) test_client.post( "/", data={ @@ -458,6 +462,12 @@ def test_put_secret_value_can_get_first_version_if_put_twice(): first_secret_string = "first_secret" second_secret_string = "second_secret" + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + put_first_secret_value_json = test_client.post( "/", data={ @@ -507,6 +517,11 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) put_first_secret_value_json = test_client.post( "/", data={ @@ -543,6 +558,11 @@ def test_can_list_secret_version_ids(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) put_first_secret_value_json = test_client.post( "/", data={ diff --git a/tests/test_ses/__init__.py b/tests/test_ses/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_ses/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
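# A minimal sketch of the AWSCURRENT/AWSPREVIOUS stage promotion that the
# secretsmanager tests earlier in this diff assert; the function name and
# secret name below are illustrative, the calls are standard boto3/moto usage.
import boto3
from moto import mock_secretsmanager

@mock_secretsmanager
def demo_stage_promotion():
    client = boto3.client("secretsmanager", region_name="us-west-2")
    v1 = client.create_secret(Name="demo", SecretString="one")["VersionId"]
    # Putting a new AWSCURRENT value demotes the prior version to AWSPREVIOUS.
    v2 = client.put_secret_value(
        SecretId="demo", SecretString="two", VersionStages=["AWSCURRENT"]
    )["VersionId"]
    stages = client.describe_secret(SecretId="demo")["VersionIdsToStages"]
    assert stages[v1] == ["AWSPREVIOUS"]
    assert stages[v2] == ["AWSCURRENT"]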
diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 851327b9d..ce0062974 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -127,3 +127,53 @@ def test_send_raw_email(): send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] ) sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_get_send_statistics(): + conn = boto.connect_ses("the_key", "the_secret") + + conn.send_email.when.called_with( + "test@example.com", + "test subject", + "test body", + "test_to@example.com", + format="html", + ).should.throw(BotoServerError) + + # tests to verify rejects in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] + ) + reject_count.should.equal(1) + delivery_count.should.equal(0) + + conn.verify_email_identity("test@example.com") + conn.send_email( + "test@example.com", "test subject", "test body", "test_to@example.com" + ) + + # tests to delivery attempts in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] + ) + reject_count.should.equal(1) + delivery_count.should.equal(1) diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index ee7c92aa1..e3c2b6d3d 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -4,6 +4,8 @@ import boto3 from botocore.exceptions import ClientError from six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText +import pytest + import sure # noqa @@ -82,6 +84,35 @@ def test_send_email(): sent_count.should.equal(3) +@mock_ses +def test_send_email_when_verify_source(): + conn = boto3.client("ses", region_name="us-east-1") + + kwargs = dict( + Destination={"ToAddresses": ["test_to@example.com"],}, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Text": {"Data": "test body"}}, + }, + ) + + conn.send_email.when.called_with( + Source="verify_email_address@example.com", **kwargs + ).should.throw(ClientError) + conn.verify_email_address(EmailAddress="verify_email_address@example.com") + conn.send_email(Source="verify_email_address@example.com", **kwargs) + + conn.send_email.when.called_with( + Source="verify_email_identity@example.com", **kwargs + ).should.throw(ClientError) + conn.verify_email_identity(EmailAddress="verify_email_identity@example.com") + conn.send_email(Source="verify_email_identity@example.com", **kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota["SentLast24Hours"]) + sent_count.should.equal(2) + + @mock_ses def test_send_templated_email(): conn = boto3.client("ses", region_name="us-east-1") @@ -139,19 +170,7 @@ def test_send_html_email(): def test_send_raw_email(): conn = boto3.client("ses", region_name="us-east-1") - message = MIMEMultipart() - message["Subject"] = "Test" - message["From"] = "test@example.com" - message["To"] = "to@example.com, foo@example.com" - - # Message body - part = MIMEText("test file attached") - message.attach(part) - - # Attachment - part = MIMEText("contents of test file here") - 
part.add_header("Content-Disposition", "attachment; filename=test.txt") - message.attach(part) + message = get_raw_email() kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) @@ -165,6 +184,39 @@ def test_send_raw_email(): sent_count.should.equal(2) +@mock_ses +def test_send_raw_email_validate_domain(): + conn = boto3.client("ses", region_name="us-east-1") + + message = get_raw_email() + + kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_domain_identity(Domain="example.com") + conn.send_raw_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota["SentLast24Hours"]) + sent_count.should.equal(2) + + +def get_raw_email(): + message = MIMEMultipart() + message["Subject"] = "Test" + message["From"] = "test@example.com" + message["To"] = "to@example.com, foo@example.com" + # Message body + part = MIMEText("test file attached") + message.attach(part) + # Attachment + part = MIMEText("contents of test file here") + part.add_header("Content-Disposition", "attachment; filename=test.txt") + message.attach(part) + return message + + @mock_ses def test_send_raw_email_without_source(): conn = boto3.client("ses", region_name="us-east-1") @@ -214,3 +266,221 @@ def test_send_raw_email_without_source_or_from(): kwargs = dict(RawMessage={"Data": message.as_string()}) conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + +@mock_ses +def test_send_email_notification_with_encoded_sender(): + sender = "Foo " + conn = boto3.client("ses", region_name="us-east-1") + conn.verify_email_identity(EmailAddress=sender) + response = conn.send_email( + Source=sender, + Destination={"ToAddresses": ["your.friend@hotmail.com"]}, + Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}},}, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_ses +def test_create_configuration_set(): + conn = boto3.client("ses", region_name="us-east-1") + conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"})) + + conn.create_configuration_set_event_destination( + ConfigurationSetName="test", + EventDestination={ + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) + + with pytest.raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName="failtest", + EventDestination={ + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) + + ex.value.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") + + with pytest.raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName="test", + EventDestination={ + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) + + ex.value.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") + + +@mock_ses +def test_create_receipt_rule_set(): + conn = boto3.client("ses", region_name="us-east-1") + result = conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with pytest.raises(ClientError) as ex: + 
conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + ex.value.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") + + +@mock_ses +def test_create_receipt_rule(): + conn = boto3.client("ses", region_name="us-east-1") + rule_set_name = "testRuleSet" + conn.create_receipt_rule_set(RuleSetName=rule_set_name) + + result = conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with pytest.raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.value.response["Error"]["Code"].should.equal("RuleAlreadyExists") + + with pytest.raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName="InvalidRuleSetaName", + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.value.response["Error"]["Code"].should.equal("RuleSetDoesNotExist") + + +@mock_ses +def test_create_ses_template(): + conn = boto3.client("ses", region_name="us-east-1") + + conn.create_template( + Template={ + "TemplateName": "MyTemplate", + "SubjectPart": "Greetings, {{name}}!", + "TextPart": "Dear {{name}}," + "\r\nYour favorite animal is {{favoriteanimal}}.", + "HtmlPart": "
<h1>Hello {{name}}," + "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>", + } + ) + with pytest.raises(ClientError) as ex: + conn.create_template( + Template={ + "TemplateName": "MyTemplate", + "SubjectPart": "Greetings, {{name}}!", + "TextPart": "Dear {{name}}," + "\r\nYour favorite animal is {{favoriteanimal}}.", + "HtmlPart": "<h1>Hello {{name}}," + "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>", + } + ) + + ex.value.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists") + + # get a template which is already added + result = conn.get_template(TemplateName="MyTemplate") + result["Template"]["TemplateName"].should.equal("MyTemplate") + result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!") + result["Template"]["HtmlPart"].should.equal( + "<h1>Hello {{name}}," "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>
" + ) + # get a template which is not present + with pytest.raises(ClientError) as ex: + conn.get_template(TemplateName="MyFakeTemplate") + + ex.value.response["Error"]["Code"].should.equal("TemplateDoesNotExist") + + result = conn.list_templates() + result["TemplatesMetadata"][0]["Name"].should.equal("MyTemplate") diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py index 43d4000bf..2a165080e 100644 --- a/tests/test_ses/test_ses_sns_boto3.py +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -7,7 +7,6 @@ from six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText import sure # noqa -from nose import tools from moto import mock_ses, mock_sns, mock_sqs from moto.ses.models import SESFeedback from moto.core import ACCOUNT_ID diff --git a/tests/test_sns/__init__.py b/tests/test_sns/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_sns/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 30fa80f15..cc7dbb8d6 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -54,7 +54,7 @@ def test_publish_to_sqs(): "us-east-1", ) acquired_message = re.sub( - "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", "2015-01-01T12:00:00.000Z", message.get_body(), ) @@ -98,7 +98,7 @@ def test_publish_to_sqs_in_different_region(): ) acquired_message = re.sub( - "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", "2015-01-01T12:00:00.000Z", message.get_body(), ) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 51e0a9f57..797ccdaba 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -10,9 +10,10 @@ import sure # noqa import responses from botocore.exceptions import ClientError -from nose.tools import assert_raises -from moto import mock_sns, mock_sqs +import pytest +from moto import mock_sns, mock_sqs, settings from moto.core import ACCOUNT_ID +from moto.sns import sns_backend MESSAGE_FROM_SQS_TEMPLATE = ( '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:' @@ -48,7 +49,7 @@ def test_publish_to_sqs(): messages = queue.receive_messages(MaxNumberOfMessages=1) expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-east-1") acquired_message = re.sub( - "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", "2015-01-01T12:00:00.000Z", messages[0].body, ) @@ -148,36 +149,41 @@ def test_publish_to_sqs_msg_attr_byte_value(): conn.create_topic(Name="some-topic") response = conn.list_topics() topic_arn = response["Topics"][0]["TopicArn"] - - sqs_conn = boto3.resource("sqs", region_name="us-east-1") - queue = sqs_conn.create_queue(QueueName="test-queue") - + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="test-queue") + conn.subscribe( + TopicArn=topic_arn, 
Protocol="sqs", Endpoint=queue.attributes["QueueArn"], + ) + queue_raw = sqs.create_queue(QueueName="test-queue-raw") conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=queue_raw.attributes["QueueArn"], + Attributes={"RawMessageDelivery": "true"}, ) - message = "my message" + conn.publish( TopicArn=topic_arn, - Message=message, + Message="my message", MessageAttributes={ "store": {"DataType": "Binary", "BinaryValue": b"\x02\x03\x04"} }, ) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages] - message_attributes.should.equal( - [ - { - "store": { - "Type": "Binary", - "Value": base64.b64encode(b"\x02\x03\x04").decode(), - } + + message = json.loads(queue.receive_messages()[0].body) + message["Message"].should.equal("my message") + message["MessageAttributes"].should.equal( + { + "store": { + "Type": "Binary", + "Value": base64.b64encode(b"\x02\x03\x04").decode(), } - ] + } ) + message = queue_raw.receive_messages()[0] + message.body.should.equal("my message") + @mock_sqs @mock_sns @@ -187,6 +193,12 @@ def test_publish_to_sqs_msg_attr_number_type(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") topic.subscribe(Protocol="sqs", Endpoint=queue.attributes["QueueArn"]) + queue_raw = sqs.create_queue(QueueName="test-queue-raw") + topic.subscribe( + Protocol="sqs", + Endpoint=queue_raw.attributes["QueueArn"], + Attributes={"RawMessageDelivery": "true"}, + ) topic.publish( Message="test message", @@ -199,40 +211,38 @@ def test_publish_to_sqs_msg_attr_number_type(): {"retries": {"Type": "Number", "Value": 0}} ) + message = queue_raw.receive_messages()[0] + message.body.should.equal("test message") + @mock_sns def test_publish_sms(): client = boto3.client("sns", region_name="us-east-1") - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp["TopicArn"] - - client.subscribe(TopicArn=arn, Protocol="sms", Endpoint="+15551234567") result = client.publish(PhoneNumber="+15551234567", Message="my message") + result.should.contain("MessageId") + if not settings.TEST_SERVER_MODE: + sns_backend.sms_messages.should.have.key(result["MessageId"]).being.equal( + ("+15551234567", "my message") + ) @mock_sns def test_publish_bad_sms(): client = boto3.client("sns", region_name="us-east-1") - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp["TopicArn"] - client.subscribe(TopicArn=arn, Protocol="sms", Endpoint="+15551234567") - - try: - # Test invalid number + # Test invalid number + with pytest.raises(ClientError) as cm: client.publish(PhoneNumber="NAA+15551234567", Message="my message") - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameter") + cm.value.response["Error"]["Code"].should.equal("InvalidParameter") + cm.value.response["Error"]["Message"].should.contain("not meet the E164") - try: - # Test not found number - client.publish(PhoneNumber="+44001234567", Message="my message") - except ClientError as err: - err.response["Error"]["Code"].should.equal("ParameterValueInvalid") + # Test to long ASCII message + with pytest.raises(ClientError) as cm: + client.publish(PhoneNumber="+15551234567", Message="a" * 1601) + cm.value.response["Error"]["Code"].should.equal("InvalidParameter") + cm.value.response["Error"]["Message"].should.contain("must be less than 1600") 
@mock_sqs @@ -274,7 +284,7 @@ def test_publish_to_sqs_dump_json(): escaped = message.replace('"', '\\"') expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, "us-east-1") acquired_message = re.sub( - "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", "2015-01-01T12:00:00.000Z", messages[0].body, ) @@ -307,7 +317,7 @@ def test_publish_to_sqs_in_different_region(): messages = queue.receive_messages(MaxNumberOfMessages=1) expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-west-1") acquired_message = re.sub( - "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", "2015-01-01T12:00:00.000Z", messages[0].body, ) @@ -377,7 +387,7 @@ def test_publish_message_too_long(): sns = boto3.resource("sns", region_name="us-east-1") topic = sns.create_topic(Name="some-topic") - with assert_raises(ClientError): + with pytest.raises(ClientError): topic.publish(Message="".join(["." for i in range(0, 262145)])) # message short enough - does not raise an error diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index f773438d7..d11830dc6 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -72,9 +72,7 @@ def test_deleting_subscriptions_by_deleting_topic(): subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ "ListSubscriptionsResult" ]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["SubscriptionArn"].should.equal(subscription_arn) + subscriptions.should.have.length_of(0) # Now delete hanging subscription conn.unsubscribe(subscription_arn) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index d91b3566b..b476cd86d 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -5,9 +5,9 @@ import json import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest -from moto import mock_sns +from moto import mock_sns, mock_sqs from moto.sns.models import ( DEFAULT_PAGE_SIZE, DEFAULT_EFFECTIVE_DELIVERY_POLICY, @@ -124,11 +124,9 @@ def test_unsubscribe_from_deleted_topic(): topics = topics_json["Topics"] topics.should.have.length_of(0) - # And the subscription should still be left + # as per the documentation deleting a topic deletes all the subscriptions subscriptions = client.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["SubscriptionArn"].should.equal(subscription_arn) + subscriptions.should.have.length_of(0) # Now delete hanging subscription client.unsubscribe(SubscriptionArn=subscription_arn) @@ -295,7 +293,7 @@ def test_creating_subscription_with_attributes(): subscriptions.should.have.length_of(0) # invalid attr name - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.subscribe( TopicArn=topic_arn, Protocol="http", @@ -304,6 +302,28 @@ def test_creating_subscription_with_attributes(): ) +@mock_sns +@mock_sqs +def test_delete_subscriptions_on_delete_topic(): + sqs = boto3.client("sqs", region_name="us-east-1") + conn = boto3.client("sns", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-queue") + topic = conn.create_topic(Name="some-topic") + + conn.subscribe( + TopicArn=topic.get("TopicArn"), Protocol="sqs", Endpoint=queue.get("QueueUrl") + ) + 
subscriptions = conn.list_subscriptions()["Subscriptions"] + + subscriptions.should.have.length_of(1) + + conn.delete_topic(TopicArn=topic.get("TopicArn")) + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + @mock_sns def test_set_subscription_attributes(): conn = boto3.client("sns", region_name="us-east-1") @@ -367,17 +387,17 @@ def test_set_subscription_attributes(): attrs["Attributes"]["FilterPolicy"].should.equal(filter_policy) # not existing subscription - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.set_subscription_attributes( SubscriptionArn="invalid", AttributeName="RawMessageDelivery", AttributeValue="true", ) - with assert_raises(ClientError): + with pytest.raises(ClientError): attrs = conn.get_subscription_attributes(SubscriptionArn="invalid") # invalid attr name - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.set_subscription_attributes( SubscriptionArn=subscription_arn, AttributeName="InvalidName", @@ -482,7 +502,7 @@ def test_check_opted_out_invalid(): conn = boto3.client("sns", region_name="us-east-1") # Invalid phone number - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.check_if_phone_number_is_opted_out(phoneNumber="+44742LALALA") diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index e91ab6e2d..e46c44cc7 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -32,6 +32,12 @@ def test_create_and_delete_topic(): topics.should.have.length_of(0) +@mock_sns_deprecated +def test_delete_non_existent_topic(): + conn = boto.connect_sns() + conn.delete_topic.when.called_with("a-fake-arn").should.throw(BotoServerError) + + @mock_sns_deprecated def test_get_missing_topic(): conn = boto.connect_sns() @@ -162,3 +168,25 @@ def test_topic_paging(): topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) next_token.should.equal(None) + + +@mock_sns_deprecated +def test_topic_kms_master_key_id_attribute(): + conn = boto.connect_sns() + + conn.create_topic("test-sns-no-key-attr") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0][ + "TopicArn" + ] + attributes = conn.get_topic_attributes(topic_arn)["GetTopicAttributesResponse"][ + "GetTopicAttributesResult" + ]["Attributes"] + attributes.should_not.have.key("KmsMasterKeyId") + + conn.set_topic_attributes(topic_arn, "KmsMasterKeyId", "test-key") + attributes = conn.get_topic_attributes(topic_arn)["GetTopicAttributesResponse"][ + "GetTopicAttributesResult" + ]["Attributes"] + attributes.should.have.key("KmsMasterKeyId") + attributes["KmsMasterKeyId"].should.equal("test-key") diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 87800bd84..6b1e52df6 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -35,6 +35,15 @@ def test_create_and_delete_topic(): topics.should.have.length_of(0) +@mock_sns +def test_delete_non_existent_topic(): + conn = boto3.client("sns", region_name="us-east-1") + + conn.delete_topic.when.called_with( + TopicArn="arn:aws:sns:us-east-1:123456789012:fake-topic" + ).should.throw(conn.exceptions.NotFoundException) + + @mock_sns def test_create_topic_with_attributes(): conn = boto3.client("sns", region_name="us-east-1") @@ -511,3 +520,27 @@ def test_untag_resource_error(): conn.untag_resource.when.called_with( ResourceArn="not-existing-topic", TagKeys=["tag_key_1"] 
).should.throw(ClientError, "Resource does not exist") + + +@mock_sns +def test_topic_kms_master_key_id_attribute(): + client = boto3.client("sns", region_name="us-west-2") + resp = client.create_topic(Name="test-sns-no-key-attr",) + topic_arn = resp["TopicArn"] + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should_not.have.key("KmsMasterKeyId") + + client.set_topic_attributes( + TopicArn=topic_arn, AttributeName="KmsMasterKeyId", AttributeValue="test-key" + ) + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should.have.key("KmsMasterKeyId") + resp["Attributes"]["KmsMasterKeyId"].should.equal("test-key") + + resp = client.create_topic( + Name="test-sns-with-key-attr", Attributes={"KmsMasterKeyId": "key-id",}, + ) + topic_arn = resp["TopicArn"] + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should.have.key("KmsMasterKeyId") + resp["Attributes"]["KmsMasterKeyId"].should.equal("key-id") diff --git a/tests/test_sqs/__init__.py b/tests/test_sqs/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_sqs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index f2ab8c37c..c234f5cdc 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -3,7 +3,6 @@ from __future__ import unicode_literals import base64 import json -import os import time import uuid @@ -12,16 +11,40 @@ import boto3 import botocore.exceptions import six import sure # noqa -import tests.backport_assert_raises # noqa from boto.exception import SQSError from boto.sqs.message import Message, RawMessage from botocore.exceptions import ClientError from freezegun import freeze_time -from moto import mock_sqs, mock_sqs_deprecated, settings -from nose import SkipTest -from nose.tools import assert_raises +from moto import mock_sqs, mock_sqs_deprecated, mock_lambda, mock_logs, settings +from unittest import SkipTest +import pytest from tests.helpers import requires_boto_gte +from tests.test_awslambda.test_lambda import get_test_zip_file1, get_role_name from moto.core import ACCOUNT_ID +from moto.sqs.models import ( + MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND, + MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND, + MAXIMUM_MESSAGE_LENGTH, +) + +TEST_POLICY = """ +{ + "Version":"2012-10-17", + "Statement":[ + { + "Effect": "Allow", + "Principal": { "AWS": "*" }, + "Action": "sqs:SendMessage", + "Resource": "'$sqs_queue_arn'", + "Condition":{ + "ArnEquals":{ + "aws:SourceArn":"'$sns_topic_arn'" + } + } + } + ] +} +""" @mock_sqs @@ -70,6 +93,18 @@ def test_create_queue_with_different_attributes_fail(): else: raise RuntimeError("Should of raised QueueAlreadyExists Exception") + response = sqs.create_queue( + QueueName="test-queue1", Attributes={"FifoQueue": "True"} + ) + + attributes = {"VisibilityTimeout": "60"} + sqs.set_queue_attributes(QueueUrl=response.get("QueueUrl"), Attributes=attributes) + + new_response = sqs.create_queue( + QueueName="test-queue1", Attributes={"FifoQueue": "True"} + ) + new_response["QueueUrl"].should.equal(response.get("QueueUrl")) + @mock_sqs def test_create_fifo_queue(): @@ -176,22 +211,26 @@ def test_get_queue_url_errors(): client = boto3.client("sqs", region_name="us-east-1") client.get_queue_url.when.called_with(QueueName="non-existing-queue").should.throw( - ClientError, "The specified queue does not exist for this wsdl version." 
+ ClientError, + "The specified queue non-existing-queue does not exist for this wsdl version.", ) @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource("sqs", region_name="us-east-1") - with assert_raises(ClientError) as err: - sqs.get_queue_by_name(QueueName="nonexisting-queue") - ex = err.exception + with pytest.raises(ClientError) as err: + sqs.get_queue_by_name(QueueName="non-existing-queue") + ex = err.value ex.operation_name.should.equal("GetQueueUrl") ex.response["Error"]["Code"].should.equal("AWS.SimpleQueueService.NonExistentQueue") + ex.response["Error"]["Message"].should.equal( + "The specified queue non-existing-queue does not exist for this wsdl version." + ) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: sqs.Queue("http://whatever-incorrect-queue-address").load() - ex = err.exception + ex = err.value ex.operation_name.should.equal("GetQueueAttributes") ex.response["Error"]["Code"].should.equal("AWS.SimpleQueueService.NonExistentQueue") @@ -216,11 +255,14 @@ def test_message_send_with_attributes(): msg = queue.send_message( MessageBody="derp", MessageAttributes={ - "timestamp": {"StringValue": "1493147359900", "DataType": "Number"} + "SOME_Valid.attribute-Name": { + "StringValue": "1493147359900", + "DataType": "Number", + } }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") - msg.get("MD5OfMessageAttributes").should.equal("235c5c510d26fb653d073faed50ae77c") + msg.get("MD5OfMessageAttributes").should.equal("36655e7e9d7c0e8479fa3f3f42247ae7") msg.get("MessageId").should_not.contain(" \n") messages = queue.receive_messages() @@ -228,26 +270,121 @@ def test_message_send_with_attributes(): @mock_sqs -def test_message_with_complex_attributes(): +def test_message_with_invalid_attributes(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + with pytest.raises(ClientError) as e: + queue.send_message( + MessageBody="derp", + MessageAttributes={ + "öther_encodings": {"DataType": "String", "StringValue": "str"}, + }, + ) + ex = e.value + ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid") + ex.response["Error"]["Message"].should.equal( + "The message attribute name 'öther_encodings' is invalid. " + "Attribute name can contain A-Z, a-z, 0-9, underscore (_), hyphen (-), and period (.) characters." 
+ ) + + +@mock_sqs +def test_message_with_string_attributes(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="blah") msg = queue.send_message( MessageBody="derp", MessageAttributes={ - "ccc": {"StringValue": "testjunk", "DataType": "String"}, - "aaa": {"BinaryValue": b"\x02\x03\x04", "DataType": "Binary"}, - "zzz": {"DataType": "Number", "StringValue": "0230.01"}, - "öther_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, + "id": { + "StringValue": "2018fc74-4f77-1a5a-1be0-c2d037d5052b", + "DataType": "String", + }, + "contentType": {"StringValue": "application/json", "DataType": "String"}, + "timestamp": { + "StringValue": "1602845432024", + "DataType": "Number.java.lang.Long", + }, }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") - msg.get("MD5OfMessageAttributes").should.equal("8ae21a7957029ef04146b42aeaa18a22") + msg.get("MD5OfMessageAttributes").should.equal("b12289320bb6e494b18b645ef562b4a9") msg.get("MessageId").should_not.contain(" \n") messages = queue.receive_messages() messages.should.have.length_of(1) +@mock_sqs +def test_message_with_binary_attribute(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + "id": { + "StringValue": "453ae55e-f03b-21a6-a4b1-70c2e2e8fe71", + "DataType": "String", + }, + "mybin": {"BinaryValue": "kekchebukek", "DataType": "Binary"}, + "timestamp": { + "StringValue": "1603134247654", + "DataType": "Number.java.lang.Long", + }, + "contentType": {"StringValue": "application/json", "DataType": "String"}, + }, + ) + msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") + msg.get("MD5OfMessageAttributes").should.equal("049075255ebc53fb95f7f9f3cedf3c50") + msg.get("MessageId").should_not.contain(" \n") + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_attributes_have_labels(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + "timestamp": { + "DataType": "Number.java.lang.Long", + "StringValue": "1493147359900", + } + }, + ) + msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") + msg.get("MD5OfMessageAttributes").should.equal("2e2e4876d8e0bd6b8c2c8f556831c349") + msg.get("MessageId").should_not.contain(" \n") + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_attributes_invalid_datatype(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + + with pytest.raises(ClientError) as e: + queue.send_message( + MessageBody="derp", + MessageAttributes={ + "timestamp": { + "DataType": "InvalidNumber", + "StringValue": "149314735990a", + } + }, + ) + ex = e.value + ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid") + ex.response["Error"]["Message"].should.equal( + "The message attribute 'timestamp' has an invalid message attribute type, the set of supported type " + "prefixes is Binary, Number, and String." 
+ ) + + @mock_sqs def test_send_message_with_message_group_id(): sqs = boto3.resource("sqs", region_name="us-east-1") @@ -353,7 +490,7 @@ def test_delete_queue(): queue.delete() conn.list_queues().get("QueueUrls").should.equal(None) - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): queue.delete() @@ -384,7 +521,7 @@ def test_get_queue_attributes(): response["Attributes"]["CreatedTimestamp"].should.be.a(six.string_types) response["Attributes"]["DelaySeconds"].should.equal("0") response["Attributes"]["LastModifiedTimestamp"].should.be.a(six.string_types) - response["Attributes"]["MaximumMessageSize"].should.equal("65536") + response["Attributes"]["MaximumMessageSize"].should.equal("262144") response["Attributes"]["MessageRetentionPeriod"].should.equal("345600") response["Attributes"]["QueueArn"].should.equal( "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID) @@ -406,7 +543,7 @@ def test_get_queue_attributes(): response["Attributes"].should.equal( { "ApproximateNumberOfMessages": "0", - "MaximumMessageSize": "65536", + "MaximumMessageSize": "262144", "QueueArn": "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), "VisibilityTimeout": "30", "RedrivePolicy": json.dumps( @@ -514,9 +651,9 @@ def test_send_receive_message_with_attributes(): }, ) - messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[ - "Messages" - ] + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"] + )["Messages"] message1 = messages[0] message2 = messages[1] @@ -532,6 +669,65 @@ def test_send_receive_message_with_attributes(): ) +@mock_sqs +def test_send_receive_message_with_attributes_with_labels(): + sqs = boto3.resource("sqs", region_name="us-east-1") + conn = boto3.client("sqs", region_name="us-east-1") + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = "this is a test message" + body_two = "this is another test message" + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359901", + "DataType": "Number.java.lang.Long", + } + }, + ) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"] + )["Messages"] + + message1 = messages[0] + message2 = messages[1] + + message1.get("Body").should.equal(body_one) + message2.get("Body").should.equal(body_two) + + message1.get("MD5OfMessageAttributes").should.equal( + "2e2e4876d8e0bd6b8c2c8f556831c349" + ) + message2.get("MD5OfMessageAttributes").should.equal( + "cfa7c73063c6e2dbf9be34232a1978cf" + ) + + response = queue.send_message( + MessageBody="test message", + MessageAttributes={ + "somevalue": {"StringValue": "somevalue", "DataType": "String.custom",} + }, + ) + + response.get("MD5OfMessageAttributes").should.equal( + "9e05cca738e70ff6c6041e82d5e77ef1" + ) + + @mock_sqs def test_send_receive_message_timestamps(): sqs = boto3.resource("sqs", region_name="us-east-1") @@ -561,10 +757,10 @@ def test_max_number_of_messages_invalid_param(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(MaxNumberOfMessages=11) - with assert_raises(ClientError): + with 
pytest.raises(ClientError): queue.receive_messages(MaxNumberOfMessages=0) # no error but also no messages returned @@ -576,10 +772,10 @@ def test_wait_time_seconds_invalid_param(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(WaitTimeSeconds=-1) - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(WaitTimeSeconds=21) # no error but also no messages returned @@ -641,7 +837,14 @@ def test_send_message_with_attributes(): queue.write(message) - messages = conn.receive_message(queue) + messages = conn.receive_message( + queue, + message_attributes=[ + "test.attribute_name", + "test.binary_attribute", + "test.number_attribute", + ], + ) messages[0].get_body().should.equal(body) @@ -861,7 +1064,7 @@ def test_send_batch_operation_with_message_attributes(): ) queue.write_batch([message_tuple]) - messages = queue.get_messages() + messages = queue.get_messages(message_attributes=["name1"]) messages[0].get_body().should.equal("test message 1") for name, value in message_tuple[3].items(): @@ -984,6 +1187,38 @@ def test_purge_action(): queue.count().should.equal(0) +@mock_sqs +def test_purge_queue_before_delete_message(): + client = boto3.client("sqs", region_name="us-east-1") + + create_resp = client.create_queue( + QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"} + ) + queue_url = create_resp["QueueUrl"] + + client.send_message( + QueueUrl=queue_url, + MessageGroupId="test", + MessageDeduplicationId="first_message", + MessageBody="first_message", + ) + receive_resp1 = client.receive_message(QueueUrl=queue_url) + + # purge before call delete_message + client.purge_queue(QueueUrl=queue_url) + + client.send_message( + QueueUrl=queue_url, + MessageGroupId="test", + MessageDeduplicationId="second_message", + MessageBody="second_message", + ) + receive_resp2 = client.receive_message(QueueUrl=queue_url) + + len(receive_resp2.get("Messages", [])).should.equal(1) + receive_resp2["Messages"][0]["Body"].should.equal("second_message") + + @mock_sqs_deprecated def test_delete_message_after_visibility_timeout(): VISIBILITY_TIMEOUT = 1 @@ -1044,6 +1279,8 @@ def test_send_message_batch(): "DataType": "String", } }, + "MessageGroupId": "message_group_id_1", + "MessageDeduplicationId": "message_deduplication_id_1", }, { "Id": "id_2", @@ -1052,6 +1289,8 @@ def test_send_message_batch(): "MessageAttributes": { "attribute_name_2": {"StringValue": "123", "DataType": "Number"} }, + "MessageGroupId": "message_group_id_2", + "MessageDeduplicationId": "message_deduplication_id_2", }, ], ) @@ -1060,16 +1299,101 @@ def test_send_message_batch(): ["id_1", "id_2"] ) - response = client.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10) + response = client.receive_message( + QueueUrl=queue_url, + MaxNumberOfMessages=10, + MessageAttributeNames=["attribute_name_1", "attribute_name_2"], + ) response["Messages"][0]["Body"].should.equal("body_1") response["Messages"][0]["MessageAttributes"].should.equal( {"attribute_name_1": {"StringValue": "attribute_value_1", "DataType": "String"}} ) + response["Messages"][0]["Attributes"]["MessageGroupId"].should.equal( + "message_group_id_1" + ) + response["Messages"][0]["Attributes"]["MessageDeduplicationId"].should.equal( + "message_deduplication_id_1" + ) response["Messages"][1]["Body"].should.equal("body_2") response["Messages"][1]["MessageAttributes"].should.equal( 
{"attribute_name_2": {"StringValue": "123", "DataType": "Number"}} ) + response["Messages"][1]["Attributes"]["MessageGroupId"].should.equal( + "message_group_id_2" + ) + response["Messages"][1]["Attributes"]["MessageDeduplicationId"].should.equal( + "message_deduplication_id_2" + ) + + +@mock_sqs +def test_message_attributes_in_receive_message(): + sqs = boto3.resource("sqs", region_name="us-east-1") + conn = boto3.client("sqs", region_name="us-east-1") + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = "this is a test message" + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"] + )["Messages"] + + messages[0]["MessageAttributes"].should.equal( + { + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + } + ) + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[ + "Messages" + ] + + messages[0].get("MessageAttributes").should.equal(None) + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["All"] + )["Messages"] + + messages[0]["MessageAttributes"].should.equal( + { + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + } + ) @mock_sqs @@ -1147,9 +1471,24 @@ def test_send_message_batch_errors(): ) +@mock_sqs +def test_send_message_batch_with_empty_list(): + client = boto3.client("sqs", region_name="us-east-1") + + response = client.create_queue(QueueName="test-queue") + queue_url = response["QueueUrl"] + + client.send_message_batch.when.called_with( + QueueUrl=queue_url, Entries=[] + ).should.throw( + ClientError, + "There should be at least one SendMessageBatchRequestEntry in the request.", + ) + + @mock_sqs def test_batch_change_message_visibility(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") with freeze_time("2015-01-01 12:00:00"): @@ -1159,9 +1498,15 @@ def test_batch_change_message_visibility(): ) queue_url = resp["QueueUrl"] - sqs.send_message(QueueUrl=queue_url, MessageBody="msg1") - sqs.send_message(QueueUrl=queue_url, MessageBody="msg2") - sqs.send_message(QueueUrl=queue_url, MessageBody="msg3") + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg1", MessageGroupId="group1" + ) + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg2", MessageGroupId="group2" + ) + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg3", MessageGroupId="group3" + ) with freeze_time("2015-01-01 12:01:00"): receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) @@ -1264,6 +1609,36 @@ def test_permissions(): ) +@mock_sqs +def test_get_queue_attributes_template_response_validation(): + client = boto3.client("sqs", region_name="us-east-1") + + resp = client.create_queue( + QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"} + ) + queue_url = resp["QueueUrl"] + + attrs = 
client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"]) + assert attrs.get("Attributes").get("Policy") is None + + attributes = {"Policy": TEST_POLICY} + + client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes) + attrs = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["Policy"]) + assert attrs.get("Attributes").get("Policy") is not None + + assert ( + json.loads(attrs.get("Attributes").get("Policy")).get("Version") == "2012-10-17" + ) + assert len(json.loads(attrs.get("Attributes").get("Policy")).get("Statement")) == 1 + assert ( + json.loads(attrs.get("Attributes").get("Policy")) + .get("Statement")[0] + .get("Action") + == "sqs:SendMessage" + ) + + @mock_sqs def test_add_permission_errors(): client = boto3.client("sqs", region_name="us-east-1") @@ -1276,14 +1651,14 @@ def test_add_permission_errors(): Actions=["ReceiveMessage"], ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test", AWSAccountIds=["111111111111"], Actions=["ReceiveMessage", "SendMessage"], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AddPermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterValue") @@ -1291,14 +1666,14 @@ def test_add_permission_errors(): "Value test for parameter Label is invalid. " "Reason: Already exists." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", AWSAccountIds=["111111111111"], Actions=["RemovePermission"], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AddPermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterValue") @@ -1307,14 +1682,14 @@ def test_add_permission_errors(): "Reason: Only the queue owner is allowed to invoke this action." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", AWSAccountIds=["111111111111"], Actions=[], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AddPermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("MissingParameter") @@ -1322,14 +1697,14 @@ def test_add_permission_errors(): "The request must contain the parameter Actions." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", AWSAccountIds=[], Actions=["ReceiveMessage"], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AddPermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterValue") @@ -1337,7 +1712,7 @@ def test_add_permission_errors(): "Value [] for parameter PrincipalId is invalid. Reason: Unable to verify." 
) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", @@ -1353,7 +1728,7 @@ def test_add_permission_errors(): "SendMessage", ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AddPermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.response["Error"]["Code"].should.contain("OverLimit") @@ -1368,9 +1743,9 @@ def test_remove_permission_errors(): response = client.create_queue(QueueName="test-queue") queue_url = response["QueueUrl"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.remove_permission(QueueUrl=queue_url, Label="test") - ex = e.exception + ex = e.value ex.operation_name.should.equal("RemovePermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterValue") @@ -1500,7 +1875,7 @@ def test_create_fifo_queue_with_dlq(): ) # Cant have fifo queue with non fifo DLQ - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue2.fifo", Attributes={ @@ -1514,7 +1889,7 @@ def test_create_fifo_queue_with_dlq(): @mock_sqs def test_queue_with_dlq(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") sqs = boto3.client("sqs", region_name="us-east-1") @@ -1539,8 +1914,12 @@ def test_queue_with_dlq(): ) queue_url2 = resp["QueueUrl"] - sqs.send_message(QueueUrl=queue_url2, MessageBody="msg1") - sqs.send_message(QueueUrl=queue_url2, MessageBody="msg2") + sqs.send_message( + QueueUrl=queue_url2, MessageBody="msg1", MessageGroupId="group" + ) + sqs.send_message( + QueueUrl=queue_url2, MessageBody="msg2", MessageGroupId="group" + ) with freeze_time("2015-01-01 13:00:00"): resp = sqs.receive_message( @@ -1590,7 +1969,7 @@ def test_redrive_policy_available(): assert json.loads(attributes["RedrivePolicy"]) == redrive_policy # Cant have redrive policy without maxReceiveCount - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue2", Attributes={ @@ -1608,7 +1987,7 @@ def test_redrive_policy_non_existent_queue(): "maxReceiveCount": 1, } - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue", Attributes={"RedrivePolicy": json.dumps(redrive_policy)}, @@ -1671,20 +2050,24 @@ def test_receive_messages_with_message_group_id(): queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) queue.send_message(MessageBody="message-1", MessageGroupId="group") queue.send_message(MessageBody="message-2", MessageGroupId="group") + queue.send_message(MessageBody="message-3", MessageGroupId="group") + queue.send_message(MessageBody="separate-message", MessageGroupId="anothergroup") - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] + messages = queue.receive_messages(MaxNumberOfMessages=2) + messages.should.have.length_of(2) + messages[0].attributes["MessageGroupId"].should.equal("group") - # received message is not deleted! 
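The hard-coded MD5OfMessageAttributes values asserted in the attribute tests above come from the attribute-encoding scheme AWS documents for SQS. Below is a minimal sketch of that documented digest, not moto's internal implementation; it assumes boto3-style attribute dicts and tolerates a str BinaryValue the way these tests do.

import hashlib
import struct


def md5_of_message_attributes(attributes):
    # Attribute names are processed in lexicographic order; every
    # component is preceded by its length as a 4-byte big-endian integer.
    digest = hashlib.md5()
    for name in sorted(attributes):
        attribute = attributes[name]
        data_type = attribute["DataType"]
        for part in (name, data_type):
            encoded = part.encode("utf-8")
            digest.update(struct.pack("!I", len(encoded)))
            digest.update(encoded)
        if data_type.startswith("Binary"):
            digest.update(b"\x02")  # transport type 2: binary value
            value = attribute["BinaryValue"]
            if isinstance(value, str):  # tolerate str input, as the tests do
                value = value.encode("utf-8")
        else:
            digest.update(b"\x01")  # transport type 1: string value
            value = attribute["StringValue"].encode("utf-8")
        digest.update(struct.pack("!I", len(value)))
        digest.update(value)
    return digest.hexdigest()

Fed the single "timestamp" attribute used in the label tests above, this sketch should reproduce the pinned digest 2e2e4876d8e0bd6b8c2c8f556831c349.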
- - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) + # Different client can not 'see' messages from the group until they are processed + messages_for_client_2 = queue.receive_messages(WaitTimeSeconds=0) + messages_for_client_2.should.have.length_of(1) + messages_for_client_2[0].body.should.equal("separate-message") # message is now processed, next one should be available - message.delete() + for message in messages: + message.delete() messages = queue.receive_messages() messages.should.have.length_of(1) + messages[0].body.should.equal("message-3") @mock_sqs @@ -1715,7 +2098,7 @@ def test_receive_messages_with_message_group_id_on_requeue(): @mock_sqs def test_receive_messages_with_message_group_id_on_visibility_timeout(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") with freeze_time("2015-01-01 12:00:00"): @@ -1731,12 +2114,12 @@ def test_receive_messages_with_message_group_id_on_visibility_timeout(): messages.should.have.length_of(1) message = messages[0] - # received message is not deleted! + # received message is not processed yet + messages_for_second_client = queue.receive_messages(WaitTimeSeconds=0) + messages_for_second_client.should.have.length_of(0) - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - message.change_visibility(VisibilityTimeout=10) + for message in messages: + message.change_visibility(VisibilityTimeout=10) with freeze_time("2015-01-01 12:00:05"): # no timeout yet @@ -1779,3 +2162,124 @@ def test_list_queues_limits_to_1000_queues(): list(resource.queues.filter(QueueNamePrefix="test-queue")).should.have.length_of( 1000 ) + + +@mock_sqs +def test_send_messages_to_fifo_without_message_group_id(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + queue = sqs.create_queue( + QueueName="blah.fifo", + Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"}, + ) + + with pytest.raises(Exception) as e: + queue.send_message(MessageBody="message-1") + ex = e.value + ex.response["Error"]["Code"].should.equal("MissingParameter") + ex.response["Error"]["Message"].should.equal( + "The request must contain the parameter MessageGroupId." 
+ ) + + +@mock_logs +@mock_lambda +@mock_sqs +def test_invoke_function_from_sqs_exception(): + logs_conn = boto3.client("logs", region_name="us-east-1") + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + conn = boto3.client("lambda", region_name="us-east-1") + func = conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.create_event_source_mapping( + EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"] + ) + + assert response["EventSourceArn"] == queue.attributes["QueueArn"] + assert response["State"] == "Enabled" + + entries = [ + { + "Id": "1", + "MessageBody": json.dumps({"uuid": str(uuid.uuid4()), "test": "test"}), + } + ] + + queue.send_messages(Entries=entries) + + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + assert len(log_streams) >= 1 + + result = logs_conn.get_log_events( + logGroupName="/aws/lambda/testFunction", + logStreamName=log_streams[0]["logStreamName"], + ) + for event in result.get("events"): + if "custom log event" in event["message"]: + return + time.sleep(1) + + assert False, "Test Failed" + + +@mock_sqs +def test_maximum_message_size_attribute_default(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + queue = sqs.create_queue(QueueName="test-queue",) + int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) + with pytest.raises(Exception) as e: + queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) + ex = e.value + ex.response["Error"]["Code"].should.equal("InvalidParameterValue") + + +@mock_sqs +def test_maximum_message_size_attribute_fails_for_invalid_values(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + invalid_values = [ + MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND - 1, + MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND + 1, + ] + for message_size in invalid_values: + with pytest.raises(ClientError) as e: + sqs.create_queue( + QueueName="test-queue", + Attributes={"MaximumMessageSize": str(message_size)}, + ) + ex = e.value + ex.response["Error"]["Code"].should.equal("InvalidAttributeValue") + + +@mock_sqs +def test_send_message_fails_when_message_size_greater_than_max_message_size(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + message_size_limit = 12345 + queue = sqs.create_queue( + QueueName="test-queue", + Attributes={"MaximumMessageSize": str(message_size_limit)}, + ) + int(queue.attributes["MaximumMessageSize"]).should.equal(message_size_limit) + with pytest.raises(ClientError) as e: + queue.send_message(MessageBody="a" * (message_size_limit + 1)) + ex = e.value + ex.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.response["Error"]["Message"].should.contain( + "{} bytes".format(message_size_limit) + ) diff --git a/tests/test_sqs/test_sqs_cloudformation.py b/tests/test_sqs/test_sqs_cloudformation.py new file mode 100644 index 000000000..73f76c8f6 --- /dev/null +++ b/tests/test_sqs/test_sqs_cloudformation.py @@ -0,0 +1,38 @@ +import boto3 +from moto import mock_sqs, mock_cloudformation + +sqs_template_with_tags = """ +{ + "AWSTemplateFormatVersion": "2010-09-09", + 
"Resources": { + "SQSQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "Tags" : [ + { + "Key" : "keyname1", + "Value" : "value1" + }, + { + "Key" : "keyname2", + "Value" : "value2" + } + ] + } + } + } +}""" + + +@mock_sqs +@mock_cloudformation +def test_create_from_cloudformation_json_with_tags(): + cf = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("sqs", region_name="us-east-1") + + cf.create_stack(StackName="test-sqs", TemplateBody=sqs_template_with_tags) + + queue_url = client.list_queues()["QueueUrls"][0] + + queue_tags = client.list_queue_tags(QueueUrl=queue_url)["Tags"] + queue_tags.should.equal({"keyname1": "value1", "keyname2": "value2"}) diff --git a/tests/test_ssm/__init__.py b/tests/test_ssm/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_ssm/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 5b978520d..5aad14429 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -1,16 +1,17 @@ from __future__ import unicode_literals +import string + import boto3 import botocore.exceptions import sure # noqa import datetime import uuid -import json from botocore.exceptions import ClientError, ParamValidationError -from nose.tools import assert_raises +import pytest -from moto import mock_ssm, mock_cloudformation +from moto import mock_ec2, mock_ssm @mock_ssm @@ -30,6 +31,18 @@ def test_delete_parameter(): len(response["Parameters"]).should.equal(0) +@mock_ssm +def test_delete_nonexistent_parameter(): + client = boto3.client("ssm", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.delete_parameter(Name="test_noexist") + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal( + "Parameter test_noexist not found." + ) + + @mock_ssm def test_delete_parameters(): client = boto3.client("ssm", region_name="us-east-1") @@ -184,6 +197,33 @@ def test_get_parameters_by_path(): len(response["Parameters"]).should.equal(1) response.should_not.have.key("NextToken") + filters = [{"Key": "Name", "Values": ["error"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Name. " + "Valid filter keys include: [Type, KeyId].", + ) + + filters = [{"Key": "Path", "Values": ["/error"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Path. " + "Valid filter keys include: [Type, KeyId].", + ) + + filters = [{"Key": "Tier", "Values": ["Standard"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Tier. " + "Valid filter keys include: [Type, KeyId].", + ) + @mock_ssm def test_put_parameter(): @@ -258,6 +298,73 @@ def test_put_parameter(): ) +@mock_ssm +def test_put_parameter_invalid_names(): + client = boto3.client("ssm", region_name="us-east-1") + + invalid_prefix_err = ( + 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).' 
+ ) + + client.put_parameter.when.called_with( + Name="ssm_test", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="SSM_TEST", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="aws_test", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="AWS_TEST", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + ssm_path = "/ssm_test/path/to/var" + client.put_parameter.when.called_with( + Name=ssm_path, Value="value", Type="String" + ).should.throw( + ClientError, + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). If formed as a path, it can consist of ' + "sub-paths divided by slash symbol; each sub-path can be formed as a mix of letters, numbers and the following " + "3 symbols .-_", + ) + + ssm_path = "/SSM/PATH/TO/VAR" + client.put_parameter.when.called_with( + Name=ssm_path, Value="value", Type="String" + ).should.throw( + ClientError, + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). If formed as a path, it can consist of ' + "sub-paths divided by slash symbol; each sub-path can be formed as a mix of letters, numbers and the following " + "3 symbols .-_", + ) + + aws_path = "/aws_test/path/to/var" + client.put_parameter.when.called_with( + Name=aws_path, Value="value", Type="String" + ).should.throw( + ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ) + + aws_path = "/AWS/PATH/TO/VAR" + client.put_parameter.when.called_with( + Name=aws_path, Value="value", Type="String" + ).should.throw( + ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ) + + @mock_ssm def test_put_parameter_china(): client = boto3.client("ssm", region_name="cn-north-1") @@ -288,6 +395,86 @@ def test_get_parameter(): ) +@mock_ssm +def test_get_parameter_with_version_and_labels(): + client = boto3.client("ssm", region_name="us-east-1") + + client.put_parameter( + Name="test-1", Description="A test parameter", Value="value", Type="String" + ) + client.put_parameter( + Name="test-2", Description="A test parameter", Value="value", Type="String" + ) + + client.label_parameter_version( + Name="test-2", ParameterVersion=1, Labels=["test-label"] + ) + + response = client.get_parameter(Name="test-1:1", WithDecryption=False) + + response["Parameter"]["Name"].should.equal("test-1") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-1" + ) + + response = client.get_parameter(Name="test-2:1", WithDecryption=False) + response["Parameter"]["Name"].should.equal("test-2") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-2" + ) + + response = client.get_parameter(Name="test-2:test-label", WithDecryption=False) + response["Parameter"]["Name"].should.equal("test-2") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + 
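GetParameter accepts a qualified name in these assertions: "name:version" or "name:label", with a second ":" rejected outright. A minimal selector parser consistent with the cases this test pins down might look as follows; it is an illustration only, with ValueError standing in for the modelled ParameterNotFound error.

def parse_parameter_selector(name):
    # At most one ":" is allowed; a numeric selector is treated as a
    # version, anything else as a label.
    base, sep, selector = name.partition(":")
    if not sep:
        return base, None, None
    if ":" in selector:
        raise ValueError("Parameter %s not found." % name)
    if selector.isdigit():
        return base, int(selector), None
    return base, None, selector

For example, parse_parameter_selector("test-2:test-label") yields ("test-2", None, "test-label"), while "test-2:2:3" raises.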
response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-2" + ) + + with pytest.raises(ClientError) as ex: + client.get_parameter(Name="test-2:2:3", WithDecryption=False) + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal( + "Parameter test-2:2:3 not found." + ) + + with pytest.raises(ClientError) as ex: + client.get_parameter(Name="test-2:2", WithDecryption=False) + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal("Parameter test-2:2 not found.") + + +@mock_ssm +def test_get_parameters_errors(): + client = boto3.client("ssm", region_name="us-east-1") + + ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]} + + for name, value in ssm_parameters.items(): + client.put_parameter(Name=name, Value=value, Type="String") + + with pytest.raises(ClientError) as e: + client.get_parameters(Names=list(ssm_parameters.keys())) + ex = e.value + ex.operation_name.should.equal("GetParameters") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(ssm_parameters.keys()) + ) + ) + + @mock_ssm def test_get_nonexistant_parameter(): client = boto3.client("ssm", region_name="us-east-1") @@ -466,6 +653,9 @@ def test_describe_parameters_with_parameter_filters_name(): client = boto3.client("ssm", region_name="us-east-1") client.put_parameter(Name="param", Value="value", Type="String") client.put_parameter(Name="/param-2", Value="value-2", Type="String") + client.put_parameter(Name="/tangent-3", Value="value-3", Type="String") + client.put_parameter(Name="tangram-4", Value="value-4", Type="String") + client.put_parameter(Name="standby-5", Value="value-5", Type="String") response = client.describe_parameters( ParameterFilters=[{"Key": "Name", "Values": ["param"]}] @@ -505,6 +695,22 @@ def test_describe_parameters_with_parameter_filters_name(): parameters.should.have.length_of(2) response.should_not.have.key("NextToken") + response = client.describe_parameters( + ParameterFilters=[{"Key": "Name", "Option": "Contains", "Values": ["ram"]}] + ) + + parameters = response["Parameters"] + parameters.should.have.length_of(3) + response.should_not.have.key("NextToken") + + response = client.describe_parameters( + ParameterFilters=[{"Key": "Name", "Option": "Contains", "Values": ["/tan"]}] + ) + + parameters = response["Parameters"] + parameters.should.have.length_of(2) + response.should_not.have.key("NextToken") + @mock_ssm def test_describe_parameters_with_parameter_filters_path(): @@ -885,6 +1091,7 @@ def test_get_parameter_history(): param["Value"].should.equal("value-%d" % index) param["Version"].should.equal(index + 1) param["Description"].should.equal("A test parameter version %d" % index) + param["Labels"].should.equal([]) len(parameters_response).should.equal(3) @@ -926,6 +1133,424 @@ def test_get_parameter_history_with_secure_string(): len(parameters_response).should.equal(3) +@mock_ssm +def test_label_parameter_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter( + 
Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + + response = client.label_parameter_version( + Name=test_parameter_name, Labels=["test-label"] + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + + +@mock_ssm +def test_label_parameter_version_with_specific_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["test-label"] + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + + +@mock_ssm +def test_label_parameter_version_twice(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + + response = client.get_parameter_history(Name=test_parameter_name) + len(response["Parameters"]).should.equal(1) + response["Parameters"][0]["Labels"].should.equal(test_labels) + + +@mock_ssm +def test_label_parameter_moving_versions(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=2, Labels=test_labels + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(2) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 2 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + + +@mock_ssm +def test_label_parameter_moving_versions_complex(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + response = client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=1, + Labels=["test-label1", "test-label2", "test-label3"], + ) + 
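The moving-version tests rely on labels being unique per parameter: re-applying a label to another version silently removes it from the version that previously held it. A small hypothetical in-memory model of that rule, using {"Version": int, "Labels": [str]} records:

def apply_labels(history, version, labels):
    # A label may live on only one version at a time, so applying it to
    # `version` first strips it from every other version.
    for record in history:
        if record["Version"] != version:
            record["Labels"] = [
                label for label in record["Labels"] if label not in labels
            ]
    target = next(r for r in history if r["Version"] == version)
    target["Labels"] += [label for label in labels if label not in target["Labels"]]

history = [{"Version": v, "Labels": []} for v in (1, 2, 3)]
apply_labels(history, 1, ["test-label"])
apply_labels(history, 2, ["test-label"])
assert history[0]["Labels"] == [] and history[1]["Labels"] == ["test-label"]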
response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=2, + Labels=["test-label2", "test-label3"], + ) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(2) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = ( + ["test-label2", "test-label3"] + if param["Version"] == 2 + else (["test-label1"] if param["Version"] == 1 else []) + ) + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + + +@mock_ssm +def test_label_parameter_version_exception_ten_labels_at_once(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = [ + "test-label1", + "test-label2", + "test-label3", + "test-label4", + "test-label5", + "test-label6", + "test-label7", + "test-label8", + "test-label9", + "test-label10", + "test-label11", + ] + + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + client.label_parameter_version.when.called_with( + Name="test", ParameterVersion=1, Labels=test_labels + ).should.throw( + ClientError, + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." + "Move one or more labels to another version and try again.", + ) + + +@mock_ssm +def test_label_parameter_version_exception_ten_labels_over_multiple_calls(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=1, + Labels=[ + "test-label1", + "test-label2", + "test-label3", + "test-label4", + "test-label5", + ], + ) + client.label_parameter_version.when.called_with( + Name="test", + ParameterVersion=1, + Labels=[ + "test-label6", + "test-label7", + "test-label8", + "test-label9", + "test-label10", + "test-label11", + ], + ).should.throw( + ClientError, + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." 
+ "Move one or more labels to another version and try again.", + ) + + +@mock_ssm +def test_label_parameter_version_invalid_name(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + response = client.label_parameter_version.when.called_with( + Name=test_parameter_name, Labels=["test-label"] + ).should.throw( + ClientError, + "An error occurred (ParameterNotFound) when calling the LabelParameterVersion operation: " + "Parameter test not found.", + ) + + +@mock_ssm +def test_label_parameter_version_invalid_parameter_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + + response = client.label_parameter_version.when.called_with( + Name=test_parameter_name, Labels=["test-label"], ParameterVersion=5 + ).should.throw( + ClientError, + "An error occurred (ParameterVersionNotFound) when calling the LabelParameterVersion operation: " + "Systems Manager could not find version 5 of test. " + "Verify the version and try again.", + ) + + +@mock_ssm +def test_label_parameter_version_invalid_label(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["awsabc"] + ) + response["InvalidLabels"].should.equal(["awsabc"]) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["ssmabc"] + ) + response["InvalidLabels"].should.equal(["ssmabc"]) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["9abc"] + ) + response["InvalidLabels"].should.equal(["9abc"]) + + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["abc/123"] + ) + response["InvalidLabels"].should.equal(["abc/123"]) + + client.label_parameter_version.when.called_with( + Name=test_parameter_name, ParameterVersion=1, Labels=["a" * 101] + ).should.throw( + ClientError, + "1 validation error detected: " + "Value '[%s]' at 'labels' failed to satisfy constraint: " + "Member must satisfy constraint: " + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" + % ("a" * 101), + ) + + +@mock_ssm +def test_get_parameter_history_with_label(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 1 else [] + param["Labels"].should.equal(labels) + + 
len(parameters_response).should.equal(3) + + +@mock_ssm +def test_get_parameter_history_with_label_non_latest(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=2, Labels=test_labels + ) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 2 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + + +@mock_ssm +def test_get_parameter_history_with_label_latest_assumed(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version(Name=test_parameter_name, Labels=test_labels) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 3 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + + @mock_ssm def test_get_parameter_history_missing_parameter(): client = boto3.client("ssm", region_name="us-east-1") @@ -1044,7 +1669,7 @@ def test_list_commands(): cmd["InstanceIds"].should.contain("i-123456") # test the error case for an invalid command id - with assert_raises(ClientError): + with pytest.raises(ClientError): response = client.list_commands(CommandId=str(uuid.uuid4())) @@ -1076,78 +1701,46 @@ def test_get_command_invocation(): invocation_response["InstanceId"].should.equal(instance_id) # test the error case for an invalid instance id - with assert_raises(ClientError): + with pytest.raises(ClientError): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId="i-FAKE" ) # test the error case for an invalid plugin name - with assert_raises(ClientError): + with pytest.raises(ClientError): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId=instance_id, PluginName="FAKE" ) +@mock_ec2 @mock_ssm -@mock_cloudformation -def test_get_command_invocations_from_stack(): - stack_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Test Stack", - "Resources": { - "EC2Instance1": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-test-image-id", - "KeyName": "test", - "InstanceType": "t2.micro", - "Tags": [ - {"Key": "Test Description", "Value": "Test tag"}, - {"Key": "Test Name", "Value": "Name tag 
for tests"}, - ], - }, - } - }, - "Outputs": { - "test": { - "Description": "Test Output", - "Value": "Test output value", - "Export": {"Name": "Test value to export"}, - }, - "PublicIP": {"Value": "Test public ip"}, - }, - } - - cloudformation_client = boto3.client("cloudformation", region_name="us-east-1") - - stack_template_str = json.dumps(stack_template) - - response = cloudformation_client.create_stack( - StackName="test_stack", - TemplateBody=stack_template_str, - Capabilities=("CAPABILITY_IAM",), +def test_get_command_invocations_by_instance_tag(): + ec2 = boto3.client("ec2", region_name="us-east-1") + ssm = boto3.client("ssm", region_name="us-east-1") + tag_specifications = [ + {"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": "test-tag"}]} + ] + num_instances = 3 + resp = ec2.run_instances( + ImageId="ami-1234abcd", + MaxCount=num_instances, + MinCount=num_instances, + TagSpecifications=tag_specifications, ) + instance_ids = [] + for instance in resp["Instances"]: + instance_ids.append(instance["InstanceId"]) + instance_ids.should.have.length_of(num_instances) - client = boto3.client("ssm", region_name="us-east-1") + command_id = ssm.send_command( + DocumentName="AWS-RunShellScript", + Targets=[{"Key": "tag:Name", "Values": ["test-tag"]}], + )["Command"]["CommandId"] - ssm_document = "AWS-RunShellScript" - params = {"commands": ["#!/bin/bash\necho 'hello world'"]} + resp = ssm.list_commands(CommandId=command_id) + resp["Commands"][0]["TargetCount"].should.equal(num_instances) - response = client.send_command( - Targets=[ - {"Key": "tag:aws:cloudformation:stack-name", "Values": ("test_stack",)} - ], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region="us-east-2", - OutputS3BucketName="the-bucket", - OutputS3KeyPrefix="pref", - ) - - cmd = response["Command"] - cmd_id = cmd["CommandId"] - instance_ids = cmd["InstanceIds"] - - invocation_response = client.get_command_invocation( - CommandId=cmd_id, InstanceId=instance_ids[0], PluginName="aws:runShellScript" - ) + for instance_id in instance_ids: + resp = ssm.get_command_invocation(CommandId=command_id, InstanceId=instance_id) + resp["Status"].should.equal("Success") diff --git a/tests/test_ssm/test_ssm_cloudformation.py b/tests/test_ssm/test_ssm_cloudformation.py new file mode 100644 index 000000000..a2205ceba --- /dev/null +++ b/tests/test_ssm/test_ssm_cloudformation.py @@ -0,0 +1,70 @@ +import boto3 +import json + + +from moto import mock_ssm, mock_cloudformation + + +@mock_ssm +@mock_cloudformation +def test_get_command_invocations_from_stack(): + stack_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Test Stack", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-test-image-id", + "KeyName": "test", + "InstanceType": "t2.micro", + "Tags": [ + {"Key": "Test Description", "Value": "Test tag"}, + {"Key": "Test Name", "Value": "Name tag for tests"}, + ], + }, + } + }, + "Outputs": { + "test": { + "Description": "Test Output", + "Value": "Test output value", + "Export": {"Name": "Test value to export"}, + }, + "PublicIP": {"Value": "Test public ip"}, + }, + } + + cloudformation_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_template_str = json.dumps(stack_template) + + response = cloudformation_client.create_stack( + StackName="test_stack", + TemplateBody=stack_template_str, + Capabilities=("CAPABILITY_IAM",), + ) + + client = boto3.client("ssm", region_name="us-east-1") + + ssm_document = 
"AWS-RunShellScript" + params = {"commands": ["#!/bin/bash\necho 'hello world'"]} + + response = client.send_command( + Targets=[ + {"Key": "tag:aws:cloudformation:stack-name", "Values": ("test_stack",)} + ], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region="us-east-2", + OutputS3BucketName="the-bucket", + OutputS3KeyPrefix="pref", + ) + + cmd = response["Command"] + cmd_id = cmd["CommandId"] + instance_ids = cmd["InstanceIds"] + + invocation_response = client.get_command_invocation( + CommandId=cmd_id, InstanceId=instance_ids[0], PluginName="aws:runShellScript" + ) diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py new file mode 100644 index 000000000..9a1fb7cf4 --- /dev/null +++ b/tests/test_ssm/test_ssm_docs.py @@ -0,0 +1,769 @@ +from __future__ import unicode_literals + +import boto3 +import botocore.exceptions +import sure # noqa +import datetime +import json +import pkg_resources +import yaml +import hashlib +import copy +from moto.core import ACCOUNT_ID + +from moto import mock_ssm + + +def _get_yaml_template(): + template_path = "/".join(["test_ssm", "test_templates", "good.yaml"]) + resource_path = pkg_resources.resource_string("tests", template_path) + return resource_path + + +def _validate_document_description( + doc_name, + doc_description, + json_doc, + expected_document_version, + expected_latest_version, + expected_default_version, + expected_format, +): + + if expected_format == "JSON": + doc_description["Hash"].should.equal( + hashlib.sha256(json.dumps(json_doc).encode("utf-8")).hexdigest() + ) + else: + doc_description["Hash"].should.equal( + hashlib.sha256(yaml.dump(json_doc).encode("utf-8")).hexdigest() + ) + + doc_description["HashType"].should.equal("Sha256") + doc_description["Name"].should.equal(doc_name) + doc_description["Owner"].should.equal(ACCOUNT_ID) + + difference = datetime.datetime.utcnow() - doc_description["CreatedDate"] + if difference.min > datetime.timedelta(minutes=1): + assert False + + doc_description["Status"].should.equal("Active") + doc_description["DocumentVersion"].should.equal(expected_document_version) + doc_description["Description"].should.equal(json_doc["description"]) + + doc_description["Parameters"] = sorted( + doc_description["Parameters"], key=lambda doc: doc["Name"] + ) + + doc_description["Parameters"][0]["Name"].should.equal("Parameter1") + doc_description["Parameters"][0]["Type"].should.equal("Integer") + doc_description["Parameters"][0]["Description"].should.equal("Command Duration.") + doc_description["Parameters"][0]["DefaultValue"].should.equal("3") + + doc_description["Parameters"][1]["Name"].should.equal("Parameter2") + doc_description["Parameters"][1]["Type"].should.equal("String") + doc_description["Parameters"][1]["DefaultValue"].should.equal("def") + + doc_description["Parameters"][2]["Name"].should.equal("Parameter3") + doc_description["Parameters"][2]["Type"].should.equal("Boolean") + doc_description["Parameters"][2]["Description"].should.equal("A boolean") + doc_description["Parameters"][2]["DefaultValue"].should.equal("False") + + doc_description["Parameters"][3]["Name"].should.equal("Parameter4") + doc_description["Parameters"][3]["Type"].should.equal("StringList") + doc_description["Parameters"][3]["Description"].should.equal("A string list") + doc_description["Parameters"][3]["DefaultValue"].should.equal('["abc", "def"]') + + doc_description["Parameters"][4]["Name"].should.equal("Parameter5") + doc_description["Parameters"][4]["Type"].should.equal("StringMap") + + 
doc_description["Parameters"][5]["Name"].should.equal("Parameter6") + doc_description["Parameters"][5]["Type"].should.equal("MapList") + + if expected_format == "JSON": + # We have to replace single quotes from the response to package it back up + json.loads(doc_description["Parameters"][4]["DefaultValue"]).should.equal( + { + "NotificationArn": "$dependency.topicArn", + "NotificationEvents": ["Failed"], + "NotificationType": "Command", + } + ) + + json.loads(doc_description["Parameters"][5]["DefaultValue"]).should.equal( + [ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": "50"}}, + {"DeviceName": "/dev/sdm", "Ebs": {"VolumeSize": "100"}}, + ] + ) + else: + yaml.safe_load(doc_description["Parameters"][4]["DefaultValue"]).should.equal( + { + "NotificationArn": "$dependency.topicArn", + "NotificationEvents": ["Failed"], + "NotificationType": "Command", + } + ) + yaml.safe_load(doc_description["Parameters"][5]["DefaultValue"]).should.equal( + [ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": "50"}}, + {"DeviceName": "/dev/sdm", "Ebs": {"VolumeSize": "100"}}, + ] + ) + + doc_description["DocumentType"].should.equal("Command") + doc_description["SchemaVersion"].should.equal("2.2") + doc_description["LatestVersion"].should.equal(expected_latest_version) + doc_description["DefaultVersion"].should.equal(expected_default_version) + doc_description["DocumentFormat"].should.equal(expected_format) + + +def _get_doc_validator( + response, version_name, doc_version, json_doc_content, document_format +): + response["Name"].should.equal("TestDocument3") + if version_name: + response["VersionName"].should.equal(version_name) + response["DocumentVersion"].should.equal(doc_version) + response["Status"].should.equal("Active") + if document_format == "JSON": + json.loads(response["Content"]).should.equal(json_doc_content) + else: + yaml.safe_load(response["Content"]).should.equal(json_doc_content) + response["DocumentType"].should.equal("Command") + response["DocumentFormat"].should.equal(document_format) + + +@mock_ssm +def test_create_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + response = client.create_document( + Content=yaml.dump(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="YAML", + ) + doc_description = response["DocumentDescription"] + _validate_document_description( + "TestDocument", doc_description, json_doc, "1", "1", "1", "YAML" + ) + + response = client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument2", + DocumentType="Command", + DocumentFormat="JSON", + ) + doc_description = response["DocumentDescription"] + _validate_document_description( + "TestDocument2", doc_description, json_doc, "1", "1", "1", "JSON" + ) + + response = client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="JSON", + VersionName="Base", + TargetType="/AWS::EC2::Instance", + Tags=[{"Key": "testing", "Value": "testingValue"}], + ) + doc_description = response["DocumentDescription"] + doc_description["VersionName"].should.equal("Base") + doc_description["TargetType"].should.equal("/AWS::EC2::Instance") + doc_description["Tags"].should.equal([{"Key": "testing", "Value": "testingValue"}]) + + _validate_document_description( + "TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON" + ) + + try: + client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument3", + 
DocumentType="Command", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("CreateDocument") + err.response["Error"]["Message"].should.equal( + "The specified document already exists." + ) + + try: + client.create_document( + Content=yaml.dump(json_doc), + Name="TestDocument4", + DocumentType="Command", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("CreateDocument") + err.response["Error"]["Message"].should.equal( + "The content for the document is not valid." + ) + + del json_doc["parameters"] + response = client.create_document( + Content=yaml.dump(json_doc), + Name="EmptyParamDoc", + DocumentType="Command", + DocumentFormat="YAML", + ) + doc_description = response["DocumentDescription"] + + doc_description["Hash"].should.equal( + hashlib.sha256(yaml.dump(json_doc).encode("utf-8")).hexdigest() + ) + doc_description["HashType"].should.equal("Sha256") + doc_description["Name"].should.equal("EmptyParamDoc") + doc_description["Owner"].should.equal(ACCOUNT_ID) + + difference = datetime.datetime.utcnow() - doc_description["CreatedDate"] + if difference.min > datetime.timedelta(minutes=1): + assert False + + doc_description["Status"].should.equal("Active") + doc_description["DocumentVersion"].should.equal("1") + doc_description["Description"].should.equal(json_doc["description"]) + doc_description["DocumentType"].should.equal("Command") + doc_description["SchemaVersion"].should.equal("2.2") + doc_description["LatestVersion"].should.equal("1") + doc_description["DefaultVersion"].should.equal("1") + doc_description["DocumentFormat"].should.equal("YAML") + + +@mock_ssm +def test_get_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.get_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." 
+ ) + + client.create_document( + Content=yaml.dump(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", + ) + + new_json_doc = copy.copy(json_doc) + new_json_doc["description"] = "a new description" + + client.update_document( + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", + VersionName="NewBase", + ) + + response = client.get_document(Name="TestDocument3") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", DocumentFormat="YAML") + _get_doc_validator(response, "Base", "1", json_doc, "YAML") + + response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", VersionName="Base") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", DocumentVersion="1") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", DocumentVersion="2") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", VersionName="NewBase") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + response = client.get_document( + Name="TestDocument3", VersionName="NewBase", DocumentVersion="2" + ) + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + try: + response = client.get_document( + Name="TestDocument3", VersionName="BadName", DocumentVersion="2" + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) + + try: + response = client.get_document(Name="TestDocument3", DocumentVersion="3") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) + + # Updating default should update normal get + client.update_document_default_version(Name="TestDocument3", DocumentVersion="2") + + response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + +@mock_ssm +def test_delete_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.delete_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DeleteDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) + + # Test simple + client.create_document( + Content=yaml.dump(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", + TargetType="/AWS::EC2::Instance", + ) + client.delete_document(Name="TestDocument3") + + try: + client.get_document(Name="TestDocument3") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." 
+ )
+
+ # Deleting the default version while other versions exist should fail
+ client.create_document(
+ Content=yaml.dump(json_doc),
+ Name="TestDocument3",
+ DocumentType="Command",
+ DocumentFormat="YAML",
+ VersionName="Base",
+ TargetType="/AWS::EC2::Instance",
+ )
+
+ new_json_doc = copy.copy(json_doc)
+ new_json_doc["description"] = "a new description"
+
+ client.update_document(
+ Content=json.dumps(new_json_doc),
+ Name="TestDocument3",
+ DocumentVersion="$LATEST",
+ VersionName="NewBase",
+ )
+
+ new_json_doc["description"] = "a new description2"
+ client.update_document(
+ Content=json.dumps(new_json_doc),
+ Name="TestDocument3",
+ DocumentVersion="$LATEST",
+ )
+
+ new_json_doc["description"] = "a new description3"
+ client.update_document(
+ Content=json.dumps(new_json_doc),
+ Name="TestDocument3",
+ DocumentVersion="$LATEST",
+ )
+
+ new_json_doc["description"] = "a new description4"
+ client.update_document(
+ Content=json.dumps(new_json_doc),
+ Name="TestDocument3",
+ DocumentVersion="$LATEST",
+ )
+
+ try:
+ client.delete_document(Name="TestDocument3", DocumentVersion="1")
+ raise RuntimeError("Should fail")
+ except botocore.exceptions.ClientError as err:
+ err.operation_name.should.equal("DeleteDocument")
+ err.response["Error"]["Message"].should.equal(
+ "Default version of the document can't be deleted."
+ )
+
+ try:
+ client.delete_document(Name="TestDocument3", VersionName="Base")
+ raise RuntimeError("Should fail")
+ except botocore.exceptions.ClientError as err:
+ err.operation_name.should.equal("DeleteDocument")
+ err.response["Error"]["Message"].should.equal(
+ "Default version of the document can't be deleted."
+ )
+
+ # Make sure the failed deletes had no side effects
+ response = client.get_document(Name="TestDocument3")
+ _get_doc_validator(response, "Base", "1", json_doc, "JSON")
+
+ client.delete_document(Name="TestDocument3", DocumentVersion="5")
+
+ # Check that the latest version has changed
+ response = client.describe_document(Name="TestDocument3")
+ response["Document"]["LatestVersion"].should.equal("4")
+
+ client.delete_document(Name="TestDocument3", VersionName="NewBase")
+
+ # Make sure the remaining versions are still retrievable
+ client.get_document(Name="TestDocument3", DocumentVersion="1")
+ client.get_document(Name="TestDocument3", DocumentVersion="3")
+ client.get_document(Name="TestDocument3", DocumentVersion="4")
+
+ client.delete_document(Name="TestDocument3")
+
+ try:
+ client.get_document(Name="TestDocument3", DocumentVersion="1")
+ raise RuntimeError("Should fail")
+ except botocore.exceptions.ClientError as err:
+ err.operation_name.should.equal("GetDocument")
+ err.response["Error"]["Message"].should.equal(
+ "The specified document does not exist."
+ )
+
+ try:
+ client.get_document(Name="TestDocument3", DocumentVersion="3")
+ raise RuntimeError("Should fail")
+ except botocore.exceptions.ClientError as err:
+ err.operation_name.should.equal("GetDocument")
+ err.response["Error"]["Message"].should.equal(
+ "The specified document does not exist."
+ )
+
+ try:
+ client.get_document(Name="TestDocument3", DocumentVersion="4")
+ raise RuntimeError("Should fail")
+ except botocore.exceptions.ClientError as err:
+ err.operation_name.should.equal("GetDocument")
+ err.response["Error"]["Message"].should.equal(
+ "The specified document does not exist."
+ ) + + response = client.list_documents() + len(response["DocumentIdentifiers"]).should.equal(0) + + +@mock_ssm +def test_update_document_default_version(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.update_document_default_version(Name="DNE", DocumentVersion="1") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocumentDefaultVersion") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) + + client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentType="Command", + VersionName="Base", + ) + + json_doc["description"] = "a new description" + + client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + DocumentFormat="JSON", + ) + + json_doc["description"] = "a new description2" + + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST" + ) + + response = client.update_document_default_version( + Name="TestDocument", DocumentVersion="2" + ) + response["Description"]["Name"].should.equal("TestDocument") + response["Description"]["DefaultVersion"].should.equal("2") + + json_doc["description"] = "a new description3" + + client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + VersionName="NewBase", + ) + + response = client.update_document_default_version( + Name="TestDocument", DocumentVersion="4" + ) + response["Description"]["Name"].should.equal("TestDocument") + response["Description"]["DefaultVersion"].should.equal("4") + response["Description"]["DefaultVersionName"].should.equal("NewBase") + + +@mock_ssm +def test_update_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.update_document( + Name="DNE", + Content=json.dumps(json_doc), + DocumentVersion="1", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) + + client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="JSON", + VersionName="Base", + ) + + try: + client.update_document( + Name="TestDocument", + Content=json.dumps(json_doc), + DocumentVersion="2", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal( + "The document version is not valid or does not exist." + ) + + # Duplicate content throws an error + try: + client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="1", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal( + "The content of the association document matches another " + "document. Change the content of the document and try again." 
+ ) + + json_doc["description"] = "a new description" + # Duplicate version name + try: + client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="1", + DocumentFormat="JSON", + VersionName="Base", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal( + "The specified version name is a duplicate." + ) + + response = client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + VersionName="Base2", + DocumentVersion="1", + DocumentFormat="JSON", + ) + response["DocumentDescription"]["Description"].should.equal("a new description") + response["DocumentDescription"]["DocumentVersion"].should.equal("2") + response["DocumentDescription"]["LatestVersion"].should.equal("2") + response["DocumentDescription"]["DefaultVersion"].should.equal("1") + + json_doc["description"] = "a new description2" + + response = client.update_document( + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + DocumentFormat="JSON", + VersionName="NewBase", + ) + response["DocumentDescription"]["Description"].should.equal("a new description2") + response["DocumentDescription"]["DocumentVersion"].should.equal("3") + response["DocumentDescription"]["LatestVersion"].should.equal("3") + response["DocumentDescription"]["DefaultVersion"].should.equal("1") + response["DocumentDescription"]["VersionName"].should.equal("NewBase") + + +@mock_ssm +def test_describe_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.describe_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DescribeDocument") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." 
+ )
+
+ client.create_document(
+ Content=yaml.dump(json_doc),
+ Name="TestDocument",
+ DocumentType="Command",
+ DocumentFormat="YAML",
+ VersionName="Base",
+ TargetType="/AWS::EC2::Instance",
+ Tags=[{"Key": "testing", "Value": "testingValue"}],
+ )
+ response = client.describe_document(Name="TestDocument")
+ doc_description = response["Document"]
+ _validate_document_description(
+ "TestDocument", doc_description, json_doc, "1", "1", "1", "YAML"
+ )
+
+ # Update the document and make sure describe_document still describes the default version
+ new_json_doc = copy.copy(json_doc)
+ new_json_doc["description"] = "a new description2"
+
+ client.update_document(
+ Content=json.dumps(new_json_doc), Name="TestDocument", DocumentVersion="$LATEST"
+ )
+ response = client.describe_document(Name="TestDocument")
+ doc_description = response["Document"]
+ _validate_document_description(
+ "TestDocument", doc_description, json_doc, "1", "2", "1", "YAML"
+ )
+
+
+@mock_ssm
+def test_list_documents():
+ template_file = _get_yaml_template()
+ json_doc = yaml.safe_load(template_file)
+
+ client = boto3.client("ssm", region_name="us-east-1")
+
+ client.create_document(
+ Content=json.dumps(json_doc),
+ Name="TestDocument",
+ DocumentType="Command",
+ DocumentFormat="JSON",
+ )
+ client.create_document(
+ Content=json.dumps(json_doc),
+ Name="TestDocument2",
+ DocumentType="Command",
+ DocumentFormat="JSON",
+ )
+ client.create_document(
+ Content=json.dumps(json_doc),
+ Name="TestDocument3",
+ DocumentType="Command",
+ DocumentFormat="JSON",
+ TargetType="/AWS::EC2::Instance",
+ )
+
+ response = client.list_documents()
+ len(response["DocumentIdentifiers"]).should.equal(3)
+ response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument")
+ response["DocumentIdentifiers"][1]["Name"].should.equal("TestDocument2")
+ response["DocumentIdentifiers"][2]["Name"].should.equal("TestDocument3")
+ response["NextToken"].should.equal("")
+
+ response = client.list_documents(MaxResults=1)
+ len(response["DocumentIdentifiers"]).should.equal(1)
+ response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument")
+ response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1")
+ response["NextToken"].should.equal("1")
+
+ response = client.list_documents(MaxResults=1, NextToken=response["NextToken"])
+ len(response["DocumentIdentifiers"]).should.equal(1)
+ response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument2")
+ response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1")
+ response["NextToken"].should.equal("2")
+
+ response = client.list_documents(MaxResults=1, NextToken=response["NextToken"])
+ len(response["DocumentIdentifiers"]).should.equal(1)
+ response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument3")
+ response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1")
+ response["NextToken"].should.equal("")
+
+ # Make sure updates do not interfere with listing
+ json_doc["description"] = "a new description"
+ client.update_document(
+ Content=json.dumps(json_doc),
+ Name="TestDocument",
+ DocumentVersion="$LATEST",
+ DocumentFormat="JSON",
+ )
+
+ client.update_document(
+ Content=json.dumps(json_doc),
+ Name="TestDocument2",
+ DocumentVersion="$LATEST",
+ DocumentFormat="JSON",
+ )
+
+ client.update_document_default_version(Name="TestDocument", DocumentVersion="2")
+
+ response = client.list_documents()
+ len(response["DocumentIdentifiers"]).should.equal(3)
+ response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument")
+ response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("2")
+
+ response["DocumentIdentifiers"][1]["Name"].should.equal("TestDocument2")
+ response["DocumentIdentifiers"][1]["DocumentVersion"].should.equal("1")
+
+ response["DocumentIdentifiers"][2]["Name"].should.equal("TestDocument3")
+ response["DocumentIdentifiers"][2]["DocumentVersion"].should.equal("1")
+ response["NextToken"].should.equal("")
+
+ response = client.list_documents(Filters=[{"Key": "Owner", "Values": ["Self"]}])
+ len(response["DocumentIdentifiers"]).should.equal(3)
+
+ response = client.list_documents(
+ Filters=[{"Key": "TargetType", "Values": ["/AWS::EC2::Instance"]}]
+ )
+ len(response["DocumentIdentifiers"]).should.equal(1)
diff --git a/tests/test_ssm/test_templates/good.yaml b/tests/test_ssm/test_templates/good.yaml
new file mode 100644
index 000000000..7f0372f3a
--- /dev/null
+++ b/tests/test_ssm/test_templates/good.yaml
@@ -0,0 +1,47 @@
+schemaVersion: "2.2"
+description: "Sample Yaml"
+parameters:
+ Parameter1:
+ type: "Integer"
+ default: 3
+ description: "Command Duration."
+ allowedValues: [1,2,3,4]
+ Parameter2:
+ type: "String"
+ default: "def"
+ description:
+ allowedValues: ["abc", "def", "ghi"]
+ allowedPattern: '^[a-zA-Z0-9_\-.]{3,128}$'
+ Parameter3:
+ type: "Boolean"
+ default: false
+ description: "A boolean"
+ allowedValues: [True, False]
+ Parameter4:
+ type: "StringList"
+ default: ["abc", "def"]
+ description: "A string list"
+ Parameter5:
+ type: "StringMap"
+ default:
+ NotificationType: Command
+ NotificationEvents:
+ - Failed
+ NotificationArn: "$dependency.topicArn"
+ description:
+ Parameter6:
+ type: "MapList"
+ default:
+ - DeviceName: "/dev/sda1"
+ Ebs:
+ VolumeSize: '50'
+ - DeviceName: "/dev/sdm"
+ Ebs:
+ VolumeSize: '100'
+ description:
+mainSteps:
+ - action: "aws:runShellScript"
+ name: "sampleCommand"
+ inputs:
+ runCommand:
+ - "echo hi"
diff --git a/tests/test_stepfunctions/__init__.py b/tests/test_stepfunctions/__init__.py
new file mode 100644
index 000000000..08a1c1568
--- /dev/null
+++ b/tests/test_stepfunctions/__init__.py
@@ -0,0 +1 @@
+# This file is intentionally left blank.
diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 3e0a8115d..13a6809f5 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -1,14 +1,14 @@ from __future__ import unicode_literals import boto3 +import json import sure # noqa -import datetime from datetime import datetime from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest -from moto import mock_sts, mock_stepfunctions +from moto import mock_cloudformation, mock_sts, mock_stepfunctions from moto.core import ACCOUNT_ID region = "us-east-1" @@ -134,7 +134,7 @@ def test_state_machine_creation_fails_with_invalid_names(): # for invalid_name in invalid_names: - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError): client.create_state_machine( name=invalid_name, definition=str(simple_definition), @@ -147,7 +147,7 @@ def test_state_machine_creation_requires_valid_role_arn(): client = boto3.client("stepfunctions", region_name=region) name = "example_step_function" # - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError): client.create_state_machine( name=name, definition=str(simple_definition), @@ -155,6 +155,33 @@ def test_state_machine_creation_requires_valid_role_arn(): ) +@mock_stepfunctions +@mock_sts +def test_update_state_machine(): + client = boto3.client("stepfunctions", region_name=region) + + resp = client.create_state_machine( + name="test", definition=str(simple_definition), roleArn=_get_default_role() + ) + state_machine_arn = resp["stateMachineArn"] + + updated_role = _get_default_role() + "-updated" + updated_definition = str(simple_definition).replace( + "DefaultState", "DefaultStateUpdated" + ) + resp = client.update_state_machine( + stateMachineArn=state_machine_arn, + definition=updated_definition, + roleArn=updated_role, + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["updateDate"].should.be.a(datetime) + + desc = client.describe_state_machine(stateMachineArn=state_machine_arn) + desc["definition"].should.equal(updated_definition) + desc["roleArn"].should.equal(updated_role) + + @mock_stepfunctions def test_state_machine_list_returns_empty_list_by_default(): client = boto3.client("stepfunctions", region_name=region) @@ -168,15 +195,15 @@ def test_state_machine_list_returns_empty_list_by_default(): def test_state_machine_list_returns_created_state_machines(): client = boto3.client("stepfunctions", region_name=region) # - machine2 = client.create_state_machine( - name="name2", definition=str(simple_definition), roleArn=_get_default_role() - ) machine1 = client.create_state_machine( name="name1", definition=str(simple_definition), roleArn=_get_default_role(), tags=[{"key": "tag_key", "value": "tag_value"}], ) + machine2 = client.create_state_machine( + name="name2", definition=str(simple_definition), roleArn=_get_default_role() + ) list = client.list_state_machines() # list["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @@ -195,6 +222,28 @@ def test_state_machine_list_returns_created_state_machines(): ) +@mock_stepfunctions +def test_state_machine_list_pagination(): + client = boto3.client("stepfunctions", region_name=region) + for i in range(25): + machine_name = "StateMachine-{}".format(i) + client.create_state_machine( + name=machine_name, + definition=str(simple_definition), + roleArn=_get_default_role(), + ) + + resp = client.list_state_machines() + 
resp.should_not.have.key("nextToken") + resp["stateMachines"].should.have.length_of(25) + + paginator = client.get_paginator("list_state_machines") + page_iterator = paginator.paginate(maxResults=5) + for page in page_iterator: + page["stateMachines"].should.have.length_of(5) + page["stateMachines"][-1]["name"].should.contain("24") + + @mock_stepfunctions @mock_sts def test_state_machine_creation_is_idempotent_by_name(): @@ -242,7 +291,7 @@ def test_state_machine_creation_can_be_described(): def test_state_machine_throws_error_when_describing_unknown_machine(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError): unknown_state_machine = ( "arn:aws:states:" + region @@ -253,12 +302,21 @@ def test_state_machine_throws_error_when_describing_unknown_machine(): client.describe_state_machine(stateMachineArn=unknown_state_machine) +@mock_stepfunctions +@mock_sts +def test_state_machine_throws_error_when_describing_bad_arn(): + client = boto3.client("stepfunctions", region_name=region) + # + with pytest.raises(ClientError): + client.describe_state_machine(stateMachineArn="bad") + + @mock_stepfunctions @mock_sts def test_state_machine_throws_error_when_describing_machine_in_different_account(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError): unknown_state_machine = ( "arn:aws:states:" + region + ":000000000000:stateMachine:unknown" ) @@ -295,6 +353,85 @@ def test_state_machine_can_deleted_nonexisting_machine(): sm_list["stateMachines"].should.have.length_of(0) +@mock_stepfunctions +def test_state_machine_tagging_non_existent_resource_fails(): + client = boto3.client("stepfunctions", region_name=region) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID + ) + with pytest.raises(ClientError) as ex: + client.tag_resource(resourceArn=non_existent_arn, tags=[]) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.value.response["Error"]["Message"].should.contain(non_existent_arn) + + +@mock_stepfunctions +def test_state_machine_untagging_non_existent_resource_fails(): + client = boto3.client("stepfunctions", region_name=region) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID + ) + with pytest.raises(ClientError) as ex: + client.untag_resource(resourceArn=non_existent_arn, tagKeys=[]) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.value.response["Error"]["Message"].should.contain(non_existent_arn) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_tagging(): + client = boto3.client("stepfunctions", region_name=region) + tags = [ + {"key": "tag_key1", "value": "tag_value1"}, + {"key": "tag_key2", "value": "tag_value2"}, + ] + machine = client.create_state_machine( + name="test", definition=str(simple_definition), roleArn=_get_default_role(), + ) + client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + resp["tags"].should.equal(tags) + + tags_update = [ + {"key": "tag_key1", "value": "tag_value1_new"}, + {"key": "tag_key3", "value": "tag_value3"}, + ] + client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags_update) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + tags_expected = [ + 
tags_update[0], + tags[1], + tags_update[1], + ] + resp["tags"].should.equal(tags_expected) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_untagging(): + client = boto3.client("stepfunctions", region_name=region) + tags = [ + {"key": "tag_key1", "value": "tag_value1"}, + {"key": "tag_key2", "value": "tag_value2"}, + {"key": "tag_key3", "value": "tag_value3"}, + ] + machine = client.create_state_machine( + name="test", + definition=str(simple_definition), + roleArn=_get_default_role(), + tags=tags, + ) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + resp["tags"].should.equal(tags) + tags_to_delete = ["tag_key1", "tag_key2"] + client.untag_resource( + resourceArn=machine["stateMachineArn"], tagKeys=tags_to_delete + ) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + expected_tags = [tag for tag in tags if tag["key"] not in tags_to_delete] + resp["tags"].should.equal(expected_tags) + + @mock_stepfunctions @mock_sts def test_state_machine_list_tags_for_created_machine(): @@ -362,6 +499,15 @@ def test_state_machine_start_execution(): execution["startDate"].should.be.a(datetime) +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_bad_arn_raises_exception(): + client = boto3.client("stepfunctions", region_name=region) + # + with pytest.raises(ClientError): + client.start_execution(stateMachineArn="bad") + + @mock_stepfunctions @mock_sts def test_state_machine_start_execution_with_custom_name(): @@ -386,6 +532,68 @@ def test_state_machine_start_execution_with_custom_name(): execution["startDate"].should.be.a(datetime) +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_fails_on_duplicate_execution_name(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + execution_one = client.start_execution( + stateMachineArn=sm["stateMachineArn"], name="execution_name" + ) + # + with pytest.raises(ClientError) as ex: + _ = client.start_execution( + stateMachineArn=sm["stateMachineArn"], name="execution_name" + ) + ex.value.response["Error"]["Message"].should.equal( + "Execution Already Exists: '" + execution_one["executionArn"] + "'" + ) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_with_custom_input(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + execution_input = json.dumps({"input_key": "input_value"}) + execution = client.start_execution( + stateMachineArn=sm["stateMachineArn"], input=execution_input + ) + # + execution["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + uuid_regex = "[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + expected_exec_name = ( + "arn:aws:states:" + + region + + ":" + + _get_account_id() + + ":execution:name:" + + uuid_regex + ) + execution["executionArn"].should.match(expected_exec_name) + execution["startDate"].should.be.a(datetime) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_with_invalid_input(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + with pytest.raises(ClientError): + _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="") + with 
pytest.raises(ClientError):
+ _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="{")
+
+
 @mock_stepfunctions
 @mock_sts
 def test_state_machine_list_executions():
@@ -409,6 +617,69 @@
 executions["executions"][0].shouldnt.have("stopDate")
+
+@mock_stepfunctions
+def test_state_machine_list_executions_with_filter():
+ client = boto3.client("stepfunctions", region_name=region)
+ sm = client.create_state_machine(
+ name="name", definition=str(simple_definition), roleArn=_get_default_role()
+ )
+ for i in range(20):
+ execution = client.start_execution(stateMachineArn=sm["stateMachineArn"])
+ if not i % 4:
+ client.stop_execution(executionArn=execution["executionArn"])
+
+ resp = client.list_executions(stateMachineArn=sm["stateMachineArn"])
+ resp["executions"].should.have.length_of(20)
+
+ resp = client.list_executions(
+ stateMachineArn=sm["stateMachineArn"], statusFilter="ABORTED"
+ )
+ resp["executions"].should.have.length_of(5)
+ all([e["status"] == "ABORTED" for e in resp["executions"]]).should.be.true
+
+
+@mock_stepfunctions
+def test_state_machine_list_executions_with_pagination():
+ client = boto3.client("stepfunctions", region_name=region)
+ sm = client.create_state_machine(
+ name="name", definition=str(simple_definition), roleArn=_get_default_role()
+ )
+ for _ in range(100):
+ client.start_execution(stateMachineArn=sm["stateMachineArn"])
+
+ resp = client.list_executions(stateMachineArn=sm["stateMachineArn"])
+ resp.should_not.have.key("nextToken")
+ resp["executions"].should.have.length_of(100)
+
+ paginator = client.get_paginator("list_executions")
+ page_iterator = paginator.paginate(
+ stateMachineArn=sm["stateMachineArn"], maxResults=25
+ )
+ for page in page_iterator:
+ page["executions"].should.have.length_of(25)
+
+ # Fetch a valid token first, outside the raises-block, so that only the
+ # inconsistent follow-up call can satisfy the expected ClientError
+ resp = client.list_executions(
+ stateMachineArn=sm["stateMachineArn"], maxResults=10
+ )
+ with pytest.raises(ClientError) as ex:
+ client.list_executions(
+ stateMachineArn=sm["stateMachineArn"],
+ maxResults=10,
+ statusFilter="ABORTED",
+ nextToken=resp["nextToken"],
+ )
+ ex.value.response["Error"]["Code"].should.equal("InvalidToken")
+ ex.value.response["Error"]["Message"].should.contain(
+ "Input inconsistent with page token"
+ )
+
+ with pytest.raises(ClientError) as ex:
+ client.list_executions(
+ stateMachineArn=sm["stateMachineArn"], nextToken="invalid"
+ )
+ ex.value.response["Error"]["Code"].should.equal("InvalidToken")
+
+
 @mock_stepfunctions
 @mock_sts
 def test_state_machine_list_executions_when_none_exist():
@@ -425,7 +696,7 @@
 
 @mock_stepfunctions
 @mock_sts
-def test_state_machine_describe_execution():
+def test_state_machine_describe_execution_with_no_input():
 client = boto3.client("stepfunctions", region_name=region)
 #
 sm = client.create_state_machine(
@@ -446,10 +717,34 @@
 @mock_stepfunctions
 @mock_sts
-def test_state_machine_throws_error_when_describing_unknown_machine():
+def test_state_machine_describe_execution_with_custom_input():
 client = boto3.client("stepfunctions", region_name=region)
 #
- with assert_raises(ClientError) as exc:
+ execution_input = json.dumps({"input_key": "input_val"})
+ sm = client.create_state_machine(
+ name="name", definition=str(simple_definition), roleArn=_get_default_role()
+ )
+ execution = client.start_execution(
+ stateMachineArn=sm["stateMachineArn"], input=execution_input
+ )
+ description =
client.describe_execution(executionArn=execution["executionArn"])
+ #
+ description["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ description["executionArn"].should.equal(execution["executionArn"])
+ description["input"].should.equal(execution_input)
+ description["name"].shouldnt.be.empty
+ description["startDate"].should.equal(execution["startDate"])
+ description["stateMachineArn"].should.equal(sm["stateMachineArn"])
+ description["status"].should.equal("RUNNING")
+ description.shouldnt.have("stopDate")
+
+
+@mock_stepfunctions
+@mock_sts
+def test_execution_throws_error_when_describing_unknown_execution():
+ client = boto3.client("stepfunctions", region_name=region)
+ #
+ with pytest.raises(ClientError):
 unknown_execution = (
 "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown"
 )
@@ -480,7 +775,7 @@ def test_state_machine_can_be_described_by_execution():
 def test_state_machine_throws_error_when_describing_unknown_execution():
 client = boto3.client("stepfunctions", region_name=region)
 #
- with assert_raises(ClientError) as exc:
+ with pytest.raises(ClientError):
 unknown_execution = (
 "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown"
 )
@@ -516,10 +811,202 @@ def test_state_machine_describe_execution_after_stoppage():
 description = client.describe_execution(executionArn=execution["executionArn"])
 #
 description["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- description["status"].should.equal("SUCCEEDED")
+ description["status"].should.equal("ABORTED")
 description["stopDate"].should.be.a(datetime)
 
 
+@mock_stepfunctions
+@mock_cloudformation
+def test_state_machine_cloudformation():
+ sf = boto3.client("stepfunctions", region_name="us-east-1")
+ cf = boto3.resource("cloudformation", region_name="us-east-1")
+ definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333:function:HelloFunction", "End": true}}}'
+ role_arn = (
+ "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1"
+ )
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "An example template for a Step Functions state machine.",
+ "Resources": {
+ "MyStateMachine": {
+ "Type": "AWS::StepFunctions::StateMachine",
+ "Properties": {
+ "StateMachineName": "HelloWorld-StateMachine",
+ "StateMachineType": "STANDARD",
+ "DefinitionString": definition,
+ "RoleArn": role_arn,
+ "Tags": [
+ {"Key": "key1", "Value": "value1"},
+ {"Key": "key2", "Value": "value2"},
+ ],
+ },
+ }
+ },
+ "Outputs": {
+ "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}},
+ "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}},
+ },
+ }
+ cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ state_machine["stateMachineArn"].should.equal(output["StateMachineArn"])
+ state_machine["name"].should.equal(output["StateMachineName"])
+ state_machine["roleArn"].should.equal(role_arn)
+ state_machine["definition"].should.equal(definition)
+ tags = sf.list_tags_for_resource(resourceArn=output["StateMachineArn"]).get("tags")
+ for i, tag in enumerate(tags, 1):
+ tag["key"].should.equal("key{}".format(i))
+ tag["value"].should.equal("value{}".format(i))
+
+ cf.Stack("test_stack").delete()
+ with pytest.raises(ClientError) as ex:
+
sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ ex.value.response["Error"]["Code"].should.equal("StateMachineDoesNotExist")
+ ex.value.response["Error"]["Message"].should.contain("Does Not Exist")
+
+
+@mock_stepfunctions
+@mock_cloudformation
+def test_state_machine_cloudformation_update_with_replacement():
+ sf = boto3.client("stepfunctions", region_name="us-east-1")
+ cf = boto3.resource("cloudformation", region_name="us-east-1")
+ definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333:function:HelloFunction", "End": true}}}'
+ role_arn = (
+ "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1"
+ )
+ properties = {
+ "StateMachineName": "HelloWorld-StateMachine",
+ "DefinitionString": definition,
+ "RoleArn": role_arn,
+ "Tags": [
+ {"Key": "key1", "Value": "value1"},
+ {"Key": "key2", "Value": "value2"},
+ ],
+ }
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "An example template for a Step Functions state machine.",
+ "Resources": {
+ "MyStateMachine": {
+ "Type": "AWS::StepFunctions::StateMachine",
+ "Properties": {},
+ }
+ },
+ "Outputs": {
+ "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}},
+ "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}},
+ },
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = properties
+ cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ original_machine_arn = state_machine["stateMachineArn"]
+ original_creation_date = state_machine["creationDate"]
+
+ # Update State Machine, with replacement.
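+ # StateMachineName is not updatable in place, so this update replaces the
+ # resource: the assertions below expect a new ARN and creationDate.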
+ updated_role = role_arn + "-updated"
+ updated_definition = definition.replace("HelloWorld", "HelloWorld2")
+ updated_properties = {
+ "StateMachineName": "New-StateMachine-Name",
+ "DefinitionString": updated_definition,
+ "RoleArn": updated_role,
+ "Tags": [
+ {"Key": "key3", "Value": "value3"},
+ {"Key": "key1", "Value": "updated_value"},
+ ],
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = updated_properties
+ cf.Stack("test_stack").update(TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ state_machine["stateMachineArn"].should_not.equal(original_machine_arn)
+ state_machine["name"].should.equal("New-StateMachine-Name")
+ state_machine["creationDate"].should.be.greater_than(original_creation_date)
+ state_machine["roleArn"].should.equal(updated_role)
+ state_machine["definition"].should.equal(updated_definition)
+ tags = sf.list_tags_for_resource(resourceArn=output["StateMachineArn"]).get("tags")
+ tags.should.have.length_of(3)
+ for tag in tags:
+ if tag["key"] == "key1":
+ tag["value"].should.equal("updated_value")
+
+ with pytest.raises(ClientError) as ex:
+ sf.describe_state_machine(stateMachineArn=original_machine_arn)
+ ex.value.response["Error"]["Code"].should.equal("StateMachineDoesNotExist")
+ ex.value.response["Error"]["Message"].should.contain("State Machine Does Not Exist")
+
+
+@mock_stepfunctions
+@mock_cloudformation
+def test_state_machine_cloudformation_update_with_no_interruption():
+ sf = boto3.client("stepfunctions", region_name="us-east-1")
+ cf = boto3.resource("cloudformation", region_name="us-east-1")
+ definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333:function:HelloFunction", "End": true}}}'
+ role_arn = (
+ "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1"
+ )
+ properties = {
+ "StateMachineName": "HelloWorld-StateMachine",
+ "DefinitionString": definition,
+ "RoleArn": role_arn,
+ "Tags": [
+ {"Key": "key1", "Value": "value1"},
+ {"Key": "key2", "Value": "value2"},
+ ],
+ }
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "An example template for a Step Functions state machine.",
+ "Resources": {
+ "MyStateMachine": {
+ "Type": "AWS::StepFunctions::StateMachine",
+ "Properties": {},
+ }
+ },
+ "Outputs": {
+ "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}},
+ "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}},
+ },
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = properties
+ cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ machine_arn = state_machine["stateMachineArn"]
+ creation_date = state_machine["creationDate"]
+
+ # Update State Machine in-place, no replacement.
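+ # DefinitionString, RoleArn and Tags are all updatable in place, so the
+ # ARN and creationDate asserted below should survive the stack update.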
+ updated_role = role_arn + "-updated"
+ updated_definition = definition.replace("HelloWorld", "HelloWorldUpdated")
+ updated_properties = {
+ "DefinitionString": updated_definition,
+ "RoleArn": updated_role,
+ "Tags": [
+ {"Key": "key3", "Value": "value3"},
+ {"Key": "key1", "Value": "updated_value"},
+ ],
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = updated_properties
+ cf.Stack("test_stack").update(TemplateBody=json.dumps(template))
+
+ state_machine = sf.describe_state_machine(stateMachineArn=machine_arn)
+ state_machine["name"].should.equal("HelloWorld-StateMachine")
+ state_machine["creationDate"].should.equal(creation_date)
+ state_machine["roleArn"].should.equal(updated_role)
+ state_machine["definition"].should.equal(updated_definition)
+ tags = sf.list_tags_for_resource(resourceArn=machine_arn).get("tags")
+ tags.should.have.length_of(3)
+ for tag in tags:
+ if tag["key"] == "key1":
+ tag["value"].should.equal("updated_value")
+
+
 def _get_account_id():
 global account_id
 if account_id:
diff --git a/tests/test_sts/__init__.py b/tests/test_sts/__init__.py
new file mode 100644
index 000000000..08a1c1568
--- /dev/null
+++ b/tests/test_sts/__init__.py
@@ -0,0 +1 @@
+# This file is intentionally left blank.
diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py
index 4dee9184f..098da5881 100644
--- a/tests/test_sts/test_sts.py
+++ b/tests/test_sts/test_sts.py
@@ -1,11 +1,12 @@
 from __future__ import unicode_literals
+from base64 import b64encode
 import json
 import boto
 import boto3
 from botocore.client import ClientError
 from freezegun import freeze_time
-from nose.tools import assert_raises
+import pytest
 import sure # noqa
@@ -103,6 +104,128 @@ def test_assume_role():
 )
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_sts
+def test_assume_role_with_saml():
+ client = boto3.client("sts", region_name="us-east-1")
+
+ session_name = "session-name"
+ policy = json.dumps(
+ {
+ "Statement": [
+ {
+ "Sid": "Stmt13690092345534",
+ "Action": ["S3:ListBucket"],
+ "Effect": "Allow",
+ "Resource": ["arn:aws:s3:::foobar-tester"],
+ }
+ ]
+ }
+ )
+ role_name = "test-role"
+ provider_name = "TestProvFed"
+ user_name = "testuser"
+ role_input = "arn:aws:iam::{account_id}:role/{role_name}".format(
+ account_id=ACCOUNT_ID, role_name=role_name
+ )
+ principal_role = "arn:aws:iam::{account_id}:saml-provider/{provider_name}".format(
+ account_id=ACCOUNT_ID, provider_name=provider_name
+ )
+ saml_assertion = """<?xml version="1.0"?>
+<samlp:Response xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_00000000-0000-0000-0000-000000000000" Version="2.0" IssueInstant="2012-01-01T12:00:00.000Z" Destination="https://signin.aws.amazon.com/saml" Consent="urn:oasis:names:tc:SAML:2.0:consent:unspecified">
+<Issuer xmlns="urn:oasis:names:tc:SAML:2.0:assertion">http://localhost/</Issuer>
+<samlp:Status>
+<samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
+</samlp:Status>
+<Assertion xmlns="urn:oasis:names:tc:SAML:2.0:assertion" ID="_00000000-0000-0000-0000-000000000000" IssueInstant="2012-01-01T12:00:00.000Z" Version="2.0">
+<Issuer>http://localhost:3000/</Issuer>
+<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
+<ds:SignedInfo>
+<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/>
+<ds:Reference URI="#_00000000-0000-0000-0000-000000000000">
+<ds:Transforms>
+<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
+<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+</ds:Transforms>
+<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/>
+<ds:DigestValue>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:DigestValue>
+</ds:Reference>
+</ds:SignedInfo>
+<ds:SignatureValue>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:SignatureValue>
+<KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#">
+<ds:X509Data>
+<ds:X509Certificate>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:X509Certificate>
+</ds:X509Data>
+</KeyInfo>
+</ds:Signature>
+<Subject>
+<NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent">{username}</NameID>
+<SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
+<SubjectConfirmationData NotOnOrAfter="2012-01-01T13:00:00.000Z" Recipient="https://signin.aws.amazon.com/saml"/>
+</SubjectConfirmation>
+</Subject>
+<Conditions NotBefore="2012-01-01T12:00:00.000Z" NotOnOrAfter="2012-01-01T13:00:00.000Z">
+<AudienceRestriction>
+<Audience>urn:amazon:webservices</Audience>
+</AudienceRestriction>
+</Conditions>
+<AttributeStatement>
+<Attribute Name="https://aws.amazon.com/SAML/Attributes/RoleSessionName">
+<AttributeValue>{username}@localhost</AttributeValue>
+</Attribute>
+<Attribute Name="https://aws.amazon.com/SAML/Attributes/Role">
+<AttributeValue>arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name}</AttributeValue>
+</Attribute>
+<Attribute Name="https://aws.amazon.com/SAML/Attributes/SessionDuration">
+<AttributeValue>900</AttributeValue>
+</Attribute>
+</AttributeStatement>
+<AuthnStatement AuthnInstant="2012-01-01T12:00:00.000Z" SessionIndex="_00000000-0000-0000-0000-000000000000">
+<AuthnContext>
+<AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</AuthnContextClassRef>
+</AuthnContext>
+</AuthnStatement>
+</Assertion>
+</samlp:Response>""".format(
+ account_id=ACCOUNT_ID,
+ role_name=role_name,
+ provider_name=provider_name,
+ username=user_name,
+ ).replace(
+ "\n", ""
+ )
+
+ assume_role_response = client.assume_role_with_saml(
+ RoleArn=role_input,
+ PrincipalArn=principal_role,
+ SAMLAssertion=b64encode(saml_assertion.encode("utf-8")).decode("utf-8"),
+ )
+
+ credentials = assume_role_response["Credentials"]
+ if not settings.TEST_SERVER_MODE:
+
credentials["Expiration"].isoformat().should.equal("2012-01-01T12:15:00+00:00") + credentials["SessionToken"].should.have.length_of(356) + assert credentials["SessionToken"].startswith("FQoGZXIvYXdzE") + credentials["AccessKeyId"].should.have.length_of(20) + assert credentials["AccessKeyId"].startswith("ASIA") + credentials["SecretAccessKey"].should.have.length_of(40) + + assume_role_response["AssumedRoleUser"]["Arn"].should.equal( + "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}@localhost".format( + account_id=ACCOUNT_ID, role_name=role_name, fed_name=user_name + ) + ) + assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].startswith("AROA") + assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].endswith( + ":{fed_name}@localhost".format(fed_name=user_name) + ) + assume_role_response["AssumedRoleUser"]["AssumedRoleId"].should.have.length_of( + 21 + 1 + len("{fed_name}@localhost".format(fed_name=user_name)) + ) + + @freeze_time("2012-01-01 12:00:00") @mock_sts_deprecated def test_assume_role_with_web_identity(): @@ -234,9 +357,9 @@ def test_federation_token_with_too_long_policy(): json_policy = json.dumps(policy) assert len(json_policy) > MAX_FEDERATION_TOKEN_POLICY_LENGTH - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as ex: cli.get_federation_token(Name="foo", DurationSeconds=3600, Policy=json_policy) - exc.exception.response["Error"]["Code"].should.equal("ValidationError") - exc.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.contain( str(MAX_FEDERATION_TOKEN_POLICY_LENGTH) ) diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py index 0661adffb..8ddb230e2 100644 --- a/tests/test_swf/models/test_decision_task.py +++ b/tests/test_swf/models/test_decision_task.py @@ -24,15 +24,16 @@ def test_decision_task_full_dict_representation(): fd = dt.to_full_dict() fd["events"].should.be.a("list") - fd["previousStartedEventId"].should.equal(0) + fd.should_not.contain("previousStartedEventId") fd.should_not.contain("startedEventId") fd.should.contain("taskToken") fd["workflowExecution"].should.equal(wfe.to_short_dict()) fd["workflowType"].should.equal(wft.to_short_dict()) - dt.start(1234) + dt.start(1234, 1230) fd = dt.to_full_dict() fd["startedEventId"].should.equal(1234) + fd["previousStartedEventId"].should.equal(1230) def test_decision_task_first_timeout(): diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 389e516df..9e7579ddd 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -1,107 +1,104 @@ -from collections import namedtuple -import sure # noqa - -from moto.swf.exceptions import SWFUnknownResourceFault -from moto.swf.models import Domain - -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - -# Fake WorkflowExecution for tests purposes -WorkflowExecution = namedtuple( - "WorkflowExecution", ["workflow_id", "run_id", "execution_status", "open"] -) - - -def test_domain_short_dict_representation(): - domain = Domain("foo", "52") - domain.to_short_dict().should.equal({"name": "foo", "status": "REGISTERED"}) - - domain.description = "foo bar" - domain.to_short_dict()["description"].should.equal("foo bar") - - -def test_domain_full_dict_representation(): - domain = Domain("foo", "52") - - 
domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict()) - _config = domain.to_full_dict()["configuration"] - _config["workflowExecutionRetentionPeriodInDays"].should.equal("52") - - -def test_domain_string_representation(): - domain = Domain("my-domain", "60") - str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)") - - -def test_domain_add_to_activity_task_list(): - domain = Domain("my-domain", "60") - domain.add_to_activity_task_list("foo", "bar") - domain.activity_task_lists.should.equal({"foo": ["bar"]}) - - -def test_domain_activity_tasks(): - domain = Domain("my-domain", "60") - domain.add_to_activity_task_list("foo", "bar") - domain.add_to_activity_task_list("other", "baz") - sorted(domain.activity_tasks).should.equal(["bar", "baz"]) - - -def test_domain_add_to_decision_task_list(): - domain = Domain("my-domain", "60") - domain.add_to_decision_task_list("foo", "bar") - domain.decision_task_lists.should.equal({"foo": ["bar"]}) - - -def test_domain_decision_tasks(): - domain = Domain("my-domain", "60") - domain.add_to_decision_task_list("foo", "bar") - domain.add_to_decision_task_list("other", "baz") - sorted(domain.decision_tasks).should.equal(["bar", "baz"]) - - -def test_domain_get_workflow_execution(): - domain = Domain("my-domain", "60") - - wfe1 = WorkflowExecution( - workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True - ) - wfe2 = WorkflowExecution( - workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False - ) - wfe3 = WorkflowExecution( - workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True - ) - wfe4 = WorkflowExecution( - workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False - ) - domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] - - # get workflow execution through workflow_id and run_id - domain.get_workflow_execution("wf-id-1", run_id="run-id-1").should.equal(wfe1) - domain.get_workflow_execution("wf-id-1", run_id="run-id-2").should.equal(wfe2) - domain.get_workflow_execution("wf-id-3", run_id="run-id-4").should.equal(wfe4) - - domain.get_workflow_execution.when.called_with( - "wf-id-1", run_id="non-existent" - ).should.throw(SWFUnknownResourceFault) - - # get OPEN workflow execution by default if no run_id - domain.get_workflow_execution("wf-id-1").should.equal(wfe1) - domain.get_workflow_execution.when.called_with("wf-id-3").should.throw( - SWFUnknownResourceFault - ) - domain.get_workflow_execution.when.called_with("wf-id-non-existent").should.throw( - SWFUnknownResourceFault - ) - - # raise_if_closed attribute - domain.get_workflow_execution( - "wf-id-1", run_id="run-id-1", raise_if_closed=True - ).should.equal(wfe1) - domain.get_workflow_execution.when.called_with( - "wf-id-3", run_id="run-id-4", raise_if_closed=True - ).should.throw(SWFUnknownResourceFault) - - # raise_if_none attribute - domain.get_workflow_execution("foo", raise_if_none=False).should.be.none +from collections import namedtuple +import sure # noqa + +from moto.swf.exceptions import SWFUnknownResourceFault +from moto.swf.models import Domain + +# Fake WorkflowExecution for tests purposes +WorkflowExecution = namedtuple( + "WorkflowExecution", ["workflow_id", "run_id", "execution_status", "open"] +) + + +def test_domain_short_dict_representation(): + domain = Domain("foo", "52") + domain.to_short_dict().should.equal({"name": "foo", "status": "REGISTERED"}) + + domain.description = "foo bar" + domain.to_short_dict()["description"].should.equal("foo bar") + + +def 
test_domain_full_dict_representation(): + domain = Domain("foo", "52") + + domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict()) + _config = domain.to_full_dict()["configuration"] + _config["workflowExecutionRetentionPeriodInDays"].should.equal("52") + + +def test_domain_string_representation(): + domain = Domain("my-domain", "60") + str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)") + + +def test_domain_add_to_activity_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.activity_task_lists.should.equal({"foo": ["bar"]}) + + +def test_domain_activity_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.add_to_activity_task_list("other", "baz") + sorted(domain.activity_tasks).should.equal(["bar", "baz"]) + + +def test_domain_add_to_decision_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.decision_task_lists.should.equal({"foo": ["bar"]}) + + +def test_domain_decision_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.add_to_decision_task_list("other", "baz") + sorted(domain.decision_tasks).should.equal(["bar", "baz"]) + + +def test_domain_get_workflow_execution(): + domain = Domain("my-domain", "60") + + wfe1 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True + ) + wfe2 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False + ) + wfe3 = WorkflowExecution( + workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True + ) + wfe4 = WorkflowExecution( + workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False + ) + domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] + + # get workflow execution through workflow_id and run_id + domain.get_workflow_execution("wf-id-1", run_id="run-id-1").should.equal(wfe1) + domain.get_workflow_execution("wf-id-1", run_id="run-id-2").should.equal(wfe2) + domain.get_workflow_execution("wf-id-3", run_id="run-id-4").should.equal(wfe4) + + domain.get_workflow_execution.when.called_with( + "wf-id-1", run_id="non-existent" + ).should.throw(SWFUnknownResourceFault) + + # get OPEN workflow execution by default if no run_id + domain.get_workflow_execution("wf-id-1").should.equal(wfe1) + domain.get_workflow_execution.when.called_with("wf-id-3").should.throw( + SWFUnknownResourceFault + ) + domain.get_workflow_execution.when.called_with("wf-id-non-existent").should.throw( + SWFUnknownResourceFault + ) + + # raise_if_closed attribute + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1", raise_if_closed=True + ).should.equal(wfe1) + domain.get_workflow_execution.when.called_with( + "wf-id-3", run_id="run-id-4", raise_if_closed=True + ).should.throw(SWFUnknownResourceFault) + + # raise_if_none attribute + domain.get_workflow_execution("foo", raise_if_none=False).should.be.none diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index fb52652fd..0ee059065 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,19 +1,19 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import Timeout - -from ..utils import make_workflow_execution - - -def test_timeout_creation(): - wfe = make_workflow_execution() - - # epoch 1420113600 == "2015-01-01 13:00:00" - timeout = 
Timeout(wfe, 1420117200, "START_TO_CLOSE") - - with freeze_time("2015-01-01 12:00:00"): - timeout.reached.should.be.falsy - - with freeze_time("2015-01-01 13:00:00"): - timeout.reached.should.be.truthy +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import Timeout + +from ..utils import make_workflow_execution + + +def test_timeout_creation(): + wfe = make_workflow_execution() + + # epoch 1420113600 == "2015-01-01 13:00:00" + timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE") + + with freeze_time("2015-01-01 12:00:00"): + timeout.reached.should.be.falsy + + with freeze_time("2015-01-01 13:00:00"): + timeout.reached.should.be.truthy diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index 6c73a9686..503198f46 100644 --- a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -148,6 +148,39 @@ def test_workflow_execution_full_dict_representation(): ) +def test_closed_workflow_execution_full_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", + "v1.0", + task_list="queue", + default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + wfe.execution_status = "CLOSED" + wfe.close_status = "CANCELED" + wfe.close_timestamp = 1420066801.123 + + fd = wfe.to_full_dict() + medium_dict = wfe.to_medium_dict() + medium_dict["closeStatus"] = "CANCELED" + medium_dict["closeTimestamp"] = 1420066801.123 + fd["executionInfo"].should.equal(medium_dict) + fd["openCounts"]["openTimers"].should.equal(0) + fd["openCounts"]["openDecisionTasks"].should.equal(0) + fd["openCounts"]["openActivityTasks"].should.equal(0) + fd["executionConfiguration"].should.equal( + { + "childPolicy": "ABANDON", + "executionStartToCloseTimeout": "300", + "taskList": {"name": "queue"}, + "taskStartToCloseTimeout": "300", + } + ) + + def test_workflow_execution_list_dict_representation(): domain = get_basic_domain() wf_type = WorkflowType( diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 0b72b7ca7..4fa965b11 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -35,14 +35,14 @@ def test_poll_for_activity_task_when_one(): def test_poll_for_activity_task_when_none(): conn = setup_workflow() resp = conn.poll_for_activity_task("test-domain", "activity-task-list") - resp.should.equal({"startedEventId": 0}) + resp.should.equal({"startedEventId": 0, "taskToken": ""}) @mock_swf_deprecated def test_poll_for_activity_task_on_non_existent_queue(): conn = setup_workflow() resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") - resp.should.equal({"startedEventId": 0}) + resp.should.equal({"startedEventId": 0, "taskToken": ""}) # CountPendingActivityTasks endpoint diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 3fa9ad6b1..d49e5d4cb 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,8 +1,11 @@ import boto from boto.swf.exceptions import SWFResponseError +import boto3 +from botocore.exceptions import ClientError import sure # noqa from moto import mock_swf_deprecated +from moto import mock_swf # RegisterActivityType endpoint @@ -110,6 +113,77 @@ def 
test_deprecate_non_existent_activity_type():
 ).should.throw(SWFResponseError)
+
+# UndeprecateActivityType endpoint
+@mock_swf
+def test_undeprecate_activity_type():
+ client = boto3.client("swf", region_name="us-east-1")
+ client.register_domain(
+ name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+ )
+ client.register_activity_type(
+ domain="test-domain", name="test-activity", version="v1.0"
+ )
+ client.deprecate_activity_type(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ )
+ client.undeprecate_activity_type(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ )
+
+ resp = client.describe_activity_type(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ )
+ resp["typeInfo"]["status"].should.equal("REGISTERED")
+
+
+@mock_swf
+def test_undeprecate_already_undeprecated_activity_type():
+ client = boto3.client("swf", region_name="us-east-1")
+ client.register_domain(
+ name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+ )
+ client.register_activity_type(
+ domain="test-domain", name="test-activity", version="v1.0"
+ )
+ client.deprecate_activity_type(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ )
+ client.undeprecate_activity_type(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ )
+
+ client.undeprecate_activity_type.when.called_with(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ ).should.throw(ClientError)
+
+
+@mock_swf
+def test_undeprecate_never_deprecated_activity_type():
+ client = boto3.client("swf", region_name="us-east-1")
+ client.register_domain(
+ name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+ )
+ client.register_activity_type(
+ domain="test-domain", name="test-activity", version="v1.0"
+ )
+
+ client.undeprecate_activity_type.when.called_with(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ ).should.throw(ClientError)
+
+
+@mock_swf
+def test_undeprecate_non_existent_activity_type():
+ client = boto3.client("swf", region_name="us-east-1")
+ client.register_domain(
+ name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+ )
+
+ client.undeprecate_activity_type.when.called_with(
+ domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+ ).should.throw(ClientError)
+
+
 # DescribeActivityType endpoint
 @mock_swf_deprecated
 def test_describe_activity_type():
diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py
index 6389536e6..3c55b58c8 100644
--- a/tests/test_swf/responses/test_decision_tasks.py
+++ b/tests/test_swf/responses/test_decision_tasks.py
@@ -30,6 +30,30 @@ def test_poll_for_decision_task_when_one():
 )
+
+@mock_swf_deprecated
+def test_poll_for_decision_task_previous_started_event_id():
+ conn = setup_workflow()
+
+ resp = conn.poll_for_decision_task("test-domain", "queue")
+ assert resp["workflowExecution"]["runId"] == conn.run_id
+ assert "previousStartedEventId" not in resp
+
+ # Require a failing decision, in this case a non-existent activity type
+ attrs = {
+ "activityId": "spam",
+ "activityType": {"name": "test-activity", "version": "v1.42"},
+ "taskList": "eggs",
+ }
+ decision = {
+ "decisionType": "ScheduleActivityTask",
+ "scheduleActivityTaskDecisionAttributes": attrs,
+ }
+ conn.respond_decision_task_completed(resp["taskToken"], decisions=[decision])
+ resp =
conn.poll_for_decision_task("test-domain", "queue") + assert resp["workflowExecution"]["runId"] == conn.run_id + assert resp["previousStartedEventId"] == 3 + + @mock_swf_deprecated def test_poll_for_decision_task_when_none(): conn = setup_workflow() @@ -38,14 +62,18 @@ def test_poll_for_decision_task_when_none(): resp = conn.poll_for_decision_task("test-domain", "queue") # this is the DecisionTask representation you get from the real SWF # after waiting 60s when there's no decision to be taken - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + resp.should.equal( + {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""} + ) @mock_swf_deprecated def test_poll_for_decision_task_on_non_existent_queue(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + resp.should.equal( + {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""} + ) @mock_swf_deprecated diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 638bd410e..59ba551a6 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,114 +1,167 @@ -import boto -from boto.swf.exceptions import SWFResponseError -import sure # noqa - -from moto import mock_swf_deprecated - - -# RegisterDomain endpoint -@mock_swf_deprecated -def test_register_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - all_domains = conn.list_domains("REGISTERED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - domain["status"].should.equal("REGISTERED") - domain["description"].should.equal("A test domain") - - -@mock_swf_deprecated -def test_register_already_existing_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - conn.register_domain.when.called_with( - "test-domain", "60", description="A test domain" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.register_domain.when.called_with( - "test-domain", 60, description="A test domain" - ).should.throw(SWFResponseError) - - -# ListDomains endpoint -@mock_swf_deprecated -def test_list_domains_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED") - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) - - -@mock_swf_deprecated -def test_list_domains_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED", reverse_order=True) - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) - - -# DeprecateDomain endpoint -@mock_swf_deprecated -def test_deprecate_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") 
- conn.deprecate_domain("test-domain") - - all_domains = conn.list_domains("DEPRECATED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.deprecate_domain("test-domain") - - conn.deprecate_domain.when.called_with("test-domain").should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_domain(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.deprecate_domain.when.called_with("non-existent").should.throw( - SWFResponseError - ) - - -# DescribeDomain endpoint -@mock_swf_deprecated -def test_describe_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - domain = conn.describe_domain("test-domain") - domain["configuration"]["workflowExecutionRetentionPeriodInDays"].should.equal("60") - domain["domainInfo"]["description"].should.equal("A test domain") - domain["domainInfo"]["name"].should.equal("test-domain") - domain["domainInfo"]["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_domain(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.describe_domain.when.called_with("non-existent").should.throw(SWFResponseError) +import boto +from boto.swf.exceptions import SWFResponseError +import boto3 +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_swf_deprecated +from moto import mock_swf + + +# RegisterDomain endpoint +@mock_swf_deprecated +def test_register_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + all_domains = conn.list_domains("REGISTERED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + domain["status"].should.equal("REGISTERED") + domain["description"].should.equal("A test domain") + + +@mock_swf_deprecated +def test_register_already_existing_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + conn.register_domain.when.called_with( + "test-domain", "60", description="A test domain" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.register_domain.when.called_with( + "test-domain", 60, description="A test domain" + ).should.throw(SWFResponseError) + + +# ListDomains endpoint +@mock_swf_deprecated +def test_list_domains_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED") + names = [domain["name"] for domain in all_domains["domainInfos"]] + names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) + + +@mock_swf_deprecated +def test_list_domains_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED", reverse_order=True) + names = [domain["name"] for domain in all_domains["domainInfos"]] + 
names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) + + +# DeprecateDomain endpoint +@mock_swf_deprecated +def test_deprecate_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + all_domains = conn.list_domains("DEPRECATED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + conn.deprecate_domain.when.called_with("test-domain").should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.deprecate_domain.when.called_with("non-existent").should.throw( + SWFResponseError + ) + + +# UndeprecateDomain endpoint +@mock_swf +def test_undeprecate_domain(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.deprecate_domain(name="test-domain") + client.undeprecate_domain(name="test-domain") + + resp = client.describe_domain(name="test-domain") + + resp["domainInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf +def test_undeprecate_already_undeprecated_domain(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.deprecate_domain(name="test-domain") + client.undeprecate_domain(name="test-domain") + + client.undeprecate_domain.when.called_with(name="test-domain").should.throw( + ClientError + ) + + +@mock_swf +def test_undeprecate_never_deprecated_domain(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + + client.undeprecate_domain.when.called_with(name="test-domain").should.throw( + ClientError + ) + + +@mock_swf +def test_undeprecate_non_existent_domain(): + client = boto3.client("swf", region_name="us-east-1") + + client.undeprecate_domain.when.called_with(name="non-existent").should.throw( + ClientError + ) + + +# DescribeDomain endpoint +@mock_swf_deprecated +def test_describe_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + domain = conn.describe_domain("test-domain") + domain["configuration"]["workflowExecutionRetentionPeriodInDays"].should.equal("60") + domain["domainInfo"]["description"].should.equal("A test domain") + domain["domainInfo"]["name"].should.equal("test-domain") + domain["domainInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.describe_domain.when.called_with("non-existent").should.throw(SWFResponseError) diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index bec352ce8..2832abf75 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -4,9 +4,6 @@ from datetime import datetime, timedelta import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import 
tests.backport_assert_raises # noqa - from moto import mock_swf_deprecated from moto.core.utils import unix_time diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 4c92d7762..e1990596b 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -1,8 +1,11 @@ import sure import boto +import boto3 from moto import mock_swf_deprecated +from moto import mock_swf from boto.swf.exceptions import SWFResponseError +from botocore.exceptions import ClientError # RegisterWorkflowType endpoint @@ -110,6 +113,77 @@ def test_deprecate_non_existent_workflow_type(): ).should.throw(SWFResponseError) +# UndeprecateWorkflowType endpoint +@mock_swf +def test_undeprecate_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + domain="test-domain", name="test-workflow", version="v1.0" + ) + client.deprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + client.undeprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + + resp = client.describe_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + resp["typeInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf +def test_undeprecate_already_undeprecated_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + domain="test-domain", name="test-workflow", version="v1.0" + ) + client.deprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + client.undeprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + +@mock_swf +def test_undeprecate_never_deprecated_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + domain="test-domain", name="test-workflow", version="v1.0" + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + +@mock_swf +def test_undeprecate_non_existent_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + # DescribeWorkflowType endpoint @mock_swf_deprecated def test_describe_workflow_type(): @@ -133,6 +207,41 @@ def test_describe_workflow_type(): infos["status"].should.equal("REGISTERED") +@mock_swf +def test_describe_workflow_type_full_boto3(): + # boto3 required as boto doesn't support all of the arguments + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="2" + ) + 
client.register_workflow_type( + domain="test-domain", + name="test-workflow", + version="v1.0", + description="Test workflow.", + defaultTaskStartToCloseTimeout="20", + defaultExecutionStartToCloseTimeout="60", + defaultTaskList={"name": "foo"}, + defaultTaskPriority="-2", + defaultChildPolicy="ABANDON", + defaultLambdaRole="arn:bar", + ) + + resp = client.describe_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + resp["typeInfo"]["workflowType"]["name"].should.equal("test-workflow") + resp["typeInfo"]["workflowType"]["version"].should.equal("v1.0") + resp["typeInfo"]["status"].should.equal("REGISTERED") + resp["typeInfo"]["description"].should.equal("Test workflow.") + resp["configuration"]["defaultTaskStartToCloseTimeout"].should.equal("20") + resp["configuration"]["defaultExecutionStartToCloseTimeout"].should.equal("60") + resp["configuration"]["defaultTaskList"]["name"].should.equal("foo") + resp["configuration"]["defaultTaskPriority"].should.equal("-2") + resp["configuration"]["defaultChildPolicy"].should.equal("ABANDON") + resp["configuration"]["defaultLambdaRole"].should.equal("arn:bar") + + @mock_swf_deprecated def test_describe_non_existent_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") diff --git a/tests/test_swf/test_utils.py b/tests/test_swf/test_utils.py index 328342bbe..143804ca9 100644 --- a/tests/test_swf/test_utils.py +++ b/tests/test_swf/test_utils.py @@ -1,9 +1,9 @@ -import sure # noqa - -from moto.swf.utils import decapitalize - - -def test_decapitalize(): - cases = {"fooBar": "fooBar", "FooBar": "fooBar", "FOO BAR": "fOO BAR"} - for before, after in cases.items(): - decapitalize(before).should.equal(after) +import sure # noqa + +from moto.swf.utils import decapitalize + + +def test_decapitalize(): + cases = {"fooBar": "fooBar", "FooBar": "fooBar", "FOO BAR": "fOO BAR"} + for before, after in cases.items(): + decapitalize(before).should.equal(after) diff --git a/tests/test_transcribe/__init__.py b/tests/test_transcribe/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_transcribe/test_transcribe_boto3.py b/tests/test_transcribe/test_transcribe_boto3.py new file mode 100644 index 000000000..3de958bc1 --- /dev/null +++ b/tests/test_transcribe/test_transcribe_boto3.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_transcribe + + +@mock_transcribe +def test_run_medical_transcription_job_minimal_params(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # CREATED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["MedicalTranscriptionJobName"].should.equal( + args["MedicalTranscriptionJobName"] + ) + transcription_job["TranscriptionJobStatus"].should.equal("QUEUED") + transcription_job["LanguageCode"].should.equal(args["LanguageCode"]) + 
transcription_job["Media"].should.equal(args["Media"]) + transcription_job.should.contain("CreationTime") + transcription_job.doesnt.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["Settings"]["ChannelIdentification"].should.equal(False) + transcription_job["Settings"]["ShowAlternatives"].should.equal(False) + transcription_job["Specialty"].should.equal(args["Specialty"]) + transcription_job["Type"].should.equal(args["Type"]) + + # IN_PROGRESS + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("IN_PROGRESS") + transcription_job["MediaFormat"].should.equal("wav") + transcription_job.should.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["MediaSampleRateHertz"].should.equal(44100) + + # COMPLETED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("COMPLETED") + transcription_job.should.contain("CompletionTime") + transcription_job["Transcript"].should.equal( + { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + region_name, + args["OutputBucketName"], + args["MedicalTranscriptionJobName"], + ) + } + ) + + # Delete + client.delete_medical_transcription_job(MedicalTranscriptionJobName=job_name) + client.get_medical_transcription_job.when.called_with( + MedicalTranscriptionJobName=job_name + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_run_medical_transcription_job_all_params(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyMedicalVocabulary" + resp = client.create_medical_vocabulary( + VocabularyName=vocabulary_name, + LanguageCode="en-US", + VocabularyFileUri="https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + job_name = "MyJob2" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "MediaSampleRateHertz": 48000, + "MediaFormat": "flac", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "OutputBucketName": "my-output-bucket", + "OutputEncryptionKMSKeyId": "arn:aws:kms:us-east-1:012345678901:key/37111b5e-8eff-4706-ae3a-d4f9d1d559fc", + "Settings": { + "ShowSpeakerLabels": True, + "MaxSpeakerLabels": 5, + "ChannelIdentification": True, + "ShowAlternatives": True, + "MaxAlternatives": 6, + "VocabularyName": vocabulary_name, + }, + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # CREATED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["MedicalTranscriptionJobName"].should.equal( + args["MedicalTranscriptionJobName"] + ) + transcription_job["TranscriptionJobStatus"].should.equal("QUEUED") + 
transcription_job["LanguageCode"].should.equal(args["LanguageCode"]) + transcription_job["Media"].should.equal(args["Media"]) + transcription_job.should.contain("CreationTime") + transcription_job.doesnt.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["Settings"]["ShowSpeakerLabels"].should.equal( + args["Settings"]["ShowSpeakerLabels"] + ) + transcription_job["Settings"]["MaxSpeakerLabels"].should.equal( + args["Settings"]["MaxSpeakerLabels"] + ) + transcription_job["Settings"]["ChannelIdentification"].should.equal( + args["Settings"]["ChannelIdentification"] + ) + transcription_job["Settings"]["ShowAlternatives"].should.equal( + args["Settings"]["ShowAlternatives"] + ) + transcription_job["Settings"]["MaxAlternatives"].should.equal( + args["Settings"]["MaxAlternatives"] + ) + transcription_job["Settings"]["VocabularyName"].should.equal( + args["Settings"]["VocabularyName"] + ) + + transcription_job["Specialty"].should.equal(args["Specialty"]) + transcription_job["Type"].should.equal(args["Type"]) + + # IN_PROGRESS + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("IN_PROGRESS") + transcription_job["MediaFormat"].should.equal("flac") + transcription_job.should.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["MediaSampleRateHertz"].should.equal(48000) + + # COMPLETED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("COMPLETED") + transcription_job.should.contain("CompletionTime") + transcription_job["Transcript"].should.equal( + { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + region_name, + args["OutputBucketName"], + args["MedicalTranscriptionJobName"], + ) + } + ) + + +@mock_transcribe +def test_get_nonexistent_medical_transcription_job(): + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + client.get_medical_transcription_job.when.called_with( + MedicalTranscriptionJobName="NonexistentJobName" + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_run_medical_transcription_job_with_existing_job_name(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + client.start_medical_transcription_job.when.called_with(**args).should.throw( + client.exceptions.ConflictException + ) + + +@mock_transcribe +def test_run_medical_transcription_job_nonexistent_vocabulary(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob3" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": 
"en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "OutputBucketName": "my-output-bucket", + "Settings": {"VocabularyName": "NonexistentVocabulary"}, + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + client.start_medical_transcription_job.when.called_with(**args).should.throw( + client.exceptions.BadRequestException + ) + + +@mock_transcribe +def test_list_medical_transcription_jobs(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + def run_job(index, target_status): + job_name = "Job_{}".format(index) + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # IMPLICITLY PROMOTE JOB STATUS TO QUEUED + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # IN_PROGRESS + if target_status in ["IN_PROGRESS", "COMPLETED"]: + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # COMPLETED + if target_status == "COMPLETED": + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # Run 5 pending jobs + for i in range(5): + run_job(i, "PENDING") + + # Run 10 job to IN_PROGRESS + for i in range(5, 15): + run_job(i, "IN_PROGRESS") + + # Run 15 job to COMPLETED + for i in range(15, 30): + run_job(i, "COMPLETED") + + # List all + response = client.list_medical_transcription_jobs() + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(30) + response.shouldnt.contain("NextToken") + response.shouldnt.contain("Status") + + # List IN_PROGRESS + response = client.list_medical_transcription_jobs(Status="IN_PROGRESS") + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(10) + response.shouldnt.contain("NextToken") + response.should.contain("Status") + response["Status"].should.equal("IN_PROGRESS") + + # List JobName contains "8" + response = client.list_medical_transcription_jobs(JobNameContains="8") + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(3) + response.shouldnt.contain("NextToken") + response.shouldnt.contain("Status") + + # Pagination by 11 + response = client.list_medical_transcription_jobs(MaxResults=11) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(11) + response.should.contain("NextToken") + response.shouldnt.contain("Status") + + response = client.list_medical_transcription_jobs( + NextToken=response["NextToken"], MaxResults=11 + ) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(11) + response.should.contain("NextToken") + + response = client.list_medical_transcription_jobs( + NextToken=response["NextToken"], MaxResults=11 + ) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(8) + response.shouldnt.contain("NextToken") + + +@mock_transcribe +def test_create_medical_vocabulary(): + + region_name = "us-east-1" + client = 
boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyVocabulary" + resp = client.create_medical_vocabulary( + VocabularyName=vocabulary_name, + LanguageCode="en-US", + VocabularyFileUri="https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # PENDING + resp = client.get_medical_vocabulary(VocabularyName=vocabulary_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["VocabularyName"].should.equal(vocabulary_name) + resp["LanguageCode"].should.equal("en-US") + resp["VocabularyState"].should.equal("PENDING") + resp.should.contain("LastModifiedTime") + resp.shouldnt.contain("FailureReason") + resp["DownloadUri"].should.contain(vocabulary_name) + + # IN_PROGRESS + resp = client.get_medical_vocabulary(VocabularyName=vocabulary_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["VocabularyState"].should.equal("READY") + + # Delete + client.delete_medical_vocabulary(VocabularyName=vocabulary_name) + client.get_medical_vocabulary.when.called_with( + VocabularyName=vocabulary_name + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_get_nonexistent_medical_vocabulary(): + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + client.get_medical_vocabulary.when.called_with( + VocabularyName="NonexistentVocabularyName" + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_create_medical_vocabulary_with_existing_vocabulary_name(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyVocabulary" + args = { + "VocabularyName": vocabulary_name, + "LanguageCode": "en-US", + "VocabularyFileUri": "https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + } + resp = client.create_medical_vocabulary(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + client.create_medical_vocabulary.when.called_with(**args).should.throw( + client.exceptions.ConflictException + ) diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py new file mode 100644 index 000000000..1eac276a1 --- /dev/null +++ b/tests/test_utilities/test_tagging_service.py @@ -0,0 +1,110 @@ +import sure + +from moto.utilities.tagging_service import TaggingService + + +def test_list_empty(): + svc = TaggingService() + result = svc.list_tags_for_resource("test") + + {"Tags": []}.should.be.equal(result) + + +def test_create_tag(): + svc = TaggingService("TheTags", "TagKey", "TagValue") + tags = [{"TagKey": "key_key", "TagValue": "value_value"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} + + expected.should.be.equal(actual) + + +def test_create_tag_without_value(): + svc = TaggingService() + tags = [{"Key": "key_key"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"Tags": [{"Key": "key_key", "Value": None}]} + + expected.should.be.equal(actual) + + +def test_delete_tag_using_names(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_delete_all_tags_for_resource(): + svc = TaggingService() 
+ tags = [{"Key": "key_key", "Value": "value_value"}] + tags2 = [{"Key": "key_key2", "Value": "value_value2"}] + svc.tag_resource("arn", tags) + svc.tag_resource("arn", tags2) + svc.delete_all_tags_for_resource("arn") + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_list_empty_delete(): + svc = TaggingService() + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_delete_tag_using_tags(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_tags("arn", tags) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_extract_tag_names(): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + actual = svc.extract_tag_names(tags) + expected = ["key1", "key2"] + + expected.should.be.equal(actual) + + +def test_copy_non_existing_arn(): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + svc.tag_resource("new_arn", tags) + # + svc.copy_tags("non_existing_arn", "new_arn") + # Copying from a non-existing ARN should a NOOP + # Assert the old tags still exist + actual = sorted( + svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"] + ) + actual.should.equal(tags) + + +def test_copy_existing_arn(): + svc = TaggingService() + tags_old_arn = [{"Key": "key1", "Value": "value1"}] + tags_new_arn = [{"Key": "key2", "Value": "value2"}] + svc.tag_resource("old_arn", tags_old_arn) + svc.tag_resource("new_arn", tags_new_arn) + # + svc.copy_tags("old_arn", "new_arn") + # Assert the old tags still exist + actual = sorted( + svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"] + ) + actual.should.equal( + [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + ) diff --git a/tests/test_xray/__init__.py b/tests/test_xray/__init__.py new file mode 100644 index 000000000..08a1c1568 --- /dev/null +++ b/tests/test_xray/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tox.ini b/tox.ini index 9dacca18c..f77df29b3 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ deps = -r{toxinidir}/requirements-dev.txt commands = {envpython} setup.py test - nosetests {posargs} + pytest -v {posargs} [flake8] ignore = W503,W605,E128,E501,E203,E266,E501,E231 diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 902644b20..a9ca79eb5 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash set -e -pip install flask -pip install /moto/dist/moto*.gz -moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file +pip install $(ls /moto/dist/moto*.gz)[server,all] +moto_server -H 0.0.0.0 -p 5000 diff --git a/update_version_from_git.py b/update_version_from_git.py index d72dc4ae9..707f2f1e8 100644 --- a/update_version_from_git.py +++ b/update_version_from_git.py @@ -30,7 +30,7 @@ def migrate_source_attribute(attr, to_this, target_file, regex): new_file = [] found = False - with open(target_file, 'r') as fp: + with open(target_file, "r") as fp: lines = fp.readlines() for line in lines: @@ -40,61 +40,78 @@ def migrate_source_attribute(attr, to_this, target_file, regex): new_file.append(line) if found: - with open(target_file, 'w') as fp: + with open(target_file, "w") as fp: fp.writelines(new_file) + def migrate_version(target_file, new_version): """Updates __version__ in the source file""" regex = r"['\"](.*)['\"]" - migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex) + migrate_source_attribute( + "__version__", + "'{new_version}'".format(new_version=new_version), + target_file, + regex, + ) def is_master_branch(): - cmd = ('git rev-parse --abbrev-ref HEAD') + cmd = "git rev-parse --abbrev-ref HEAD" tag_branch = subprocess.check_output(cmd, shell=True) - return tag_branch in [b'master\n'] + return tag_branch in [b"master\n"] + def git_tag_name(): - cmd = ('git describe --tags') + cmd = "git describe --tags" tag_branch = subprocess.check_output(cmd, shell=True) tag_branch = tag_branch.decode().strip() return tag_branch + def get_git_version_info(): - cmd = 'git describe --tags' + cmd = "git describe --tags" ver_str = subprocess.check_output(cmd, shell=True) - ver, commits_since, githash = ver_str.decode().strip().split('-') + ver, commits_since, githash = ver_str.decode().strip().split("-") return ver, commits_since, githash + def prerelease_version(): - """ return what the prerelease version should be. + """return what the prerelease version should be. https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning 0.0.2.dev22 """ ver, commits_since, githash = get_git_version_info() initpy_ver = get_version() - assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2.dev' - assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.' - return '{initpy_ver}.{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since) + assert len(initpy_ver.split(".")) in [ + 3, + 4, + ], "moto/__init__.py version should be like 0.0.2.dev" + assert ( + initpy_ver > ver + ), "the moto/__init__.py version should be newer than the last tagged release." + return "{initpy_ver}.{commits_since}".format( + initpy_ver=initpy_ver, commits_since=commits_since + ) + def read(*parts): - """ Reads in file from *parts. 
- """ + """Reads in file from *parts.""" try: - return io.open(os.path.join(*parts), 'r', encoding='utf-8').read() + return io.open(os.path.join(*parts), "r", encoding="utf-8").read() except IOError: - return '' + return "" + def get_version(): - """ Returns version from moto/__init__.py - """ - version_file = read('moto', '__init__.py') - version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', - version_file, re.MULTILINE) + """Returns version from moto/__init__.py""" + version_file = read("moto", "__init__.py") + version_match = re.search( + r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE + ) if version_match: return version_match.group(1) - raise RuntimeError('Unable to find version string.') + raise RuntimeError("Unable to find version string.") def release_version_correct(): @@ -107,14 +124,22 @@ def release_version_correct(): initpy = os.path.abspath("moto/__init__.py") new_version = prerelease_version() - print('updating version in __init__.py to {new_version}'.format(new_version=new_version)) - assert len(new_version.split('.')) >= 4, 'moto/__init__.py version should be like 0.0.2.dev' + print( + "updating version in __init__.py to {new_version}".format( + new_version=new_version + ) + ) + assert ( + len(new_version.split(".")) >= 4 + ), "moto/__init__.py version should be like 0.0.2.dev" migrate_version(initpy, new_version) else: assert False, "No non-master deployments yet" # check that we are a tag with the same version as in __init__.py - assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __verion__' + assert ( + get_version() == git_tag_name() + ), "git tag/branch name not the same as moto/__init__.py __verion__" -if __name__ == '__main__': +if __name__ == "__main__": release_version_correct() diff --git a/wait_for.py b/wait_for.py index 1f291c16b..be29b0140 100755 --- a/wait_for.py +++ b/wait_for.py @@ -21,12 +21,12 @@ start_ts = time.time() print("Waiting for service to come up") while True: try: - urllib.urlopen('http://localhost:5000/', timeout=1) + urllib.urlopen("http://localhost:5000/", timeout=1) break except EXCEPTIONS: elapsed_s = time.time() - start_ts if elapsed_s > 60: raise - print('.') + print(".") time.sleep(1)