diff --git a/.gitignore b/.gitignore index efb489651..0a24fe476 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ venv/ .python-version .vscode/ tests/file.tmp +.eggs/ diff --git a/.travis.yml b/.travis.yml index 5bc9779f3..8145cfb46 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,36 +2,56 @@ dist: xenial language: python sudo: false services: - - docker +- docker python: - - 2.7 - - 3.6 - - 3.7 +- 2.7 +- 3.6 +- 3.7 env: - - TEST_SERVER_MODE=false - - TEST_SERVER_MODE=true +- TEST_SERVER_MODE=false +- TEST_SERVER_MODE=true before_install: - - export BOTO_CONFIG=/dev/null +- export BOTO_CONFIG=/dev/null install: - # We build moto first so the docker container doesn't try to compile it as well, also note we don't use - # -d for docker run so the logs show up in travis - # Python images come from here: https://hub.docker.com/_/python/ - - | - python setup.py sdist +- | + python setup.py sdist - if [ "$TEST_SERVER_MODE" = "true" ]; then - docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & - fi - travis_retry pip install boto==2.45.0 - travis_retry pip install boto3 - travis_retry pip install dist/moto*.gz - travis_retry pip install coveralls==1.1 - travis_retry pip install -r requirements-dev.txt + if [ "$TEST_SERVER_MODE" = "true" ]; then + docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & + fi + travis_retry pip install boto==2.45.0 + travis_retry pip install boto3 + travis_retry pip install dist/moto*.gz + travis_retry pip install coveralls==1.1 + travis_retry pip install -r requirements-dev.txt - if [ "$TEST_SERVER_MODE" = "true" ]; then - python wait_for.py - fi + if [ "$TEST_SERVER_MODE" = "true" ]; then + python wait_for.py + fi script: - - make test +- make test after_success: - - coveralls +- coveralls +before_deploy: +- git checkout $TRAVIS_BRANCH +- git fetch --unshallow +- python update_version_from_git.py +deploy: + - provider: pypi + distributions: sdist bdist_wheel + user: spulec + password: + secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw= + on: + branch: + - master + skip_cleanup: true + skip_existing: true + - provider: pypi + distributions: sdist bdist_wheel + user: spulec + password: + secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw= + on: + tags: true + skip_existing: true diff --git a/AUTHORS.md b/AUTHORS.md index 0a152505a..01b000182 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -54,3 +54,6 @@ Moto is written by Steve Pulec with contributions from: * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) * [Jon Beilke](https://github.com/jrbeilke) +* [Bendeguz Acs](https://github.com/acsbendi) +* [Craig Anderson](https://github.com/craiga) +* [Robert Lewis](https://github.com/ralewis85) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a5650f572..685db7ec4 100644 --- 
a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -470,48 +470,55 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 21% implemented +## cloudformation - 65% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set - [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set +- [X] create_stack_instances +- [X] create_stack_set - [X] delete_change_set - [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set +- [X] delete_stack_instances +- [X] delete_stack_set +- [ ] deploy - [ ] describe_account_limits - [X] describe_change_set -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation +- [ ] describe_stack_drift_detection_status +- [X] describe_stack_events +- [X] describe_stack_instance +- [X] describe_stack_resource +- [ ] describe_stack_resource_drifts +- [X] describe_stack_resources +- [X] describe_stack_set +- [X] describe_stack_set_operation - [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift - [ ] estimate_template_cost - [X] execute_change_set - [ ] get_stack_policy -- [ ] get_template +- [X] get_template - [ ] get_template_summary - [X] list_change_sets - [X] list_exports - [ ] list_imports -- [ ] list_stack_instances +- [X] list_stack_instances - [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets +- [X] list_stack_set_operation_results +- [X] list_stack_set_operations +- [X] list_stack_sets - [X] list_stacks +- [ ] package - [ ] set_stack_policy - [ ] signal_resource -- [ ] stop_stack_set_operation +- [X] stop_stack_set_operation - [X] update_stack -- [ ] update_stack_instances -- [ ] update_stack_set +- [X] update_stack_instances +- [X] update_stack_set - [ ] update_termination_protection -- [ ] validate_template +- [X] validate_template +- [ ] wait ## cloudfront - 0% implemented - [ ] create_cloud_front_origin_access_identity @@ -852,7 +859,7 @@ - [ ] admin_set_user_settings - [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status -- [ ] admin_update_user_attributes +- [X] admin_update_user_attributes - [ ] admin_user_global_sign_out - [ ] associate_software_token - [X] change_password @@ -916,7 +923,7 @@ - [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group -- [ ] update_identity_provider +- [x] update_identity_provider - [ ] update_resource_server - [ ] update_user_attributes - [ ] update_user_pool @@ -1466,7 +1473,7 @@ - [X] describe_spot_instance_requests - [ ] describe_spot_price_history - [ ] describe_stale_security_groups -- [ ] describe_subnets +- [X] describe_subnets - [X] describe_tags - [ ] describe_volume_attribute - [ ] describe_volume_status @@ -1574,9 +1581,9 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress -## ecr - 31% implemented +## ecr - 36% implemented - [ ] batch_check_layer_availability -- [ ] batch_delete_image +- [X] batch_delete_image - [X] batch_get_image - [ ] complete_layer_upload - [X] create_repository @@ -2005,23 +2012,23 @@ - [ ] upload_archive - [ ] upload_multipart_part -## glue - 0% implemented -- [ ] batch_create_partition +## glue - 23% implemented +- [x] batch_create_partition - [ ] batch_delete_connection -- [ ] batch_delete_partition -- [ ] batch_delete_table +- [x] batch_delete_partition +- [x] 
batch_delete_table - [ ] batch_delete_table_version - [ ] batch_get_partition - [ ] batch_stop_job_run - [ ] create_classifier - [ ] create_connection - [ ] create_crawler -- [ ] create_database +- [x] create_database - [ ] create_dev_endpoint - [ ] create_job -- [ ] create_partition +- [x] create_partition - [ ] create_script -- [ ] create_table +- [x] create_table - [ ] create_trigger - [ ] create_user_defined_function - [ ] delete_classifier @@ -2030,8 +2037,8 @@ - [ ] delete_database - [ ] delete_dev_endpoint - [ ] delete_job -- [ ] delete_partition -- [ ] delete_table +- [x] delete_partition +- [x] delete_table - [ ] delete_table_version - [ ] delete_trigger - [ ] delete_user_defined_function @@ -2043,7 +2050,7 @@ - [ ] get_crawler - [ ] get_crawler_metrics - [ ] get_crawlers -- [ ] get_database +- [x] get_database - [ ] get_databases - [ ] get_dataflow_graph - [ ] get_dev_endpoint @@ -2053,13 +2060,13 @@ - [ ] get_job_runs - [ ] get_jobs - [ ] get_mapping -- [ ] get_partition -- [ ] get_partitions +- [x] get_partition +- [x] get_partitions - [ ] get_plan -- [ ] get_table -- [ ] get_table_version -- [ ] get_table_versions -- [ ] get_tables +- [x] get_table +- [x] get_table_version +- [x] get_table_versions +- [x] get_tables - [ ] get_trigger - [ ] get_triggers - [ ] get_user_defined_function @@ -2080,8 +2087,8 @@ - [ ] update_database - [ ] update_dev_endpoint - [ ] update_job -- [ ] update_partition -- [ ] update_table +- [x] update_partition +- [x] update_table - [ ] update_trigger - [ ] update_user_defined_function @@ -2334,7 +2341,7 @@ - [ ] update_service_specific_credential - [X] update_signing_certificate - [ ] update_ssh_public_key -- [ ] update_user +- [X] update_user - [X] upload_server_certificate - [X] upload_signing_certificate - [ ] upload_ssh_public_key @@ -2382,7 +2389,7 @@ - [ ] unsubscribe_from_event - [ ] update_assessment_target -## iot - 32% implemented +## iot - 33% implemented - [ ] accept_certificate_transfer - [X] add_thing_to_thing_group - [ ] associate_targets_with_job @@ -2480,7 +2487,7 @@ - [ ] list_topic_rules - [ ] list_v2_logging_levels - [ ] register_ca_certificate -- [ ] register_certificate +- [X] register_certificate - [ ] register_thing - [ ] reject_certificate_transfer - [X] remove_thing_from_thing_group @@ -2519,14 +2526,14 @@ - [ ] start_next_pending_job_execution - [ ] update_job_execution -## kinesis - 56% implemented +## kinesis - 61% implemented - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period - [X] delete_stream - [ ] describe_limits - [X] describe_stream -- [ ] describe_stream_summary +- [X] describe_stream_summary - [ ] disable_enhanced_monitoring - [ ] enable_enhanced_monitoring - [X] get_records @@ -3098,14 +3105,14 @@ - [ ] update_server - [ ] update_server_engine_attributes -## organizations - 30% implemented +## organizations - 47% implemented - [ ] accept_handshake -- [ ] attach_policy +- [X] attach_policy - [ ] cancel_handshake - [X] create_account - [X] create_organization - [X] create_organizational_unit -- [ ] create_policy +- [X] create_policy - [ ] decline_handshake - [ ] delete_organization - [ ] delete_organizational_unit @@ -3115,7 +3122,7 @@ - [ ] describe_handshake - [X] describe_organization - [X] describe_organizational_unit -- [ ] describe_policy +- [X] describe_policy - [ ] detach_policy - [ ] disable_aws_service_access - [ ] disable_policy_type @@ -3133,10 +3140,10 @@ - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent - [X] list_parents -- [ ] 
list_policies -- [ ] list_policies_for_target +- [X] list_policies +- [X] list_policies_for_target - [X] list_roots -- [ ] list_targets_for_policy +- [X] list_targets_for_policy - [X] move_account - [ ] remove_account_from_organization - [ ] update_organizational_unit @@ -3415,19 +3422,19 @@ - [ ] start_stream_processor - [ ] stop_stream_processor -## resource-groups - 0% implemented -- [ ] create_group -- [ ] delete_group -- [ ] get_group -- [ ] get_group_query +## resource-groups - 62% implemented +- [X] create_group +- [X] delete_group +- [X] get_group +- [X] get_group_query - [ ] get_tags - [ ] list_group_resources -- [ ] list_groups +- [X] list_groups - [ ] search_resources - [ ] tag - [ ] untag -- [ ] update_group -- [ ] update_group_query +- [X] update_group +- [X] update_group_query ## resourcegroupstaggingapi - 60% implemented - [X] get_resources @@ -3540,7 +3547,7 @@ - [ ] delete_object - [ ] delete_object_tagging - [ ] delete_objects -- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration - [ ] get_bucket_cors @@ -3574,7 +3581,7 @@ - [ ] list_objects - [ ] list_objects_v2 - [ ] list_parts -- [ ] put_bucket_accelerate_configuration +- [X] put_bucket_accelerate_configuration - [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors @@ -3654,14 +3661,14 @@ ## secretsmanager - 33% implemented - [ ] cancel_rotate_secret - [X] create_secret -- [ ] delete_secret +- [X] delete_secret - [X] describe_secret - [X] get_random_password - [X] get_secret_value -- [ ] list_secret_version_ids -- [ ] list_secrets -- [ ] put_secret_value -- [ ] restore_secret +- [X] list_secret_version_ids +- [X] list_secrets +- [X] put_secret_value +- [X] restore_secret - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource diff --git a/README.md b/README.md index aeff847ed..e4c88dec8 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,8 @@ [![Join the chat at https://gitter.im/awsmoto/Lobby](https://badges.gitter.im/awsmoto/Lobby.svg)](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/spulec/moto.png?branch=master)](https://travis-ci.org/spulec/moto) -[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.png?branch=master)](https://coveralls.io/r/spulec/moto) +[![Build Status](https://travis-ci.org/spulec/moto.svg?branch=master)](https://travis-ci.org/spulec/moto) +[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.svg?branch=master)](https://coveralls.io/r/spulec/moto) [![Docs](https://readthedocs.org/projects/pip/badge/?version=stable)](http://docs.getmoto.org) # In a nutshell @@ -47,7 +47,7 @@ def test_my_model_save(): body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") - assert body == b'is awesome' + assert body == 'is awesome' ``` With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys. @@ -55,95 +55,95 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. 
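For instance, once a standalone server is running, any SDK can be pointed at it purely through its endpoint URL. A minimal sketch (not part of this diff), assuming a local moto server was started beforehand on port 5000; the bucket name and credentials are illustrative:

```python
import boto3

# Assumes a standalone moto server is already running locally, e.g.:
#   $ moto_server s3 -p 5000
# Credentials are placeholders; the mock server does not validate them.
s3 = boto3.client(
    "s3",
    region_name="us-east-1",
    endpoint_url="http://localhost:5000",
    aws_access_key_id="fake",
    aws_secret_access_key="fake",
)

s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome")
print(s3.get_object(Bucket="mybucket", Key="steve")["Body"].read().decode("utf-8"))
```

The same endpoint-override approach works from any language's AWS SDK, which is what makes server mode useful beyond Python.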
Here's the status of the other AWS services implemented: ```gherkin -|------------------------------------------------------------------------------| -| Service Name | Decorator | Development Status | -|------------------------------------------------------------------------------| -| ACM | @mock_acm | all endpoints done | -|------------------------------------------------------------------------------| -| API Gateway | @mock_apigateway | core endpoints done | -|------------------------------------------------------------------------------| -| Autoscaling | @mock_autoscaling| core endpoints done | -|------------------------------------------------------------------------------| -| Cloudformation | @mock_cloudformation| core endpoints done | -|------------------------------------------------------------------------------| -| Cloudwatch | @mock_cloudwatch | basic endpoints done | -|------------------------------------------------------------------------------| -| CloudwatchEvents | @mock_events | all endpoints done | -|------------------------------------------------------------------------------| -| Cognito Identity | @mock_cognitoidentity| basic endpoints done | -|------------------------------------------------------------------------------| -| Cognito Identity Provider | @mock_cognitoidp| basic endpoints done | -|------------------------------------------------------------------------------| -| Config | @mock_config | basic endpoints done | -|------------------------------------------------------------------------------| -| Data Pipeline | @mock_datapipeline| basic endpoints done | -|------------------------------------------------------------------------------| -| DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | -|------------------------------------------------------------------------------| -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | -|------------------------------------------------------------------------------| -| ECR | @mock_ecr | basic endpoints done | -|------------------------------------------------------------------------------| -| ECS | @mock_ecs | basic endpoints done | -|------------------------------------------------------------------------------| -| ELB | @mock_elb | core endpoints done | -|------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | all endpoints done | -|------------------------------------------------------------------------------| -| EMR | @mock_emr | core endpoints done | -|------------------------------------------------------------------------------| -| Glacier | @mock_glacier | core endpoints done | -|------------------------------------------------------------------------------| -| IAM | @mock_iam | core endpoints done | -|------------------------------------------------------------------------------| -| IoT | @mock_iot | core endpoints done | -| | @mock_iotdata | core endpoints done | -|------------------------------------------------------------------------------| -| Lambda | @mock_lambda | basic endpoints done, requires | -| | | docker | -|------------------------------------------------------------------------------| -| Logs | @mock_logs | basic endpoints done | -|------------------------------------------------------------------------------| 
-| Kinesis | @mock_kinesis | core endpoints done | -|------------------------------------------------------------------------------| -| KMS | @mock_kms | basic endpoints done | -|------------------------------------------------------------------------------| -| Organizations | @mock_organizations | some core endpoints done | -|------------------------------------------------------------------------------| -| Polly | @mock_polly | all endpoints done | -|------------------------------------------------------------------------------| -| RDS | @mock_rds | core endpoints done | -|------------------------------------------------------------------------------| -| RDS2 | @mock_rds2 | core endpoints done | -|------------------------------------------------------------------------------| -| Redshift | @mock_redshift | core endpoints done | -|------------------------------------------------------------------------------| -| Route53 | @mock_route53 | core endpoints done | -|------------------------------------------------------------------------------| -| S3 | @mock_s3 | core endpoints done | -|------------------------------------------------------------------------------| -| SecretsManager | @mock_secretsmanager | basic endpoints done -|------------------------------------------------------------------------------| -| SES | @mock_ses | all endpoints done | -|------------------------------------------------------------------------------| -| SNS | @mock_sns | all endpoints done | -|------------------------------------------------------------------------------| -| SQS | @mock_sqs | core endpoints done | -|------------------------------------------------------------------------------| -| SSM | @mock_ssm | core endpoints done | -|------------------------------------------------------------------------------| -| STS | @mock_sts | core endpoints done | -|------------------------------------------------------------------------------| -| SWF | @mock_swf | basic endpoints done | -|------------------------------------------------------------------------------| -| X-Ray | @mock_xray | all endpoints done | -|------------------------------------------------------------------------------| +|-------------------------------------------------------------------------------------| +| Service Name | Decorator | Development Status | +|-------------------------------------------------------------------------------------| +| ACM | @mock_acm | all endpoints done | +|-------------------------------------------------------------------------------------| +| API Gateway | @mock_apigateway | core endpoints done | +|-------------------------------------------------------------------------------------| +| Autoscaling | @mock_autoscaling | core endpoints done | +|-------------------------------------------------------------------------------------| +| Cloudformation | @mock_cloudformation | core endpoints done | +|-------------------------------------------------------------------------------------| +| Cloudwatch | @mock_cloudwatch | basic endpoints done | +|-------------------------------------------------------------------------------------| +| CloudwatchEvents | @mock_events | all endpoints done | +|-------------------------------------------------------------------------------------| +| Cognito Identity | @mock_cognitoidentity | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | 
+|-------------------------------------------------------------------------------------| +| Config | @mock_config | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Data Pipeline | @mock_datapipeline | basic endpoints done | +|-------------------------------------------------------------------------------------| +| DynamoDB | @mock_dynamodb | core endpoints done | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | +|-------------------------------------------------------------------------------------| +| EC2 | @mock_ec2 | core endpoints done | +| - AMI | | core endpoints done | +| - EBS | | core endpoints done | +| - Instances | | all endpoints done | +| - Security Groups | | core endpoints done | +| - Tags | | all endpoints done | +|-------------------------------------------------------------------------------------| +| ECR | @mock_ecr | basic endpoints done | +|-------------------------------------------------------------------------------------| +| ECS | @mock_ecs | basic endpoints done | +|-------------------------------------------------------------------------------------| +| ELB | @mock_elb | core endpoints done | +|-------------------------------------------------------------------------------------| +| ELBv2 | @mock_elbv2 | all endpoints done | +|-------------------------------------------------------------------------------------| +| EMR | @mock_emr | core endpoints done | +|-------------------------------------------------------------------------------------| +| Glacier | @mock_glacier | core endpoints done | +|-------------------------------------------------------------------------------------| +| IAM | @mock_iam | core endpoints done | +|-------------------------------------------------------------------------------------| +| IoT | @mock_iot | core endpoints done | +| | @mock_iotdata | core endpoints done | +|-------------------------------------------------------------------------------------| +| Kinesis | @mock_kinesis | core endpoints done | +|-------------------------------------------------------------------------------------| +| KMS | @mock_kms | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Lambda | @mock_lambda | basic endpoints done, requires | +| | | docker | +|-------------------------------------------------------------------------------------| +| Logs | @mock_logs | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Organizations | @mock_organizations | some core endpoints done | +|-------------------------------------------------------------------------------------| +| Polly | @mock_polly | all endpoints done | +|-------------------------------------------------------------------------------------| +| RDS | @mock_rds | core endpoints done | +|-------------------------------------------------------------------------------------| +| RDS2 | @mock_rds2 | core endpoints done | +|-------------------------------------------------------------------------------------| +| Redshift | @mock_redshift | core endpoints done | +|-------------------------------------------------------------------------------------| +| Route53 | @mock_route53 | core endpoints done | +|-------------------------------------------------------------------------------------| +| S3 | @mock_s3 | core endpoints done | 
+|-------------------------------------------------------------------------------------| +| SecretsManager | @mock_secretsmanager | basic endpoints done | +|-------------------------------------------------------------------------------------| +| SES | @mock_ses | all endpoints done | +|-------------------------------------------------------------------------------------| +| SNS | @mock_sns | all endpoints done | +|-------------------------------------------------------------------------------------| +| SQS | @mock_sqs | core endpoints done | +|-------------------------------------------------------------------------------------| +| SSM | @mock_ssm | core endpoints done | +|-------------------------------------------------------------------------------------| +| STS | @mock_sts | core endpoints done | +|-------------------------------------------------------------------------------------| +| SWF | @mock_swf | basic endpoints done | +|-------------------------------------------------------------------------------------| +| X-Ray | @mock_xray | all endpoints done | +|-------------------------------------------------------------------------------------| ``` For a full list of endpoint [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) @@ -318,3 +318,11 @@ boto3.resource( ```console $ pip install moto ``` + +## Releases + +Releases are done from Travis CI, fairly closely following this guide: +https://docs.travis-ci.com/user/deployment/pypi/ + +- Commits to the `master` branch trigger a dev deploy to PyPI. +- Commits to a tag trigger a real deploy to PyPI. diff --git a/docs/index.rst b/docs/index.rst index 66e12e4bd..4811fb797 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,66 +17,95 @@ with ``moto`` and its usage. Currently implemented Services: ------------------------------- -+-----------------------+---------------------+-----------------------------------+ -| Service Name | Decorator | Development Status | -+=======================+=====================+===================================+ -| API Gateway | @mock_apigateway | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Autoscaling | @mock_autoscaling | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Cloudformation | @mock_cloudformation| core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Cloudwatch | @mock_cloudwatch | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Data Pipeline | @mock_datapipeline | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| - DynamoDB | - @mock_dynamodb | - core endpoints done | -| - DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes| -+-----------------------+---------------------+-----------------------------------+ -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | - core endpoints done | -| - EBS | | - core endpoints done | -| - Instances | | - all endpoints done | -| - Security Groups | | - core endpoints done | -| - Tags | | - all endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| ECS | @mock_ecs | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| ELB | @mock_elb | core endpoints done | -| | @mock_elbv2 | core endpoints done |
-+-----------------------+---------------------+-----------------------------------+ -| EMR | @mock_emr | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Glacier | @mock_glacier | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| IAM | @mock_iam | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Lambda | @mock_lambda | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Kinesis | @mock_kinesis | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| KMS | @mock_kms | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| RDS | @mock_rds | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| RDS2 | @mock_rds2 | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Redshift | @mock_redshift | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| Route53 | @mock_route53 | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| S3 | @mock_s3 | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SES | @mock_ses | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SNS | @mock_sns | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SQS | @mock_sqs | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| STS | @mock_sts | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SWF | @mock_swf | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ ++---------------------------+-----------------------+------------------------------------+ +| Service Name | Decorator | Development Status | ++===========================+=======================+====================================+ +| ACM | @mock_acm | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| API Gateway | @mock_apigateway | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Autoscaling | @mock_autoscaling | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cloudformation | @mock_cloudformation | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cloudwatch | @mock_cloudwatch | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| CloudwatchEvents | @mock_events | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cognito Identity | @mock_cognitoidentity | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cognito Identity Provider | @mock_cognitoidp | all endpoints done | 
++---------------------------+-----------------------+------------------------------------+ +| Config | @mock_config | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Data Pipeline | @mock_datapipeline | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| DynamoDB | - @mock_dynamodb | - core endpoints done | +| DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes | ++---------------------------+-----------------------+------------------------------------+ +| EC2 | @mock_ec2 | core endpoints done | +| - AMI | | - core endpoints done | +| - EBS | | - core endpoints done | +| - Instances | | - all endpoints done | +| - Security Groups | | - core endpoints done | +| - Tags | | - all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ECR | @mock_ecr | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ECS | @mock_ecs | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ELB | @mock_elb | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ELBv2 | @mock_elbv2 | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| EMR | @mock_emr | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Glacier | @mock_glacier | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| IAM | @mock_iam | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| IoT | @mock_iot | core endpoints done | +| | @mock_iotdata | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Kinesis | @mock_kinesis | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| KMS | @mock_kms | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Lambda | @mock_lambda | basic endpoints done, | +| | | requires docker | ++---------------------------+-----------------------+------------------------------------+ +| Logs | @mock_logs | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Organizations | @mock_organizations | some core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Polly | @mock_polly | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| RDS | @mock_rds | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| RDS2 | @mock_rds2 | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Redshift | @mock_redshift | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Route53 | @mock_route53 | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| S3 | @mock_s3 | core
endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SecretsManager | @mock_secretsmanager | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SES | @mock_ses | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SNS | @mock_sns | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SQS | @mock_sqs | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SSM | @mock_ssm | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| STS | @mock_sts | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| SWF | @mock_swf | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| X-Ray | @mock_xray | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ diff --git a/moto/__init__.py b/moto/__init__.py index 8e9b91bce..9c974f00d 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.7' +__version__ = '1.3.9' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa @@ -36,6 +36,7 @@ from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa +from .resourcegroups import mock_resourcegroups # flake8: noqa from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa from .ses import mock_ses, mock_ses_deprecated # flake8: noqa from .secretsmanager import mock_secretsmanager # flake8: noqa diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 27e81a87c..24811be73 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -1,4 +1,7 @@ from __future__ import unicode_literals + +import random + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel @@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel): self.autoscaling_backend = autoscaling_backend self.name = name - if not availability_zones and not vpc_zone_identifier: - raise AutoscalingClientError( - "ValidationError", - "At least one Availability Zone or VPC Subnet is required." 
- ) - self.availability_zones = availability_zones - self.vpc_zone_identifier = vpc_zone_identifier + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) self.max_size = max_size self.min_size = min_size @@ -188,6 +185,35 @@ class FakeAutoScalingGroup(BaseModel): self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) + def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False): + # for updates, if only AZs are provided, they must not clash with + # the AZs of existing VPCs + if update and availability_zones and not vpc_zone_identifier: + vpc_zone_identifier = self.vpc_zone_identifier + + if vpc_zone_identifier: + # extract azs for vpcs + subnet_ids = vpc_zone_identifier.split(',') + subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids) + vpc_zones = [subnet.availability_zone for subnet in subnets] + + if availability_zones and set(availability_zones) != set(vpc_zones): + raise AutoscalingClientError( + "ValidationError", + "The availability zones of the specified subnets and the Auto Scaling group do not match", + ) + availability_zones = vpc_zones + elif not availability_zones: + if not update: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." + ) + return + + self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -246,8 +272,8 @@ class FakeAutoScalingGroup(BaseModel): health_check_period, health_check_type, placement_group, termination_policies, new_instances_protected_from_scale_in=None): - if availability_zones: - self.availability_zones = availability_zones + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True) + if max_size is not None: self.max_size = max_size if min_size is not None: @@ -257,8 +283,6 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - if vpc_zone_identifier is not None: - self.vpc_zone_identifier = vpc_zone_identifier if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: @@ -319,7 +343,8 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config.user_data, self.launch_config.security_groups, instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} + tags={'instance': propagated_tags}, + placement=random.choice(self.availability_zones), ) for instance in reservation.instances: instance.autoscaling_group = self diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 6a7913021..985c6f852 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ -{{ requestid }} + """ @@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {{ instance_state.health_status }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ group.launch_config_name }} {{ instance_state.lifecycle_state }} @@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {{ instance_state.health_status }} {{ 
instance_state.instance.autoscaling_group.name }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ instance_state.instance.autoscaling_group.launch_config_name }} {{ instance_state.lifecycle_state }} @@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """ -{{ requestid }} + """ @@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ -{{ requestid }} + """ @@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """ -{{ requestid }} + """ SET_INSTANCE_PROTECTION_TEMPLATE = """ -{{ requestid }} + """ diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 9fc41c11e..8dfa4724a 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -30,7 +30,7 @@ from moto.s3.models import s3_backend from moto.logs.models import logs_backends from moto.s3.exceptions import MissingBucket, MissingKey from moto import settings -from .utils import make_function_arn +from .utils import make_function_arn, make_function_ver_arn logger = logging.getLogger(__name__) @@ -45,7 +45,7 @@ except ImportError: _stderr_regex = re.compile(r'START|END|REPORT RequestId: .*') _orig_adapter_send = requests.adapters.HTTPAdapter.send -docker_3 = docker.__version__.startswith("3") +docker_3 = docker.__version__[0] >= '3' def zip2tar(zip_bytes): @@ -215,12 +215,12 @@ class LambdaFunction(BaseModel): self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) + self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name) self.tags = dict() def set_version(self, version): - self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) + self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version) self.version = version self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') @@ -503,7 +503,10 @@ class LambdaStorage(object): def list_versions_by_function(self, name): if name not in self._functions: return None - return [self._functions[name]['latest']] + + latest = copy.copy(self._functions[name]['latest']) + latest.function_arn += ':$LATEST' + return [latest] + self._functions[name]['versions'] def get_arn(self, arn): return self._arns.get(arn, None) @@ -535,6 +538,7 @@ class LambdaStorage(object): fn.set_version(new_version) self._functions[name]['versions'].append(fn) + self._arns[fn.function_arn] = fn return fn def del_function(self, name, qualifier=None): @@ -604,6 +608,9 @@ class LambdaBackend(BaseBackend): self._lambdas.put_function(fn) + if spec.get('Publish'): + ver = self.publish_function(function_name) + fn.version = ver.version return fn def publish_function(self, function_name): diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index d4eb73bc3..c29c9acd9 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -150,7 +150,7 @@ class LambdaResponse(BaseResponse): for fn in self.lambda_backend.list_functions(): json_data = fn.get_configuration() - + json_data['Version'] = '$LATEST' result['Functions'].append(json_data) return 200, {}, json.dumps(result) @@ -183,7 +183,7 @@ class LambdaResponse(BaseResponse): fn = self.lambda_backend.publish_function(function_name) if fn: config = fn.get_configuration() - return 200, {}, json.dumps(config) + return 201, {}, json.dumps(config) else: return 404, {}, "{}" @@ -204,7 +204,10 @@ class LambdaResponse(BaseResponse): if fn: code = fn.get_code() - + if 
qualifier is None or qualifier == '$LATEST': + code['Configuration']['Version'] = '$LATEST' + if qualifier == '$LATEST': + code['Configuration']['FunctionArn'] += ':$LATEST' return 200, {}, json.dumps(code) else: return 404, {}, "{}" diff --git a/moto/awslambda/utils.py b/moto/awslambda/utils.py index 88146d34f..82027cb2f 100644 --- a/moto/awslambda/utils.py +++ b/moto/awslambda/utils.py @@ -3,8 +3,13 @@ from collections import namedtuple ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version']) -def make_function_arn(region, account, name, version='1'): - return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version) +def make_function_arn(region, account, name): + return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name) + + +def make_function_ver_arn(region, account, name, version='1'): + arn = make_function_arn(region, account, name) + return '{0}:{1}'.format(arn, version) def split_function_arn(arn): diff --git a/moto/backends.py b/moto/backends.py index 90cc803a7..6ea85093d 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -32,6 +32,7 @@ from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends +from moto.resourcegroups import resourcegroups_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends @@ -81,6 +82,7 @@ BACKENDS = { 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': redshift_backends, + 'resource-groups': resourcegroups_backends, 'rds': rds2_backends, 's3': s3_backends, 's3bucket_path': s3_backends, diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 864e98a92..01e3113dd 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from datetime import datetime +from datetime import datetime, timedelta import json import yaml import uuid @@ -12,12 +12,156 @@ from .parsing import ResourceMap, OutputMap from .utils import ( generate_changeset_id, generate_stack_id, + generate_stackset_arn, + generate_stackset_id, yaml_tag_constructor, validate_template_cfn_lint, ) from .exceptions import ValidationError +class FakeStackSet(BaseModel): + + def __init__(self, stackset_id, name, template, region='us-east-1', + status='ACTIVE', description=None, parameters=None, tags=None, + admin_role='AWSCloudFormationStackSetAdministrationRole', + execution_role='AWSCloudFormationStackSetExecutionRole'): + self.id = stackset_id + self.arn = generate_stackset_arn(stackset_id, region) + self.name = name + self.template = template + self.description = description + self.parameters = parameters + self.tags = tags + self.admin_role = admin_role + self.execution_role = execution_role + self.status = status + self.instances = FakeStackInstances(parameters, self.id, self.name) + self.stack_instances = self.instances.stack_instances + self.operations = [] + + def _create_operation(self, operation_id, action, status, accounts=[], regions=[]): + operation = { + 'OperationId': str(operation_id), + 'Action': action, + 'Status': status, + 'CreationTimestamp': datetime.now(), + 'EndTimestamp': datetime.now() + timedelta(minutes=2), + 'Instances': [{account: region} for account in accounts for region in regions], + } + + self.operations += [operation] + return operation + + def get_operation(self, operation_id): + for operation in 
self.operations: + if operation_id == operation['OperationId']: + return operation + raise ValidationError(operation_id) + + def update_operation(self, operation_id, status): + operation = self.get_operation(operation_id) + operation['Status'] = status + return operation_id + + def delete(self): + self.status = 'DELETED' + + def update(self, template, description, parameters, tags, admin_role, + execution_role, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.template = template if template else self.template + self.description = description if description is not None else self.description + self.parameters = parameters if parameters else self.parameters + self.tags = tags if tags else self.tags + self.admin_role = admin_role if admin_role else self.admin_role + self.execution_role = execution_role if execution_role else self.execution_role + + if accounts and regions: + self.update_instances(accounts, regions, self.parameters) + + operation = self._create_operation(operation_id=operation_id, + action='UPDATE', status='SUCCEEDED', accounts=accounts, + regions=regions) + return operation + + def create_stack_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + if not parameters: + parameters = self.parameters + + self.instances.create_instances(accounts, regions, parameters, operation_id) + self._create_operation(operation_id=operation_id, action='CREATE', + status='SUCCEEDED', accounts=accounts, regions=regions) + + def delete_stack_instances(self, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.instances.delete(accounts, regions) + + operation = self._create_operation(operation_id=operation_id, action='DELETE', + status='SUCCEEDED', accounts=accounts, regions=regions) + return operation + + def update_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.instances.update(accounts, regions, parameters) + operation = self._create_operation(operation_id=operation_id, + action='UPDATE', status='SUCCEEDED', accounts=accounts, + regions=regions) + return operation + + +class FakeStackInstances(BaseModel): + def __init__(self, parameters, stackset_id, stackset_name): + self.parameters = parameters if parameters else {} + self.stackset_id = stackset_id + self.stack_name = "StackSet-{}".format(stackset_id) + self.stackset_name = stackset_name + self.stack_instances = [] + + def create_instances(self, accounts, regions, parameters, operation_id): + new_instances = [] + for region in regions: + for account in accounts: + instance = { + 'StackId': generate_stack_id(self.stack_name, region, account), + 'StackSetId': self.stackset_id, + 'Region': region, + 'Account': account, + 'Status': "CURRENT", + 'ParameterOverrides': parameters if parameters else [], + } + new_instances.append(instance) + self.stack_instances += new_instances + return new_instances + + def update(self, accounts, regions, parameters): + for account in accounts: + for region in regions: + instance = self.get_instance(account, region) + if parameters: + instance['ParameterOverrides'] = parameters + else: + instance['ParameterOverrides'] = [] + + def delete(self, accounts, regions): + # Filter rather than pop while enumerating, so consecutive matches are not skipped. + self.stack_instances = [ + instance for instance in self.stack_instances + if not (instance['Region'] in regions and instance['Account'] in accounts) + ] + + def get_instance(self, account, region): + for i,
instance in enumerate(self.stack_instances): + if instance['Region'] == region and instance['Account'] == account: + return self.stack_instances[i] + + class FakeStack(BaseModel): def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False): @@ -85,9 +229,9 @@ class FakeStack(BaseModel): def _parse_template(self): yaml.add_multi_constructor('', yaml_tag_constructor) try: - self.template_dict = yaml.load(self.template) + self.template_dict = yaml.load(self.template, Loader=yaml.Loader) except yaml.parser.ParserError: self.template_dict = json.loads(self.template) @property def stack_parameters(self): @@ -189,10 +333,72 @@ class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() + self.stacksets = OrderedDict() self.deleted_stacks = {} self.exports = OrderedDict() self.change_sets = OrderedDict() + def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None): + stackset_id = generate_stackset_id(name) + new_stackset = FakeStackSet( + stackset_id=stackset_id, + name=name, + template=template, + parameters=parameters, + description=description, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + ) + self.stacksets[stackset_id] = new_stackset + return new_stackset + + def get_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + return self.stacksets[stackset] + raise ValidationError(name) + + def delete_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + self.stacksets[stackset].delete() + + def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None): + stackset = self.get_stack_set(stackset_name) + + stackset.create_stack_instances( + accounts=accounts, + regions=regions, + parameters=parameters, + operation_id=operation_id, + ) + return stackset + + def update_stack_set(self, stackset_name, template=None, description=None, + parameters=None, tags=None, admin_role=None, execution_role=None, + accounts=None, regions=None, operation_id=None): + stackset = self.get_stack_set(stackset_name) + update = stackset.update( + template=template, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + return update + + def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None): + stackset = self.get_stack_set(stackset_name) + stackset.delete_stack_instances(accounts, regions, operation_id) + return stackset + def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False): stack_id = generate_stack_id(name) new_stack = FakeStack(
moto.dynamodb import models as dynamodb_models +from moto.dynamodb2 import models as dynamodb2_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models @@ -37,7 +37,7 @@ MODEL_MAP = { "AWS::Batch::JobDefinition": batch_models.JobDefinition, "AWS::Batch::JobQueue": batch_models.JobQueue, "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, - "AWS::DynamoDB::Table": dynamodb_models.Table, + "AWS::DynamoDB::Table": dynamodb2_models.Table, "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, @@ -246,7 +246,8 @@ def resource_name_property_from_type(resource_type): def generate_resource_name(resource_type, stack_name, logical_id): - if resource_type == "AWS::ElasticLoadBalancingV2::TargetGroup": + if resource_type in ["AWS::ElasticLoadBalancingV2::TargetGroup", + "AWS::ElasticLoadBalancingV2::LoadBalancer"]: # Target group names need to be less than 32 characters, so when cloudformation creates a name for you # it makes sure to stay under that limit name_prefix = '{0}-{1}'.format(stack_name, logical_id) @@ -425,11 +426,18 @@ class ResourceMap(collections.Mapping): self.resolved_parameters[parameter_name] = parameter.get('Default') # Set any input parameters that were passed + self.no_echo_parameter_keys = [] for key, value in self.input_parameters.items(): if key in self.resolved_parameters: - value_type = parameter_slots[key].get('Type', 'String') + parameter_slot = parameter_slots[key] + + value_type = parameter_slot.get('Type', 'String') if value_type == 'CommaDelimitedList' or value_type.startswith("List"): value = value.split(',') + + if parameter_slot.get('NoEcho'): + self.no_echo_parameter_keys.append(key) + self.resolved_parameters[key] = value # Check if there are any non-default params that were not passed input diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 84805efaf..80970262f 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -342,6 +342,175 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE) return template.render(description=description) + def create_stack_set(self): + stackset_name = self._get_param('StackSetName') + stack_body = self._get_param('TemplateBody') + template_url = self._get_param('TemplateURL') + # role_arn = self._get_param('RoleARN') + parameters_list = self._get_list_prefix("Parameters.member") + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) + + # Copy-Pasta - Hack dict-comprehension + parameters = dict([ + (parameter['parameter_key'], parameter['parameter_value']) + for parameter + in parameters_list + ]) + if template_url: + stack_body = self._get_stack_from_s3_url(template_url) + + stackset = self.cloudformation_backend.create_stack_set( + name=stackset_name, + template=stack_body, + parameters=parameters, + tags=tags, + # role_arn=role_arn, + ) + if self.request_json: + return json.dumps({ + 'CreateStackSetResponse': { + 'CreateStackSetResult': { + 'StackSetId': stackset.stackset_id, + } + } + }) + else: + template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def create_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = 
+ def create_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + parameters = self._get_multi_param('ParameterOverrides.member') + self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters) + template = self.response_template(CREATE_STACK_INSTANCES_TEMPLATE) + return template.render() + + def delete_stack_set(self): + stackset_name = self._get_param('StackSetName') + self.cloudformation_backend.delete_stack_set(stackset_name) + template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE) + return template.render() + + def delete_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions) + + template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE) + return template.render(operation=operation) + + def describe_stack_set(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + + if not stackset.admin_role: + stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole' + if not stackset.execution_role: + stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole' + + template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def describe_stack_instance(self): + stackset_name = self._get_param('StackSetName') + account = self._get_param('StackInstanceAccount') + region = self._get_param('StackInstanceRegion') + + instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region) + template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE) + rendered = template.render(instance=instance) + return rendered + + def list_stack_sets(self): + stacksets = self.cloudformation_backend.stacksets + template = self.response_template(LIST_STACK_SETS_TEMPLATE) + return template.render(stacksets=stacksets) + + def list_stack_instances(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE) + return template.render(stackset=stackset) + + def list_stack_set_operations(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def stop_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + stackset.update_operation(operation_id, 'STOPPED') + template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE) + return template.render() + + def describe_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE) + return template.render(stackset=stackset, operation=operation) + + def list_stack_set_operation_results(self): + stackset_name = self._get_param('StackSetName') + operation_id = 
self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + def update_stack_set(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + description = self._get_param('Description') + execution_role = self._get_param('ExecutionRoleName') + admin_role = self._get_param('AdministrationRoleARN') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + template_body = self._get_param('TemplateBody') + template_url = self._get_param('TemplateURL') + if template_url: + template_body = self._get_stack_from_s3_url(template_url) + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) + parameters_list = self._get_list_prefix("Parameters.member") + parameters = dict([ + (parameter['parameter_key'], parameter['parameter_value']) + for parameter + in parameters_list + ]) + operation = self.cloudformation_backend.update_stack_set( + stackset_name=stackset_name, + template=template_body, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + + template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + def update_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + parameters = self._get_multi_param('ParameterOverrides.member') + operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters) + template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE) + return template.render(operation=operation) + VALIDATE_STACK_RESPONSE_TEMPLATE = """ @@ -485,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """ {% for param_name, param_value in stack.stack_parameters.items() %} {{ param_name }} - {{ param_value }} + {% if param_name in stack.resource_map.no_echo_parameter_keys %} + **** + {% else %} + {{ param_value }} + {% endif %} {% endfor %} @@ -664,3 +837,236 @@ LIST_EXPORTS_RESPONSE = """ + + {{ stackset.stackset_id }} + + + f457258c-391d-41d1-861f-example + + +""" + +DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """ + + + + {{ stackset.arn }} + {{ stackset.execution_role }} + {{ stackset.admin_role }} + {{ stackset.id }} + {{ stackset.template }} + {{ stackset.name }} + + {% for param_name, param_value in stackset.parameters.items() %} + + {{ param_name }} + {{ param_value }} + + {% endfor %} + + + {% for tag_key, tag_value in stackset.tags.items() %} + + {{ tag_key }} + {{ tag_value }} + + {% endfor %} + + {{ stackset.status }} + + + + d8b64e11-5332-46e1-9603-example + +""" + +DELETE_STACK_SET_RESPONSE_TEMPLATE = """ + + + c35ec2d0-d69f-4c4d-9bd7-example + +""" + +CREATE_STACK_INSTANCES_TEMPLATE = """ + + 1459ad6d-63cc-4c96-a73e-example + + + 6b29f7e3-69be-4d32-b374-example + + +""" + +LIST_STACK_INSTANCES_TEMPLATE = """ + + + {% for instance in stackset.stack_instances %} + + {{ instance.StackId }} + {{ instance.StackSetId }} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + {% endfor %} + + + + 83c27e73-b498-410f-993c-example + + +""" + 
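A sketch of the stack-instance path these handlers and response templates implement, again with illustrative names (one account fanned out across two regions):

```python
import json

import boto3
from moto import mock_cloudformation

TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}},
})


@mock_cloudformation
def stack_instance_fan_out():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack_set(StackSetName="demo-set", TemplateBody=TEMPLATE)
    # One account fanned out across two regions -> two stack instances.
    cf.create_stack_instances(
        StackSetName="demo-set",
        Accounts=["123456789012"],
        Regions=["us-east-1", "us-west-2"],
    )
    summaries = cf.list_stack_instances(StackSetName="demo-set")["Summaries"]
    assert len(summaries) == 2


stack_instance_fan_out()
```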
+DELETE_STACK_INSTANCES_TEMPLATE = """ + + {{ operation.OperationId }} + + + e5325090-66f6-4ecd-a531-example + + +""" + +DESCRIBE_STACK_INSTANCE_TEMPLATE = """ + + + {{ instance.StackId }} + {{ instance.StackSetId }} + {% if instance.ParameterOverrides %} + + {% for override in instance.ParameterOverrides %} + {% if override['ParameterKey'] or override['ParameterValue'] %} + + {{ override.ParameterKey }} + false + {{ override.ParameterValue }} + + {% endif %} + {% endfor %} + + {% else %} + + {% endif %} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + + + c6c7be10-0343-4319-8a25-example + + +""" + +LIST_STACK_SETS_TEMPLATE = """ + + + {% for key, value in stacksets.items() %} + + {{ value.name }} + {{ value.id }} + {{ value.status }} + + {% endfor %} + + + + 4dcacb73-841e-4ed8-b335-example + + +""" + +UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """ + + {{ operation }} + + + bdbf8e94-19b6-4ce4-af85-example + + +""" + +UPDATE_STACK_SET_RESPONSE_TEMPLATE = """ + + {{ operation.OperationId }} + + + adac907b-17e3-43e6-a254-example + + +""" + +LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """ + + + {% for operation in stackset.operations %} + + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + {{ operation.EndTimestamp }} + {{ operation.Status }} + + {% endfor %} + + + + 65b9d9be-08bb-4a43-9a21-example + + +""" + +STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """ + + + 2188554a-07c6-4396-b2c5-example + +""" + +DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """ + + + {{ stackset.execution_role }} + arn:aws:iam::123456789012:role/{{ stackset.admin_role }} + {{ stackset.id }} + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + + + + {{ operation.EndTimestamp }} + {{ operation.Status }} + + + + 2edc27b6-9ce2-486a-a192-example + + +""" + +LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """ + + + {% for instance in operation.Instances %} + {% for account, region in instance.items() %} + + + Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate + SKIPPED + + {{ region }} + {{ account }} + {{ operation.Status }} + + {% endfor %} + {% endfor %} + + + + ac05a9ce-5f98-4197-a29b-example + + +""" diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index f963ce7c8..e4290ce1a 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -4,13 +4,14 @@ import six import random import yaml import os +import string from cfnlint import decode, core -def generate_stack_id(stack_name): +def generate_stack_id(stack_name, region="us-east-1", account="123456789"): random_id = uuid.uuid4() - return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id) + return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id) def generate_changeset_id(changeset_name, region_name): @@ -18,9 +19,18 @@ def generate_changeset_id(changeset_name, region_name): return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id) +def generate_stackset_id(stackset_name): + random_id = uuid.uuid4() + return '{}:{}'.format(stackset_name, random_id) + + +def generate_stackset_arn(stackset_id, region_name): + return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id) + + def random_suffix(): size = 12 - chars = list(range(10)) + ['A-Z'] + chars = list(range(10)) + list(string.ascii_uppercase) return 
''.join(six.text_type(random.choice(chars)) for x in range(size)) diff --git a/moto/cognitoidp/models.py index 00868f7b3..ef1377789 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel): return user_json + def update_attributes(self, new_attributes): + + def flatten_attrs(attrs): + return {attr['Name']: attr['Value'] for attr in attrs} + + def expand_attrs(attrs): + return [{'Name': k, 'Value': v} for k, v in attrs.items()] + + flat_attributes = flatten_attrs(self.attributes) + flat_attributes.update(flatten_attrs(new_attributes)) + self.attributes = expand_attrs(flat_attributes) + class CognitoIdpBackend(BaseBackend): @@ -426,6 +438,19 @@ class CognitoIdpBackend(BaseBackend): return identity_provider + def update_identity_provider(self, user_pool_id, name, extended_config): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + identity_provider = user_pool.identity_providers.get(name) + if not identity_provider: + raise ResourceNotFoundError(name) + + identity_provider.extended_config.update(extended_config) + + return identity_provider + def delete_identity_provider(self, user_pool_id, name): user_pool = self.user_pools.get(user_pool_id) if not user_pool: @@ -660,6 +685,17 @@ class CognitoIdpBackend(BaseBackend): else: raise NotAuthorizedError(access_token) + def admin_update_user_attributes(self, user_pool_id, username, attributes): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = user_pool.users[username] + user.update_attributes(attributes) + cognitoidp_backends = {} for region in boto.cognito.identity.regions(): diff --git a/moto/cognitoidp/responses.py index 8b3941c21..e9e83695a 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -143,6 +143,14 @@ class CognitoIdpResponse(BaseResponse): "IdentityProvider": identity_provider.to_json(extended=True) }) + def update_identity_provider(self): + user_pool_id = self._get_param("UserPoolId") + name = self._get_param("ProviderName") + identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters) + return json.dumps({ + "IdentityProvider": identity_provider.to_json(extended=True) + }) + def delete_identity_provider(self): user_pool_id = self._get_param("UserPoolId") name = self._get_param("ProviderName") @@ -344,6 +352,13 @@ class CognitoIdpResponse(BaseResponse): cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password) return "" + def admin_update_user_attributes(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + attributes = self._get_param("UserAttributes") + cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes) + return "" + + class CognitoIdpJsonWebKeyResponse(BaseResponse):
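A sketch of the new `admin_update_user_attributes` path in use under `mock_cognitoidp`; the pool and user names are illustrative:

```python
import boto3
from moto import mock_cognitoidp


@mock_cognitoidp
def admin_update_user_attributes_merges():
    client = boto3.client("cognito-idp", region_name="us-west-2")
    pool_id = client.create_user_pool(PoolName="pool")["UserPool"]["Id"]
    client.admin_create_user(
        UserPoolId=pool_id,
        Username="alice",
        UserAttributes=[{"Name": "email", "Value": "old@example.com"}],
    )
    # Existing attributes are flattened, merged with the new ones, and re-expanded.
    client.admin_update_user_attributes(
        UserPoolId=pool_id,
        Username="alice",
        UserAttributes=[{"Name": "email", "Value": "new@example.com"}],
    )
    user = client.admin_get_user(UserPoolId=pool_id, Username="alice")
    attrs = {a["Name"]: a["Value"] for a in user["UserAttributes"]}
    assert attrs["email"] == "new@example.com"


admin_update_user_attributes_merges()
```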
diff --git a/moto/core/responses.py index 8fb247f75..9da36b865 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin): for key, value in flat.items(): querystring[key] = [value] elif self.body: - querystring.update(parse_qs(raw_body, keep_blank_values=True)) + try: + querystring.update(parse_qs(raw_body, keep_blank_values=True)) + except UnicodeEncodeError: + pass # ignore encoding errors, as the body may not contain a legitimate querystring if not querystring: querystring.update(headers) - querystring = _decode_dict(querystring) + try: + querystring = _decode_dict(querystring) + except UnicodeDecodeError: + pass # ignore decoding errors, as the body may not contain a legitimate querystring + self.uri = full_url self.path = urlparse(full_url).path self.querystring = querystring diff --git a/moto/core/utils.py index 777a03752..ca670e871 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -280,7 +280,7 @@ def amzn_request_id(f): # Update request ID in XML try: - body = body.replace('{{ requestid }}', request_id) + body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body) except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) pass diff --git a/moto/dynamodb2/exceptions.py new file mode 100644 index 000000000..9df973292 --- /dev/null +++ b/moto/dynamodb2/exceptions.py @@ -0,0 +1,2 @@ +class InvalidIndexNameError(ValueError): + pass diff --git a/moto/dynamodb2/models.py index 677bbfb07..bfbb654b4 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError from .comparisons import get_comparison_func, get_filter_expression, Op +from .exceptions import InvalidIndexNameError class DynamoJsonEncoder(json.JSONEncoder): @@ -293,6 +294,19 @@ class Item(BaseModel): # TODO: implement other data types raise NotImplementedError( 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + elif action == 'DELETE': + if set(update_action['Value'].keys()) == set(['SS']): + existing = self.attrs.get(attribute_name, DynamoType({"SS": {}})) + new_set = set(existing.value).difference(set(new_value)) + self.attrs[attribute_name] = DynamoType({ + "SS": list(new_set) + }) + else: + raise NotImplementedError( + 'DELETE not supported for %s' % ', '.join(update_action['Value'].keys())) + else: + raise NotImplementedError( + '%s action not supported for update_with_attribute_updates' % action) class StreamRecord(BaseModel): @@ -403,6 +417,25 @@ class Table(BaseModel): } self.set_stream_specification(streams) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + params = {} + + if 'KeySchema' in properties: + params['schema'] = properties['KeySchema'] + if 'AttributeDefinitions' in properties: + params['attr'] = properties['AttributeDefinitions'] + if 'GlobalSecondaryIndexes' in properties: + params['global_indexes'] = properties['GlobalSecondaryIndexes'] + if 'ProvisionedThroughput' in properties: + params['throughput'] = properties['ProvisionedThroughput'] + if 'LocalSecondaryIndexes' in properties: + params['indexes'] = properties['LocalSecondaryIndexes'] + + table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params) + return table + def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
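The new `DELETE` branch in `update_with_attribute_updates` handles the legacy `AttributeUpdates` API for string sets. A sketch of the behaviour it enables, with illustrative table and attribute names:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def delete_from_string_set():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="t",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    client.put_item(
        TableName="t",
        Item={"id": {"S": "1"}, "colors": {"SS": ["red", "blue"]}},
    )
    # Legacy AttributeUpdates API: remove one member from the string set.
    client.update_item(
        TableName="t",
        Key={"id": {"S": "1"}},
        AttributeUpdates={"colors": {"Action": "DELETE", "Value": {"SS": ["red"]}}},
    )
    item = client.get_item(TableName="t", Key={"id": {"S": "1"}})["Item"]
    assert item["colors"]["SS"] == ["blue"]


delete_from_string_set()
```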
@@ -570,8 +603,9 @@ class Table(BaseModel): def query(self, hash_key, range_comparison, range_objs, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, filter_expression=None, **filter_kwargs): results = [] + if index_name: - all_indexes = (self.global_indexes or []) + (self.indexes or []) + all_indexes = self.all_indexes() indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % ( @@ -586,24 +620,28 @@ class Table(BaseModel): raise ValueError('Missing Hash Key. KeySchema: %s' % index['KeySchema']) - possible_results = [] - for item in self.all_items(): - if not isinstance(item, Item): - continue - item_hash_key = item.attrs.get(index_hash_key['AttributeName']) - if item_hash_key and item_hash_key == hash_key: - possible_results.append(item) - else: - possible_results = [item for item in list(self.all_items()) if isinstance( - item, Item) and item.hash_key == hash_key] - - if index_name: try: index_range_key = [key for key in index[ 'KeySchema'] if key['KeyType'] == 'RANGE'][0] except IndexError: index_range_key = None + possible_results = [] + for item in self.all_items(): + if not isinstance(item, Item): + continue + item_hash_key = item.attrs.get(index_hash_key['AttributeName']) + if index_range_key is None: + if item_hash_key and item_hash_key == hash_key: + possible_results.append(item) + else: + item_range_key = item.attrs.get(index_range_key['AttributeName']) + if item_hash_key and item_hash_key == hash_key and item_range_key: + possible_results.append(item) + else: + possible_results = [item for item in list(self.all_items()) if isinstance( + item, Item) and item.hash_key == hash_key] + if range_comparison: if index_name and not index_range_key: raise ValueError( @@ -667,11 +705,39 @@ class Table(BaseModel): else: yield hash_set - def scan(self, filters, limit, exclusive_start_key, filter_expression=None): + def all_indexes(self): + return (self.global_indexes or []) + (self.indexes or []) + + def has_idx_items(self, index_name): + + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[index_name] + idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']]) + + for hash_set in self.items.values(): + if self.range_key_attr: + for item in hash_set.values(): + if idx_col_set.issubset(set(item.attrs)): + yield item + else: + if idx_col_set.issubset(set(hash_set.attrs)): + yield hash_set + + def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None, projection_expression=None): results = [] scanned_count = 0 + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) - for item in self.all_items(): + if index_name: + if index_name not in indexes_by_name: + raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name) + items = self.has_idx_items(index_name) + else: + items = self.all_items() + + for item in items: scanned_count += 1 passes_all_conditions = True for attribute_name, (comparison_operator, comparison_objs) in filters.items(): @@ -697,11 +763,19 @@ class Table(BaseModel): if passes_all_conditions: results.append(item) + if projection_expression: + expressions = [x.strip() for x in projection_expression.split(',')] + results = copy.deepcopy(results) + for result in results: + for attr in list(result.attrs): + if attr not in expressions: + result.attrs.pop(attr) + results, last_evaluated_key = self._trim_results(results, limit, - exclusive_start_key) + exclusive_start_key, index_name) return results, scanned_count, last_evaluated_key
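`has_idx_items` only yields items that carry all of the index's key attributes, which is what lets index scans skip sparse rows. A sketch of scanning a GSI, with illustrative table and index names:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def scan_a_gsi():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="jobs",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[
            {"AttributeName": "id", "AttributeType": "S"},
            {"AttributeName": "status", "AttributeType": "S"},
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
        GlobalSecondaryIndexes=[{
            "IndexName": "status-index",
            "KeySchema": [{"AttributeName": "status", "KeyType": "HASH"}],
            "Projection": {"ProjectionType": "ALL"},
            "ProvisionedThroughput": {"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
        }],
    )
    client.put_item(TableName="jobs", Item={"id": {"S": "1"}, "status": {"S": "done"}})
    client.put_item(TableName="jobs", Item={"id": {"S": "2"}})  # sparse: no "status"
    result = client.scan(TableName="jobs", IndexName="status-index")
    assert result["Count"] == 1  # the sparse item is not in the index


scan_a_gsi()
```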
- def _trim_results(self, results, limit, exclusive_start_key): + def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None): if exclusive_start_key is not None: hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr)) range_key = exclusive_start_key.get(self.range_key_attr) @@ -721,6 +795,14 @@ class Table(BaseModel): if results[-1].range_key is not None: last_evaluated_key[self.range_key_attr] = results[-1].range_key + if scanned_index: + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[scanned_index] + idx_col_list = [i['AttributeName'] for i in idx['KeySchema']] + for col in idx_col_list: + last_evaluated_key[col] = results[-1].attrs[col] + return results, last_evaluated_key def lookup(self, *args, **kwargs): @@ -888,7 +970,7 @@ class DynamoDBBackend(BaseBackend): return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs) - def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): + def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name, projection_expression): table = self.tables.get(table_name) if not table: return None, None, None @@ -903,7 +985,9 @@ class DynamoDBBackend(BaseBackend): else: filter_expression = Op(None, None) # Will always eval to true - return table.scan(scan_filters, limit, exclusive_start_key, filter_expression) + projection_expression = ','.join([expr_names.get(attr, attr) for attr in projection_expression.replace(' ', '').split(',')]) + + return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name, projection_expression) def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, expected=None): diff --git a/moto/dynamodb2/responses.py index 49095f09c..5dde432d5 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -5,6 +5,7 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id +from .exceptions import InvalidIndexNameError from .models import dynamodb_backends, dynamo_json_dump @@ -156,8 +157,16 @@ class DynamoHandler(BaseResponse): body = self.body # get the table name table_name = body['TableName'] - # get the throughput - throughput = body["ProvisionedThroughput"] + # check billing mode and get the throughput + if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST": + if "ProvisionedThroughput" in body.keys(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, + 'ProvisionedThroughput cannot be specified when BillingMode is PAY_PER_REQUEST') + throughput = None + else: # Provisioned (default billing mode) + throughput = body.get("ProvisionedThroughput") # getting the schema key_schema = body['KeySchema'] # getting attribute definition
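The `BillingMode` handling above accepts `PAY_PER_REQUEST` tables and rejects an explicit throughput for them. A sketch of both paths, with illustrative table names:

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_dynamodb2

SCHEMA = dict(
    KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
    AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
)


@mock_dynamodb2
def pay_per_request_billing():
    client = boto3.client("dynamodb", region_name="us-east-1")
    # On-demand tables need no ProvisionedThroughput at all.
    client.create_table(TableName="events", BillingMode="PAY_PER_REQUEST", **SCHEMA)
    try:
        client.create_table(
            TableName="bad",
            BillingMode="PAY_PER_REQUEST",
            ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
            **SCHEMA
        )
    except ClientError as err:
        assert "ProvisionedThroughput cannot be specified" in err.response["Error"]["Message"]


pay_per_request_billing()
```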
@@ -549,9 +558,10 @@ class DynamoHandler(BaseResponse): filter_expression = self.body.get('FilterExpression') expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) - + projection_expression = self.body.get('ProjectionExpression', '') exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") + index_name = self.body.get('IndexName') try: items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, @@ -559,7 +569,12 @@ class DynamoHandler(BaseResponse): exclusive_start_key, filter_expression, expression_attribute_names, - expression_attribute_values) + expression_attribute_values, + index_name, + projection_expression) + except InvalidIndexNameError as err: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, str(err)) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) diff --git a/moto/ec2/exceptions.py index f747c9cd5..259e84bc3 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError): .format(key)) + +class InvalidKeyPairFormatError(EC2ClientError): + + def __init__(self): + super(InvalidKeyPairFormatError, self).__init__( + "InvalidKeyPair.Format", + "Key is not in valid OpenSSH public key format") + + class InvalidVPCIdError(EC2ClientError): def __init__(self, vpc_id): @@ -420,3 +428,89 @@ class OperationNotPermitted(EC2ClientError): "The vpc CIDR block with association ID {} may not be disassociated. " "It is the primary IPv4 CIDR block of the VPC".format(association_id) ) + + +class InvalidAvailabilityZoneError(EC2ClientError): + + def __init__(self, availability_zone_value, valid_availability_zones): + super(InvalidAvailabilityZoneError, self).__init__( + "InvalidParameterValue", + "Value ({0}) for parameter availabilityZone is invalid. " + "Subnets can currently only be created in the following availability zones: {1}.".format(availability_zone_value, valid_availability_zones) + ) + + +class NetworkAclEntryAlreadyExistsError(EC2ClientError): + + def __init__(self, rule_number): + super(NetworkAclEntryAlreadyExistsError, self).__init__( + "NetworkAclEntryAlreadyExists", + "The network acl entry identified by {} already exists.".format(rule_number) + ) + + +class InvalidSubnetRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetRangeError, self).__init__( + "InvalidSubnet.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +class InvalidCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidDestinationCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidDestinationCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidSubnetConflictError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetConflictError, self).__init__( + "InvalidSubnet.Conflict", + "The CIDR '{}' conflicts with another subnet".format(cidr_block) + ) + + +class InvalidVPCRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidVPCRangeError, self).__init__( + "InvalidVpc.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +# accept exception +class OperationNotPermitted2(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted2, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request. "
+ "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region) + ) + + +# reject exception +class OperationNotPermitted3(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted3, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request." + "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region, + pcx_id, + acceptor_region) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index ef1425ea7..811283fe8 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest from boto.ec2.launchspecification import LaunchSpecification + from moto.compat import OrderedDict from moto.core import BaseBackend from moto.core.models import Model, BaseModel @@ -35,14 +36,18 @@ from .exceptions import ( InvalidAMIIdError, InvalidAMIAttributeItemValueError, InvalidAssociationIdError, + InvalidAvailabilityZoneError, + InvalidCIDRBlockParameterError, InvalidCIDRSubnetError, InvalidCustomerGatewayIdError, + InvalidDestinationCIDRBlockParameterError, InvalidDHCPOptionsIdError, InvalidDomainError, InvalidID, InvalidInstanceIdError, InvalidInternetGatewayIdError, InvalidKeyPairDuplicateError, + InvalidKeyPairFormatError, InvalidKeyPairNameError, InvalidNetworkAclIdError, InvalidNetworkAttachmentIdError, @@ -56,20 +61,26 @@ from .exceptions import ( InvalidSecurityGroupDuplicateError, InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetConflictError, InvalidSubnetIdError, + InvalidSubnetRangeError, InvalidVolumeIdError, InvalidVolumeAttachmentError, InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, InvalidVPCIdError, + InvalidVPCRangeError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, MalformedAMIIdError, MalformedDHCPOptionsIdError, MissingParameterError, MotoNotImplementedError, + NetworkAclEntryAlreadyExistsError, OperationNotPermitted, + OperationNotPermitted2, + OperationNotPermitted3, ResourceAlreadyAssociatedError, RulesPerSecurityGroupLimitExceededError, TagLimitExceeded) @@ -118,6 +129,8 @@ from .utils import ( random_customer_gateway_id, is_tag_filter, tag_filter_matches, + rsa_public_key_parse, + rsa_public_key_fingerprint ) INSTANCE_TYPES = json.load( @@ -134,6 +147,8 @@ def utc_date_and_time(): def validate_resource_ids(resource_ids): + if not resource_ids: + raise MissingParameterError(parameter='resourceIdSet') for resource_id in resource_ids: if not is_valid_resource_id(resource_id): raise InvalidID(resource_id=resource_id) @@ -402,7 +417,7 @@ class Instance(TaggedEC2Resource, BotoInstance): warnings.warn('Could not find AMI with image-id:{0}, ' 'in the near future this will ' 'cause an error.\n' - 'Use ec2_backend.describe_images() to' + 'Use ec2_backend.describe_images() to ' 'find suitable image for your test'.format(image_id), PendingDeprecationWarning) @@ -906,7 +921,14 @@ class KeyPairBackend(object): def import_key_pair(self, key_name, public_key_material): if key_name in self.keypairs: raise InvalidKeyPairDuplicateError(key_name) - keypair = KeyPair(key_name, **random_key_pair()) + + try: + rsa_public_key = rsa_public_key_parse(public_key_material) + except ValueError: + raise InvalidKeyPairFormatError() + + fingerprint = 
rsa_public_key_fingerprint(rsa_public_key) + keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint) self.keypairs[key_name] = keypair return keypair @@ -1267,17 +1289,107 @@ class Region(object): class Zone(object): - def __init__(self, name, region_name): + def __init__(self, name, region_name, zone_id): self.name = name self.region_name = region_name + self.zone_id = zone_id class RegionsAndZonesBackend(object): regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()] - zones = dict( - (region, [Zone(region + c, region) for c in 'abc']) - for region in [r.name for r in regions]) + zones = { + 'ap-south-1': [ + Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"), + Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3") + ], + 'eu-west-3': [ + Zone(region_name="eu-west-3", name="eu-west-3a", zone_id="euw3-az1"), + Zone(region_name="eu-west-3", name="eu-west-3b", zone_id="euw3-az2"), + Zone(region_name="eu-west-3", name="eu-west-3c", zone_id="euw3-az3") + ], + 'eu-north-1': [ + Zone(region_name="eu-north-1", name="eu-north-1a", zone_id="eun1-az1"), + Zone(region_name="eu-north-1", name="eu-north-1b", zone_id="eun1-az2"), + Zone(region_name="eu-north-1", name="eu-north-1c", zone_id="eun1-az3") + ], + 'eu-west-2': [ + Zone(region_name="eu-west-2", name="eu-west-2a", zone_id="euw2-az2"), + Zone(region_name="eu-west-2", name="eu-west-2b", zone_id="euw2-az3"), + Zone(region_name="eu-west-2", name="eu-west-2c", zone_id="euw2-az1") + ], + 'eu-west-1': [ + Zone(region_name="eu-west-1", name="eu-west-1a", zone_id="euw1-az3"), + Zone(region_name="eu-west-1", name="eu-west-1b", zone_id="euw1-az1"), + Zone(region_name="eu-west-1", name="eu-west-1c", zone_id="euw1-az2") + ], + 'ap-northeast-3': [ + Zone(region_name="ap-northeast-3", name="ap-northeast-3a", zone_id="apne3-az1") + ], + 'ap-northeast-2': [ + Zone(region_name="ap-northeast-2", name="ap-northeast-2a", zone_id="apne2-az1"), + Zone(region_name="ap-northeast-2", name="ap-northeast-2c", zone_id="apne2-az3") + ], + 'ap-northeast-1': [ + Zone(region_name="ap-northeast-1", name="ap-northeast-1a", zone_id="apne1-az4"), + Zone(region_name="ap-northeast-1", name="ap-northeast-1c", zone_id="apne1-az1"), + Zone(region_name="ap-northeast-1", name="ap-northeast-1d", zone_id="apne1-az2") + ], + 'sa-east-1': [ + Zone(region_name="sa-east-1", name="sa-east-1a", zone_id="sae1-az1"), + Zone(region_name="sa-east-1", name="sa-east-1c", zone_id="sae1-az3") + ], + 'ca-central-1': [ + Zone(region_name="ca-central-1", name="ca-central-1a", zone_id="cac1-az1"), + Zone(region_name="ca-central-1", name="ca-central-1b", zone_id="cac1-az2") + ], + 'ap-southeast-1': [ + Zone(region_name="ap-southeast-1", name="ap-southeast-1a", zone_id="apse1-az1"), + Zone(region_name="ap-southeast-1", name="ap-southeast-1b", zone_id="apse1-az2"), + Zone(region_name="ap-southeast-1", name="ap-southeast-1c", zone_id="apse1-az3") + ], + 'ap-southeast-2': [ + Zone(region_name="ap-southeast-2", name="ap-southeast-2a", zone_id="apse2-az1"), + Zone(region_name="ap-southeast-2", name="ap-southeast-2b", zone_id="apse2-az3"), + Zone(region_name="ap-southeast-2", name="ap-southeast-2c", zone_id="apse2-az2") + ], + 'eu-central-1': [ + Zone(region_name="eu-central-1", name="eu-central-1a", zone_id="euc1-az2"), + Zone(region_name="eu-central-1", name="eu-central-1b", zone_id="euc1-az3"), + Zone(region_name="eu-central-1", name="eu-central-1c", zone_id="euc1-az1") + ], + 'us-east-1': [ + Zone(region_name="us-east-1", 
name="us-east-1a", zone_id="use1-az6"), + Zone(region_name="us-east-1", name="us-east-1b", zone_id="use1-az1"), + Zone(region_name="us-east-1", name="us-east-1c", zone_id="use1-az2"), + Zone(region_name="us-east-1", name="us-east-1d", zone_id="use1-az4"), + Zone(region_name="us-east-1", name="us-east-1e", zone_id="use1-az3"), + Zone(region_name="us-east-1", name="us-east-1f", zone_id="use1-az5") + ], + 'us-east-2': [ + Zone(region_name="us-east-2", name="us-east-2a", zone_id="use2-az1"), + Zone(region_name="us-east-2", name="us-east-2b", zone_id="use2-az2"), + Zone(region_name="us-east-2", name="us-east-2c", zone_id="use2-az3") + ], + 'us-west-1': [ + Zone(region_name="us-west-1", name="us-west-1a", zone_id="usw1-az3"), + Zone(region_name="us-west-1", name="us-west-1b", zone_id="usw1-az1") + ], + 'us-west-2': [ + Zone(region_name="us-west-2", name="us-west-2a", zone_id="usw2-az2"), + Zone(region_name="us-west-2", name="us-west-2b", zone_id="usw2-az1"), + Zone(region_name="us-west-2", name="us-west-2c", zone_id="usw2-az3") + ], + 'cn-north-1': [ + Zone(region_name="cn-north-1", name="cn-north-1a", zone_id="cnn1-az1"), + Zone(region_name="cn-north-1", name="cn-north-1b", zone_id="cnn1-az2") + ], + 'us-gov-west-1': [ + Zone(region_name="us-gov-west-1", name="us-gov-west-1a", zone_id="usgw1-az1"), + Zone(region_name="us-gov-west-1", name="us-gov-west-1b", zone_id="usgw1-az2"), + Zone(region_name="us-gov-west-1", name="us-gov-west-1c", zone_id="usgw1-az3") + ] + } def describe_regions(self, region_names=[]): if len(region_names) == 0: @@ -1877,6 +1989,8 @@ class Snapshot(TaggedEC2Resource): return str(self.encrypted).lower() elif filter_name == 'status': return self.status + elif filter_name == 'owner-id': + return self.owner_id else: return super(Snapshot, self).get_filter_value( filter_name, 'DescribeSnapshots') @@ -2118,22 +2232,28 @@ class VPC(TaggedEC2Resource): class VPCBackend(object): - __refs__ = defaultdict(list) + vpc_refs = defaultdict(set) def __init__(self): self.vpcs = {} - self.__refs__[self.__class__].append(weakref.ref(self)) + self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() @classmethod - def get_instances(cls): - for inst_ref in cls.__refs__[cls]: + def get_vpc_refs(cls): + for inst_ref in cls.vpc_refs[cls]: inst = inst_ref() if inst is not None: yield inst def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() + try: + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28: + raise InvalidVPCRangeError(cidr_block) vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc @@ -2157,7 +2277,7 @@ class VPCBackend(object): # get vpc by vpc id and aws region def get_cross_vpc(self, vpc_id, peer_region): - for vpcs in self.get_instances(): + for vpcs in self.get_vpc_refs(): if vpcs.region_name == peer_region: match_vpc = vpcs.get_vpc(vpc_id) return match_vpc @@ -2278,15 +2398,31 @@ class VPCPeeringConnection(TaggedEC2Resource): class VPCPeeringConnectionBackend(object): + # for cross region vpc reference + vpc_pcx_refs = defaultdict(set) + def __init__(self): self.vpc_pcxs = {} + self.vpc_pcx_refs[self.__class__].add(weakref.ref(self)) super(VPCPeeringConnectionBackend, self).__init__() + @classmethod + def get_vpc_pcx_refs(cls): + for 
inst_ref in cls.vpc_pcx_refs[cls]: + inst = inst_ref() + if inst is not None: + yield inst + def create_vpc_peering_connection(self, vpc, peer_vpc): vpc_pcx_id = random_vpc_peering_connection_id() vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc) vpc_pcx._status.pending() self.vpc_pcxs[vpc_pcx_id] = vpc_pcx + # insert cross region peering info + if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name: + for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs(): + if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name: + vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx return vpc_pcx def get_all_vpc_peering_connections(self): @@ -2304,6 +2440,11 @@ class VPCPeeringConnectionBackend(object): def accept_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # if cross region need accepter from another region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.accept() @@ -2311,6 +2452,11 @@ class VPCPeeringConnectionBackend(object): def reject_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # if cross region need accepter from another region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.reject() @@ -2319,15 +2465,18 @@ class VPCPeeringConnectionBackend(object): class Subnet(TaggedEC2Resource): def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az, - map_public_ip_on_launch): + map_public_ip_on_launch, owner_id=111122223333, assign_ipv6_address_on_creation=False): self.ec2_backend = ec2_backend self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block - self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block)) + self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch + self.owner_id = owner_id + self.assign_ipv6_address_on_creation = assign_ipv6_address_on_creation + self.ipv6_cidr_block_associations = [] # Theory is we assign ip's as we go (as 16,777,214 usable IPs in a /8) self._subnet_ip_generator = self.cidr.hosts() @@ -2357,7 +2506,7 @@ class Subnet(TaggedEC2Resource): @property def availability_zone(self): - return self._availability_zone + return self._availability_zone.name @property def physical_resource_id(self): @@ -2454,16 +2603,35 @@ class SubnetBackend(object): return subnets[subnet_id] raise InvalidSubnetIdError(subnet_id) - def create_subnet(self, vpc_id, cidr_block, availability_zone): + def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None): subnet_id = random_subnet_id() - self.get_vpc(vpc_id) # Validate VPC exists + vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's + vpc_cidr_block = 
ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False) + try: + subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and + vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address): + raise InvalidSubnetRangeError(cidr_block) + + for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}): + if subnet.cidr.overlaps(subnet_cidr_block): + raise InvalidSubnetConflictError(cidr_block) # if this is the first subnet for an availability zone, # consider it the default default_for_az = str(availability_zone not in self.subnets).lower() map_public_ip_on_launch = default_for_az - subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone, - default_for_az, map_public_ip_on_launch) + if availability_zone is None: + availability_zone = 'us-east-1a' + try: + availability_zone_data = next(zone for zones in RegionsAndZonesBackend.zones.values() for zone in zones if zone.name == availability_zone) + except StopIteration: + raise InvalidAvailabilityZoneError(availability_zone, ", ".join([zone.name for zones in RegionsAndZonesBackend.zones.values() for zone in zones])) + subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone_data, + default_for_az, map_public_ip_on_launch, + owner_id=context.get_current_user() if context else '111122223333', assign_ipv6_address_on_creation=False) # AWS associates a new subnet with the default Network ACL self.associate_default_network_acl_with_subnet(subnet_id, vpc_id) @@ -2491,11 +2659,12 @@ class SubnetBackend(object): return subnets.pop(subnet_id, None) raise InvalidSubnetIdError(subnet_id) - def modify_subnet_attribute(self, subnet_id, map_public_ip): + def modify_subnet_attribute(self, subnet_id, attr_name, attr_value): subnet = self.get_subnet(subnet_id) - if map_public_ip not in ('true', 'false'): - raise InvalidParameterValueError(map_public_ip) - subnet.map_public_ip_on_launch = map_public_ip + if attr_name in ('map_public_ip_on_launch', 'assign_ipv6_address_on_creation'): + setattr(subnet, attr_name, attr_value) + else: + raise InvalidParameterValueError(attr_name) class SubnetRouteTableAssociation(object): @@ -2716,6 +2885,11 @@ class RouteBackend(object): elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id: gateway = self.get_internet_gateway(gateway_id) + try: + ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + route = Route(route_table, destination_cidr_block, local=local, gateway=gateway, instance=self.get_instance( @@ -3593,10 +3767,10 @@ class NetworkAclBackend(object): def add_default_entries(self, network_acl_id): default_acl_entries = [ - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'}, - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'} + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'}, + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'} ] for entry in default_acl_entries: self.create_network_acl_entry(network_acl_id=network_acl_id, 
rule_number=entry['rule_number'], protocol='-1', @@ -3627,12 +3801,14 @@ class NetworkAclBackend(object): icmp_code, icmp_type, port_range_from, port_range_to): + network_acl = self.get_network_acl(network_acl_id) + if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries): + raise NetworkAclEntryAlreadyExistsError(rule_number) network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number, protocol, rule_action, egress, cidr_block, icmp_code, icmp_type, port_range_from, port_range_to) - network_acl = self.get_network_acl(network_acl_id) network_acl.network_acl_entries.append(network_acl_entry) return network_acl_entry diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 49f1face7..a5359daca 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -450,6 +450,7 @@ EC2_DESCRIBE_INSTANCES = """{{ subnet.vpc_id }} {{ subnet.cidr_block }} 251 - {{ subnet.availability_zone }} - - {% for tag in subnet.get_tags() %} - - {{ tag.resource_id }} - {{ tag.resource_type }} - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - + {{ subnet._availability_zone.name }} + {{ subnet._availability_zone.zone_id }} + {{ subnet.default_for_az }} + {{ subnet.map_public_ip_on_launch }} + {{ subnet.owner_id }} + {{ subnet.assign_ipv6_address_on_creation }} + {{ subnet.ipv6_cidr_block_associations }} + arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }} """ @@ -80,19 +84,26 @@ DESCRIBE_SUBNETS_RESPONSE = """ {{ subnet.vpc_id }} {{ subnet.cidr_block }} 251 - {{ subnet.availability_zone }} + {{ subnet._availability_zone.name }} + {{ subnet._availability_zone.zone_id }} {{ subnet.default_for_az }} {{ subnet.map_public_ip_on_launch }} - - {% for tag in subnet.get_tags() %} - - {{ tag.resource_id }} - {{ tag.resource_type }} - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - + {{ subnet.owner_id }} + {{ subnet.assign_ipv6_address_on_creation }} + {{ subnet.ipv6_cidr_block_associations }} + arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }} + {% if subnet.get_tags() %} + + {% for tag in subnet.get_tags() %} + + {{ tag.resource_id }} + {{ tag.resource_type }} + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} {% endfor %} diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 49d752893..68bae72da 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -74,30 +74,35 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ """ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {% for vpc_pcx in vpc_pcxs %} - - {{ vpc_pcx.id }} - - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} - - - 123456789012 - {{ vpc_pcx.peer_vpc.id }} - - - {{ vpc_pcx._status.code }} - {{ vpc_pcx._status.message }} - - 2014-02-17T16:00:50.000Z - - - {% endfor %} - + +7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {% for vpc_pcx in vpc_pcxs %} + + {{ vpc_pcx.id }} + + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + + 123456789012 + {{ vpc_pcx.peer_vpc.id }} + {{ vpc_pcx.peer_vpc.cidr_block }} + + false + true + false + + + + {{ vpc_pcx._status.code }} + {{ vpc_pcx._status.message }} + + + + {% endfor %} + """ @@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """ """ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """ - + 
7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_pcx.id }} - 123456789012 + 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} - 777788889999 + 123456789012 {{ vpc_pcx.peer_vpc.id }} {{ vpc_pcx.peer_vpc.cidr_block }} + + false + false + false + {{ vpc_pcx._status.code }} diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index f5c9b8512..a998f18ef 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -1,10 +1,19 @@ from __future__ import unicode_literals +import base64 +import hashlib import fnmatch import random import re import six +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import rsa +import sshpubkeys.exceptions +from sshpubkeys.keys import SSHKey + + EC2_RESOURCE_TO_PREFIX = { 'customer-gateway': 'cgw', 'dhcp-options': 'dopt', @@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): - def random_hex(): - return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=default_backend()) + private_key_material = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key()) - def random_fingerprint(): - return ':'.join([random_hex() + random_hex() for i in range(20)]) - - def random_material(): - return ''.join([ - chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + - list(range(97, 102)))) - for i in range(1000) - ]) - material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" return { - 'fingerprint': random_fingerprint(), - 'material': material + 'fingerprint': public_key_fingerprint, + 'material': private_key_material.decode('ascii') } @@ -535,3 +540,28 @@ def generate_instance_identity_document(instance): } return document + + +def rsa_public_key_parse(key_material): + try: + if not isinstance(key_material, six.binary_type): + key_material = key_material.encode("ascii") + + decoded_key = base64.b64decode(key_material).decode("ascii") + public_key = SSHKey(decoded_key) + except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError): + raise ValueError('bad key') + + if not public_key.rsa: + raise ValueError('bad key') + + return public_key.rsa + + +def rsa_public_key_fingerprint(rsa_public_key): + key_data = rsa_public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo) + fingerprint_hex = hashlib.md5(key_data).hexdigest() + fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex) + return fingerprint diff --git a/moto/ecr/models.py b/moto/ecr/models.py index 4849ffbfa..b03f25dee 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -1,7 +1,9 @@ from __future__ import unicode_literals import hashlib +import re from copy import copy +from datetime import datetime from random import random from botocore.exceptions import ParamValidationError @@ -105,7 +107,7 @@ class Image(BaseObject): self.repository = repository self.registry_id = registry_id self.image_digest = digest - self.image_pushed_at = None + self.image_pushed_at = str(datetime.utcnow().isoformat()) def _create_digest(self): image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) @@ -119,6 +121,12 @@ class Image(BaseObject): 
def get_image_manifest(self): return self.image_manifest + def remove_tag(self, tag): + if tag is not None and tag in self.image_tags: + self.image_tags.remove(tag) + if self.image_tags: + self.image_tag = self.image_tags[-1] + def update_tag(self, tag): self.image_tag = tag if tag not in self.image_tags and tag is not None: @@ -151,7 +159,7 @@ class Image(BaseObject): response_object['repositoryName'] = self.repository response_object['registryId'] = self.registry_id response_object['imageSizeInBytes'] = self.image_size_in_bytes - response_object['imagePushedAt'] = '2017-05-09' + response_object['imagePushedAt'] = self.image_pushed_at return {k: v for k, v in response_object.items() if v is not None and v != []} @property @@ -165,6 +173,13 @@ class Image(BaseObject): response_object['registryId'] = self.registry_id return {k: v for k, v in response_object.items() if v is not None and v != [None]} + @property + def response_batch_delete_image(self): + response_object = {} + response_object['imageDigest'] = self.get_image_digest() + response_object['imageTag'] = self.image_tag + return {k: v for k, v in response_object.items() if v is not None and v != [None]} + class ECRBackend(BaseBackend): @@ -310,6 +325,106 @@ class ECRBackend(BaseBackend): return response + def batch_delete_image(self, repository_name, registry_id=None, image_ids=None): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise RepositoryNotFoundException( + repository_name, registry_id or DEFAULT_REGISTRY_ID + ) + + if not image_ids: + raise ParamValidationError( + msg='Missing required parameter in input: "imageIds"' + ) + + response = { + "imageIds": [], + "failures": [] + } + + for image_id in image_ids: + image_found = False + + # Is request missing both digest and tag? + if "imageDigest" not in image_id and "imageTag" not in image_id: + response["failures"].append( + { + "imageId": {}, + "failureCode": "MissingDigestAndTag", + "failureReason": "Invalid request parameters: both tag and digest cannot be null", + } + ) + continue + + # If we have a digest, is it valid? 
+ if "imageDigest" in image_id: + pattern = re.compile("^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") + if not pattern.match(image_id.get("imageDigest")): + response["failures"].append( + { + "imageId": { + "imageDigest": image_id.get("imageDigest", "null") + }, + "failureCode": "InvalidImageDigest", + "failureReason": "Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'", + } + ) + continue + + for num, image in enumerate(repository.images): + + # Search by matching both digest and tag + if "imageDigest" in image_id and "imageTag" in image_id: + if ( + image_id["imageDigest"] == image.get_image_digest() and + image_id["imageTag"] in image.image_tags + ): + image_found = True + for image_tag in reversed(image.image_tags): + repository.images[num].image_tag = image_tag + response["imageIds"].append( + image.response_batch_delete_image + ) + repository.images[num].remove_tag(image_tag) + del repository.images[num] + + # Search by matching digest + elif "imageDigest" in image_id and image.get_image_digest() == image_id["imageDigest"]: + image_found = True + for image_tag in reversed(image.image_tags): + repository.images[num].image_tag = image_tag + response["imageIds"].append(image.response_batch_delete_image) + repository.images[num].remove_tag(image_tag) + del repository.images[num] + + # Search by matching tag + elif "imageTag" in image_id and image_id["imageTag"] in image.image_tags: + image_found = True + repository.images[num].image_tag = image_id["imageTag"] + response["imageIds"].append(image.response_batch_delete_image) + if len(image.image_tags) > 1: + repository.images[num].remove_tag(image_id["imageTag"]) + else: + repository.images.remove(image) + + if not image_found: + failure_response = { + "imageId": {}, + "failureCode": "ImageNotFound", + "failureReason": "Requested image not found", + } + + if "imageDigest" in image_id: + failure_response["imageId"]["imageDigest"] = image_id.get("imageDigest", "null") + + if "imageTag" in image_id: + failure_response["imageId"]["imageTag"] = image_id.get("imageTag", "null") + + response["failures"].append(failure_response) + + return response + ecr_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index af237769f..f758176ad 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -84,9 +84,12 @@ class ECRResponse(BaseResponse): 'ECR.batch_check_layer_availability is not yet implemented') def batch_delete_image(self): - if self.is_not_dryrun('BatchDeleteImage'): - raise NotImplementedError( - 'ECR.batch_delete_image is not yet implemented') + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + image_ids = self._get_param('imageIds') + + response = self.ecr_backend.batch_delete_image(repository_str, registry_id, image_ids) + return json.dumps(response) def batch_get_image(self): repository_str = self._get_param('repositoryName') diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 4a6737ceb..a314c7776 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -94,6 +94,12 @@ class Cluster(BaseObject): # no-op when nothing changed between old and new resources return original_resource + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Arn': + return self.arn + raise UnformattedGetAttTemplateException() + class TaskDefinition(BaseObject): @@ -271,6 +277,12 @@ class 
Service(BaseObject): else: return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count) + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Name': + return self.name + raise UnformattedGetAttTemplateException() + class ContainerInstance(BaseObject): @@ -358,6 +370,20 @@ class ContainerInstance(BaseObject): return formatted_attr +class ClusterFailure(BaseObject): + def __init__(self, reason, cluster_name): + self.reason = reason + self.arn = "arn:aws:ecs:us-east-1:012345678910:cluster/{0}".format( + cluster_name) + + @property + def response_object(self): + response_object = self.gen_response_object() + response_object['reason'] = self.reason + response_object['arn'] = self.arn + return response_object + + class ContainerInstanceFailure(BaseObject): def __init__(self, reason, container_instance_id): @@ -419,6 +445,7 @@ class EC2ContainerServiceBackend(BaseBackend): def describe_clusters(self, list_clusters_name=None): list_clusters = [] + failures = [] if list_clusters_name is None: if 'default' in self.clusters: list_clusters.append(self.clusters['default'].response_object) @@ -429,9 +456,8 @@ class EC2ContainerServiceBackend(BaseBackend): list_clusters.append( self.clusters[cluster_name].response_object) else: - raise Exception( - "{0} is not a cluster".format(cluster_name)) - return list_clusters + failures.append(ClusterFailure('MISSING', cluster_name)) + return list_clusters, failures def delete_cluster(self, cluster_str): cluster_name = cluster_str.split('/')[-1] @@ -673,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend): return service - def list_services(self, cluster_str): + def list_services(self, cluster_str, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] service_arns = [] for key, value in self.services.items(): if cluster_name + ':' in key: - service_arns.append(self.services[key].arn) + service = self.services[key] + if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy: + service_arns.append(service.arn) + return sorted(service_arns) def describe_services(self, cluster_str, service_names_or_arns): diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index e0bfefc02..92b769fad 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -45,10 +45,10 @@ class EC2ContainerServiceResponse(BaseResponse): def describe_clusters(self): list_clusters_name = self._get_param('clusters') - clusters = self.ecs_backend.describe_clusters(list_clusters_name) + clusters, failures = self.ecs_backend.describe_clusters(list_clusters_name) return json.dumps({ 'clusters': clusters, - 'failures': [] + 'failures': [cluster.response_object for cluster in failures] }) def delete_cluster(self): @@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse): def list_services(self): cluster_str = self._get_param('cluster') - service_arns = self.ecs_backend.list_services(cluster_str) + scheduling_strategy = self._get_param('schedulingStrategy') + service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy) return json.dumps({ 'serviceArns': service_arns # , diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 0bf9649d7..11dcbcb21 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError): def __init__(self, invalid_name, index): super(InvalidActionTypeError, self).__init__( 
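# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: with the new
# ClusterFailure object, describe_clusters reports unknown clusters under
# "failures" instead of raising. Cluster names are invented for the example.
import boto3
from moto import mock_ecs

@mock_ecs
def describe_missing_cluster():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="default")
    response = client.describe_clusters(clusters=["default", "missing"])
    assert len(response["clusters"]) == 1
    assert response["failures"][0]["reason"] == "MISSING"
# ---------------------------------------------------------------------------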
"ValidationError", - "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index) + "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index) ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 3925fa95d..8d98f187d 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -204,8 +204,20 @@ class FakeListener(BaseModel): # transform default actions to confirm with the rest of the code and XML templates if "DefaultActions" in properties: default_actions = [] - for action in properties['DefaultActions']: - default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']}) + for i, action in enumerate(properties['DefaultActions']): + action_type = action['Type'] + if action_type == 'forward': + default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']}) + elif action_type == 'redirect': + redirect_action = {'type': action_type, } + for redirect_config_key, redirect_config_value in action['RedirectConfig'].items(): + # need to match the output of _get_list_prefix + if redirect_config_key == 'StatusCode': + redirect_config_key = 'status_code' + redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value + default_actions.append(redirect_action) + else: + raise InvalidActionTypeError(action_type, i + 1) else: default_actions = None @@ -417,11 +429,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -483,10 +499,18 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener - for action in default_actions: - if action['target_group_arn'] in self.target_groups.keys(): - target_group = self.target_groups[action['target_group_arn']] - target_group.load_balancer_arns.append(load_balancer_arn) + for i, action in enumerate(default_actions): + action_type = action['type'] + if action_type == 'forward': + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: + raise InvalidActionTypeError(action_type, i + 1) + return listener def describe_load_balancers(self, arns, names): @@ -649,11 +673,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + 
action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -873,7 +901,7 @@ class ELBv2Backend(BaseBackend): # Its already validated in responses.py listener.ssl_policy = ssl_policy - if default_actions is not None: + if default_actions is not None and default_actions != []: # Is currently not validated listener.default_actions = default_actions diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 1814f1273..3ca53240b 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + {{ action["redirect_config"] }} + {% endif %} {% endfor %} @@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """{{ action["target_group_arn"] }}m + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -1399,7 +1427,15 @@ MODIFY_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} diff --git a/moto/emr/models.py b/moto/emr/models.py index 6b7147e3f..4b591acb1 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -97,7 +97,8 @@ class FakeCluster(BaseModel): visible_to_all_users='false', release_label=None, requested_ami_version=None, - running_ami_version=None): + running_ami_version=None, + custom_ami_id=None): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self self.emr_backend = emr_backend @@ -162,6 +163,7 @@ class FakeCluster(BaseModel): self.release_label = release_label self.requested_ami_version = requested_ami_version self.running_ami_version = running_ami_version + self.custom_ami_id = custom_ami_id self.role = job_flow_role or 'EMRJobflowDefault' self.service_role = service_role diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 933e0177b..c807b5f54 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse): else: kwargs['running_ami_version'] = '1.0.0' + custom_ami_id = self._get_param('CustomAmiId') + if custom_ami_id: + kwargs['custom_ami_id'] = 
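# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: creating a
# listener whose default action is a redirect, which the relaxed validation
# now accepts alongside "forward". The VPC/subnet plumbing is boilerplate
# invented for the example.
import boto3
from moto import mock_ec2, mock_elbv2

@mock_ec2
@mock_elbv2
def create_redirect_listener():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.1.0/24")["Subnet"]["SubnetId"]

    elbv2 = boto3.client("elbv2", region_name="us-east-1")
    lb = elbv2.create_load_balancer(Name="my-lb", Subnets=[subnet_id])
    elbv2.create_listener(
        LoadBalancerArn=lb["LoadBalancers"][0]["LoadBalancerArn"],
        Protocol="HTTP",
        Port=80,
        DefaultActions=[{
            "Type": "redirect",
            "RedirectConfig": {"Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"},
        }],
    )
# ---------------------------------------------------------------------------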
custom_ami_id + if release_label and release_label < 'emr-5.7.0': + message = 'Custom AMI is not allowed' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + elif ami_version: + message = 'Custom AMI is not supported in this version of EMR' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix('Applications.member') @@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """ 2: + raise MalformedPolicyDocument("Actions/Condition can contain only one colon.") + + vendor_pattern = re.compile(r'[^a-zA-Z0-9\-.]') + if action_parts[0] != "*" and vendor_pattern.search(action_parts[0]): + raise MalformedPolicyDocument("Vendor {vendor} is not valid".format(vendor=action_parts[0])) + + def _validate_resources_for_formats(self): + self._validate_resource_like_for_formats("Resource") + + def _validate_not_resources_for_formats(self): + self._validate_resource_like_for_formats("NotResource") + + def _validate_resource_like_for_formats(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_resource_format(statement[key]) + else: + for resource in sorted(statement[key], reverse=True): + self._validate_resource_format(resource) + if self._resource_error == "": + IAMPolicyDocumentValidator._legacy_parse_resource_like(statement, key) + + def _validate_resource_format(self, resource): + if resource != "*": + resource_partitions = resource.partition(":") + + if resource_partitions[1] == "": + self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + if resource_partitions[0] != "aws": + remaining_resource_parts = resource_partitions[2].split(":") + + arn1 = remaining_resource_parts[0] if remaining_resource_parts[0] != "" or len(remaining_resource_parts) > 1 else "*" + arn2 = remaining_resource_parts[1] if len(remaining_resource_parts) > 1 else "*" + arn3 = remaining_resource_parts[2] if len(remaining_resource_parts) > 2 else "*" + arn4 = ":".join(remaining_resource_parts[3:]) if len(remaining_resource_parts) > 3 else "*" + self._resource_error = 'Partition "{partition}" is not valid for resource "arn:{partition}:{arn1}:{arn2}:{arn3}:{arn4}".'.format( + partition=resource_partitions[0], + arn1=arn1, + arn2=arn2, + arn3=arn3, + arn4=arn4 + ) + return + + if resource_partitions[1] != ":": + self._resource_error = "Resource vendor must be fully qualified and cannot contain regexes." 
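# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: the EMR change
# earlier in this diff rejects CustomAmiId for release labels older than
# emr-5.7.0. Cluster parameters are invented for the example.
import boto3
from botocore.exceptions import ClientError
from moto import mock_emr

@mock_emr
def custom_ami_rejected_on_old_release():
    client = boto3.client("emr", region_name="us-east-1")
    raised = False
    try:
        client.run_job_flow(
            Name="cluster",
            ReleaseLabel="emr-5.6.0",
            CustomAmiId="ami-12345678",
            Instances={"InstanceCount": 1, "MasterInstanceType": "c3.xlarge"},
            JobFlowRole="EMR_EC2_DefaultRole",
            ServiceRole="EMR_DefaultRole",
        )
    except ClientError as err:
        # The mock surfaces the new validation as a botocore ClientError.
        raised = "Custom AMI is not allowed" in str(err)
    assert raised
# ---------------------------------------------------------------------------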
+ return + + resource_partitions = resource_partitions[2].partition(":") + + service = resource_partitions[0] + + if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[2].startswith(":"): + self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + resource_partitions = resource_partitions[2].partition(":") + + if service in VALID_RESOURCE_PATH_STARTING_VALUES.keys(): + valid_start = False + for valid_starting_value in VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]: + if resource_partitions[2].startswith(valid_starting_value): + valid_start = True + break + if not valid_start: + self._resource_error = VALID_RESOURCE_PATH_STARTING_VALUES[service]["error_message"].format( + values=", ".join(VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]) + ) + + def _perform_first_legacy_parsing(self): + """This method excludes legacy parsing resources, since that have to be done later.""" + for statement in self._statements: + self._legacy_parse_statement(statement) + + @staticmethod + def _legacy_parse_statement(statement): + assert statement["Effect"] in VALID_EFFECTS # case-sensitive matching + if "Condition" in statement: + for condition_key, condition_value in statement["Condition"].items(): + IAMPolicyDocumentValidator._legacy_parse_condition(condition_key, condition_value) + + @staticmethod + def _legacy_parse_resource_like(statement, key): + if isinstance(statement[key], string_types): + if statement[key] != "*": + assert statement[key].count(":") >= 5 or "::" not in statement[key] + assert statement[key].split(":")[2] != "" + else: # list + for resource in statement[key]: + if resource != "*": + assert resource.count(":") >= 5 or "::" not in resource + assert resource[2] != "" + + @staticmethod + def _legacy_parse_condition(condition_key, condition_value): + stripped_condition_key = IAMPolicyDocumentValidator._strip_condition_key(condition_key) + + if stripped_condition_key.startswith("Date"): + for condition_element_key, condition_element_value in condition_value.items(): + if isinstance(condition_element_value, string_types): + IAMPolicyDocumentValidator._legacy_parse_date_condition_value(condition_element_value) + else: # it has to be a list + for date_condition_value in condition_element_value: + IAMPolicyDocumentValidator._legacy_parse_date_condition_value(date_condition_value) + + @staticmethod + def _legacy_parse_date_condition_value(date_condition_value): + if "t" in date_condition_value.lower() or "-" in date_condition_value: + IAMPolicyDocumentValidator._validate_iso_8601_datetime(date_condition_value.lower()) + else: # timestamp + assert 0 <= int(date_condition_value) <= 9223372036854775807 + + @staticmethod + def _validate_iso_8601_datetime(datetime): + datetime_parts = datetime.partition("t") + negative_year = datetime_parts[0].startswith("-") + date_parts = datetime_parts[0][1:].split("-") if negative_year else datetime_parts[0].split("-") + year = "-" + date_parts[0] if negative_year else date_parts[0] + assert -292275054 <= int(year) <= 292278993 + if len(date_parts) > 1: + month = date_parts[1] + assert 1 <= int(month) <= 12 + if len(date_parts) > 2: + day = date_parts[2] + assert 1 <= int(day) <= 31 + assert len(date_parts) < 4 + + time_parts = datetime_parts[2].split(":") + if time_parts[0] != "": + hours = time_parts[0] + assert 0 <= int(hours) <= 23 + if len(time_parts) > 1: + minutes = 
time_parts[1] + assert 0 <= int(minutes) <= 59 + if len(time_parts) > 2: + if "z" in time_parts[2]: + seconds_with_decimal_fraction = time_parts[2].partition("z")[0] + assert time_parts[2].partition("z")[2] == "" + elif "+" in time_parts[2]: + seconds_with_decimal_fraction = time_parts[2].partition("+")[0] + time_zone_data = time_parts[2].partition("+")[2].partition(":") + time_zone_hours = time_zone_data[0] + assert len(time_zone_hours) == 2 + assert 0 <= int(time_zone_hours) <= 23 + if time_zone_data[1] == ":": + time_zone_minutes = time_zone_data[2] + assert len(time_zone_minutes) == 2 + assert 0 <= int(time_zone_minutes) <= 59 + else: + seconds_with_decimal_fraction = time_parts[2] + seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(".") + seconds = seconds_with_decimal_fraction_partition[0] + assert 0 <= int(seconds) <= 59 + if seconds_with_decimal_fraction_partition[1] == ".": + decimal_seconds = seconds_with_decimal_fraction_partition[2] + assert 0 <= int(decimal_seconds) <= 999999999 diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 5b19c9cdc..05624101a 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -175,9 +175,11 @@ class IamResponse(BaseResponse): path = self._get_param('Path') assume_role_policy_document = self._get_param( 'AssumeRolePolicyDocument') + permissions_boundary = self._get_param( + 'PermissionsBoundary') role = iam_backend.create_role( - role_name, assume_role_policy_document, path) + role_name, assume_role_policy_document, path, permissions_boundary) template = self.response_template(CREATE_ROLE_TEMPLATE) return template.render(role=role) @@ -440,6 +442,18 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_USERS_TEMPLATE) return template.render(action='List', users=users) + def update_user(self): + user_name = self._get_param('UserName') + new_path = self._get_param('NewPath') + new_user_name = self._get_param('NewUserName') + iam_backend.update_user(user_name, new_path, new_user_name) + if new_user_name: + user = iam_backend.get_user(new_user_name) + else: + user = iam_backend.get_user(user_name) + template = self.response_template(USER_TEMPLATE) + return template.render(action='Update', user=user) + def create_login_profile(self): user_name = self._get_param('UserName') password = self._get_param('Password') @@ -804,12 +818,12 @@ CREATE_POLICY_TEMPLATE = """ {{ policy.arn }} {{ policy.attachment_count }} - {{ policy.create_datetime.isoformat() }} + {{ policy.created_iso_8601 }} {{ policy.default_version_id }} {{ policy.path }} {{ policy.id }} {{ policy.name }} - {{ policy.update_datetime.isoformat() }} + {{ policy.updated_iso_8601 }} @@ -827,8 +841,8 @@ GET_POLICY_TEMPLATE = """ {{ policy.path }} {{ policy.arn }} {{ policy.attachment_count }} - {{ policy.create_datetime.isoformat() }} - {{ policy.update_datetime.isoformat() }} + {{ policy.created_iso_8601 }} + {{ policy.updated_iso_8601 }} @@ -915,12 +929,12 @@ LIST_POLICIES_TEMPLATE = """ {{ policy.arn }} {{ policy.attachment_count }} - {{ policy.create_datetime.isoformat() }} + {{ policy.created_iso_8601 }} {{ policy.default_version_id }} {{ policy.path }} {{ policy.id }} {{ policy.name }} - {{ policy.update_datetime.isoformat() }} + {{ policy.updated_iso_8601 }} {% endfor %} @@ -944,7 +958,7 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """ {{ group.name }} {{ group.id }} {{ group.arn }} - {{ group.create_date }} + {{ group.created_iso_8601 }} @@ -1276,7 +1302,7 @@ GET_GROUP_TEMPLATE = """ {{ group.name }} {{ group.id }} {{ 
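# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: a policy document
# that should pass the new validator, including a Date condition in one of
# the ISO 8601 shapes _validate_iso_8601_datetime accepts. All names and the
# bucket ARN are invented for the example.
import json
import boto3
from moto import mock_iam

@mock_iam
def create_policy_with_date_condition():
    client = boto3.client("iam", region_name="us-east-1")
    document = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": "s3:ListBucket",
            "Resource": "arn:aws:s3:::example-bucket",
            "Condition": {"DateGreaterThan": {"aws:CurrentTime": "2019-07-16T12:00:00Z"}},
        }],
    }
    client.create_policy(PolicyName="example-policy", PolicyDocument=json.dumps(document))
# ---------------------------------------------------------------------------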
group.arn }} - {{ group.create_date }} + {{ group.created_iso_8601 }} {% for user in group.users %} @@ -1483,7 +1509,7 @@ LIST_ACCESS_KEYS_TEMPLATE = """ {{ user_name }} {{ key.access_key_id }} {{ key.status }} - {{ key.create_date }} + {{ key.created_iso_8601 }} {% endfor %} @@ -1551,7 +1577,7 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """{{ role.arn }} {{ role.name }} {{ role.assume_policy_document }} - {{ role.create_date }} + {{ role.created_iso_8601 }} {{ role.id }} {% endfor %} @@ -1559,7 +1585,7 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """{{ profile.name }} {{ profile.path }} {{ profile.arn }} - {{ profile.create_date }} + {{ profile.created_iso_8601 }} {% endfor %} @@ -1678,7 +1704,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ group.name }} {{ group.path }} {{ group.arn }} - {{ group.create_date }} + {{ group.created_iso_8601 }} {% for policy in group.policies %} @@ -1728,7 +1754,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ role.arn }} {{ role.name }} {{ role.assume_role_policy_document }} - {{ role.create_date }} + {{ role.created_iso_8601 }} {{ role.id }} {% endfor %} @@ -1736,7 +1762,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ profile.name }} {{ profile.path }} {{ profile.arn }} - {{ profile.create_date }} + {{ profile.created_iso_8601 }} {% endfor %} @@ -1744,7 +1770,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ role.arn }} {{ role.name }} {{ role.assume_role_policy_document }} - {{ role.create_date }} + {{ role.created_iso_8601 }} {{ role.id }} {% endfor %} @@ -1762,15 +1788,15 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ policy_version.document }} {{ policy_version.is_default }} {{ policy_version.version_id }} - {{ policy_version.create_datetime }} + {{ policy_version.created_iso_8601 }} {% endfor %} {{ policy.arn }} 1 - {{ policy.create_datetime }} + {{ policy.created_iso_8601 }} true - {{ policy.update_datetime }} + {{ policy.updated_iso_8601 }} {% endfor %} diff --git a/moto/iot/models.py b/moto/iot/models.py index b493f6b8d..960787101 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -96,7 +96,7 @@ class FakeThingGroup(BaseModel): class FakeCertificate(BaseModel): - def __init__(self, certificate_pem, status, region_name): + def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None): m = hashlib.sha256() m.update(str(uuid.uuid4()).encode('utf-8')) self.certificate_id = m.hexdigest() @@ -109,12 +109,18 @@ class FakeCertificate(BaseModel): self.transfer_data = {} self.creation_date = time.time() self.last_modified_date = self.creation_date + self.ca_certificate_id = None + self.ca_certificate_pem = ca_certificate_pem + if ca_certificate_pem: + m.update(str(uuid.uuid4()).encode('utf-8')) + self.ca_certificate_id = m.hexdigest() def to_dict(self): return { 'certificateArn': self.arn, 'certificateId': self.certificate_id, + 'caCertificateId': self.ca_certificate_id, 'status': self.status, 'creationDate': self.creation_date } @@ -410,6 +416,12 @@ class IoTBackend(BaseBackend): def list_certificates(self): return self.certificates.values() + def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status): + certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status, + self.region_name, ca_certificate_pem) + self.certificates[certificate.certificate_id] = certificate + return certificate + def update_certificate(self, certificate_id, new_status): cert = self.describe_certificate(certificate_id) # TODO: validate new_status diff --git 
a/moto/iot/responses.py b/moto/iot/responses.py index 214576f52..3821c1c79 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -183,6 +183,20 @@ class IoTResponse(BaseResponse): # TODO: implement pagination in the future return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) + def register_certificate(self): + certificate_pem = self._get_param("certificatePem") + ca_certificate_pem = self._get_param("caCertificatePem") + set_as_active = self._get_bool_param("setAsActive") + status = self._get_param("status") + + cert = self.iot_backend.register_certificate( + certificate_pem=certificate_pem, + ca_certificate_pem=ca_certificate_pem, + set_as_active=set_as_active, + status=status + ) + return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn)) + def update_certificate(self): certificate_id = self._get_param("certificateId") new_status = self._get_param("newStatus") diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index d9a47ea87..e7a389981 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -116,22 +116,19 @@ class Stream(BaseModel): def __init__(self, stream_name, shard_count, region): self.stream_name = stream_name self.shard_count = shard_count + self.creation_datetime = datetime.datetime.now() self.region = region self.account_number = "123456789012" self.shards = {} self.tags = {} + self.status = "ACTIVE" - if six.PY3: - izip_longest = itertools.zip_longest - else: - izip_longest = itertools.izip_longest + step = 2**128 // shard_count + hash_ranges = itertools.chain(map(lambda i: (i, i * step, (i + 1) * step), + range(shard_count - 1)), + [(shard_count - 1, (shard_count - 1) * step, 2**128)]) + for index, start, end in hash_ranges: - for index, start, end in izip_longest(range(shard_count), - range(0, 2**128, 2 ** - 128 // shard_count), - range(2**128 // shard_count, 2 ** - 128, 2**128 // shard_count), - fillvalue=2**128): shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -183,12 +180,23 @@ class Stream(BaseModel): "StreamDescription": { "StreamARN": self.arn, "StreamName": self.stream_name, - "StreamStatus": "ACTIVE", + "StreamStatus": self.status, "HasMoreShards": False, "Shards": [shard.to_json() for shard in self.shards.values()], } } + def to_json_summary(self): + return { + "StreamDescriptionSummary": { + "StreamARN": self.arn, + "StreamName": self.stream_name, + "StreamStatus": self.status, + "StreamCreationTimestamp": six.text_type(self.creation_datetime), + "OpenShardCount": self.shard_count, + } + } + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -309,6 +317,9 @@ class KinesisBackend(BaseBackend): else: raise StreamNotFoundError(stream_name) + def describe_stream_summary(self, stream_name): + return self.describe_stream(stream_name) + def list_streams(self): return self.streams.values() diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 72b2af4ce..3a81bd9f4 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse): stream = self.kinesis_backend.describe_stream(stream_name) return json.dumps(stream.to_json()) + def describe_stream_summary(self): + stream_name = self.parameters.get('StreamName') + stream = self.kinesis_backend.describe_stream_summary(stream_name) + return json.dumps(stream.to_json_summary()) + def list_streams(self): streams = self.kinesis_backend.list_streams() 
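# ---------------------------------------------------------------------------
# Editor's worked example, not part of the diff above: the rewritten Kinesis
# shard setup gives each of N shards a contiguous slice of the 2**128 hash
# key space, with the last shard absorbing the rounding remainder. The same
# arithmetic, standalone:
import itertools

shard_count = 3
step = 2 ** 128 // shard_count
hash_ranges = list(itertools.chain(
    ((i, i * step, (i + 1) * step) for i in range(shard_count - 1)),
    [(shard_count - 1, (shard_count - 1) * step, 2 ** 128)],
))
# Consecutive ranges tile the key space exactly and end at 2**128.
assert hash_ranges[0][1] == 0
assert all(a[2] == b[1] for a, b in zip(hash_ranges, hash_ranges[1:]))
assert hash_ranges[-1][2] == 2 ** 128
# ---------------------------------------------------------------------------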
stream_names = [stream.stream_name for stream in streams] diff --git a/moto/kinesis/utils.py b/moto/kinesis/utils.py index 337728f02..0c3edbb5a 100644 --- a/moto/kinesis/utils.py +++ b/moto/kinesis/utils.py @@ -1,8 +1,19 @@ +import sys import base64 from .exceptions import InvalidArgumentError +if sys.version_info[0] == 2: + encode_method = base64.encodestring + decode_method = base64.decodestring +elif sys.version_info[0] == 3: + encode_method = base64.encodebytes + decode_method = base64.decodebytes +else: + raise Exception("Python version is not supported") + + def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number, at_timestamp): if shard_iterator_type == "AT_SEQUENCE_NUMBER": @@ -22,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting def compose_shard_iterator(stream_name, shard, last_sequence_id): - return base64.encodestring( + return encode_method( "{0}:{1}:{2}".format( stream_name, shard.shard_id, @@ -32,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id): def decompose_shard_iterator(shard_iterator): - return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":") + return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":") diff --git a/moto/kms/exceptions.py b/moto/kms/exceptions.py new file mode 100644 index 000000000..70edd3dcd --- /dev/null +++ b/moto/kms/exceptions.py @@ -0,0 +1,36 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class NotFoundException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(NotFoundException, self).__init__( + "NotFoundException", message) + + +class ValidationException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ValidationException, self).__init__( + "ValidationException", message) + + +class AlreadyExistsException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(AlreadyExistsException, self).__init__( + "AlreadyExistsException", message) + + +class NotAuthorizedException(JsonRESTError): + code = 400 + + def __init__(self): + super(NotAuthorizedException, self).__init__( + "NotAuthorizedException", None) + + self.description = '{"__type":"NotAuthorizedException"}' diff --git a/moto/kms/models.py b/moto/kms/models.py index 9fbb2b587..2d6245ad2 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -1,8 +1,9 @@ from __future__ import unicode_literals +import os import boto.kms from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time from .utils import generate_key_id from collections import defaultdict from datetime import datetime, timedelta @@ -36,7 +37,7 @@ class Key(BaseModel): "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, - "CreationDate": datetime.strftime(datetime.utcnow(), "%s"), + "CreationDate": "%d" % unix_time(), "Description": self.description, "Enabled": self.enabled, "KeyId": self.id, @@ -159,27 +160,38 @@ class KmsBackend(BaseBackend): return self.keys[self.get_key_id(key_id)].policy def disable_key(self, key_id): - if key_id in self.keys: - self.keys[key_id].enabled = False - self.keys[key_id].key_state = 'Disabled' + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'Disabled' def enable_key(self, key_id): - if key_id in self.keys: - self.keys[key_id].enabled = True - self.keys[key_id].key_state 
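# ---------------------------------------------------------------------------
# Editor's worked example, not part of the diff above: on Python 3 the utils
# change maps encode_method/decode_method to base64.encodebytes/decodebytes.
# A round trip of a shard iterator shaped like the ones
# compose_shard_iterator builds:
import base64

iterator = base64.encodebytes("my-stream:shardId-000000000000:0".encode("utf-8"))
parts = base64.decodebytes(iterator).decode("utf-8").split(":")
assert parts == ["my-stream", "shardId-000000000000", "0"]
# ---------------------------------------------------------------------------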
= 'Enabled' + self.keys[key_id].enabled = True + self.keys[key_id].key_state = 'Enabled' def cancel_key_deletion(self, key_id): - if key_id in self.keys: - self.keys[key_id].key_state = 'Disabled' - self.keys[key_id].deletion_date = None + self.keys[key_id].key_state = 'Disabled' + self.keys[key_id].deletion_date = None def schedule_key_deletion(self, key_id, pending_window_in_days): - if key_id in self.keys: - if 7 <= pending_window_in_days <= 30: - self.keys[key_id].enabled = False - self.keys[key_id].key_state = 'PendingDeletion' - self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days) - return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date) + if 7 <= pending_window_in_days <= 30: + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'PendingDeletion' + self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days) + return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date) + + def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens): + key = self.keys[self.get_key_id(key_id)] + + if key_spec: + if key_spec == 'AES_128': + bytes = 16 + else: + bytes = 32 + else: + bytes = number_of_bytes + + plaintext = os.urandom(bytes) + + return plaintext, key.arn kms_backends = {} diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 2674f765c..92195ed6b 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -5,11 +5,9 @@ import json import re import six -from boto.exception import JSONResponseError -from boto.kms.exceptions import AlreadyExistsException, NotFoundException - from moto.core.responses import BaseResponse from .models import kms_backends +from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException reserved_aliases = [ 'alias/aws/ebs', @@ -88,36 +86,28 @@ class KmsResponse(BaseResponse): def create_alias(self): alias_name = self.parameters['AliasName'] target_key_id = self.parameters['TargetKeyId'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if alias_name in reserved_aliases: - raise JSONResponseError(400, 'Bad Request', body={ - '__type': 'NotAuthorizedException'}) + raise NotAuthorizedException() if ':' in alias_name: - raise JSONResponseError(400, 'Bad Request', body={ - 'message': '{alias_name} contains invalid characters for an alias'.format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name)) if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" - .format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' " + "failed to satisfy constraint: Member must satisfy regular " + "expression pattern: ^[a-zA-Z0-9:/_-]+$" + .format(alias_name=alias_name)) if self.kms_backend.alias_exists(target_key_id): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': 'Aliases must refer to keys. 
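# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: the backend sizes
# generate_data_key output from KeySpec (AES_128 gives 16 bytes, anything
# else 32) or from NumberOfBytes. The key description is invented.
import boto3
from moto import mock_kms

@mock_kms
def generate_data_key_sizes():
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="example")["KeyMetadata"]["KeyId"]
    by_spec = client.generate_data_key(KeyId=key_id, KeySpec="AES_256")
    assert len(by_spec["Plaintext"]) == 32
    by_size = client.generate_data_key(KeyId=key_id, NumberOfBytes=64)
    assert len(by_size["Plaintext"]) == 64
# ---------------------------------------------------------------------------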
Not aliases', - '__type': 'ValidationException'}) + raise ValidationException('Aliases must refer to keys. Not aliases') if self.kms_backend.alias_exists(alias_name): - raise AlreadyExistsException(400, 'Bad Request', body={ - 'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists' - .format(**locals()), '__type': 'AlreadyExistsException'}) + raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} ' + 'already exists'.format(region=self.region, alias_name=alias_name)) self.kms_backend.add_alias(target_key_id, alias_name) @@ -125,16 +115,13 @@ class KmsResponse(BaseResponse): def delete_alias(self): alias_name = self.parameters['AliasName'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if not self.kms_backend.alias_exists(alias_name): - raise NotFoundException(400, 'Bad Request', body={ - 'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()), - '__type': 'NotFoundException'}) + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:' + '{alias_name} is not found.'.format(region=self.region, alias_name=alias_name)) self.kms_backend.delete_alias(alias_name) @@ -172,9 +159,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.enable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -184,9 +170,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.disable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def get_key_rotation_status(self): @@ -195,9 +180,8 @@ class KmsResponse(BaseResponse): try: rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'KeyRotationEnabled': rotation_enabled}) def put_key_policy(self): @@ -210,9 +194,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.put_key_policy(key_id, policy) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -225,9 +208,8 @@ class 
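# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: the rewritten
# handlers raise moto's own NotFoundException, which botocore surfaces as a
# ClientError. The key id below is a made-up, well-formed UUID.
import boto3
from botocore.exceptions import ClientError
from moto import mock_kms

@mock_kms
def rotation_on_missing_key():
    client = boto3.client("kms", region_name="us-east-1")
    try:
        client.enable_key_rotation(KeyId="00000000-0000-0000-0000-000000000000")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "NotFoundException"
# ---------------------------------------------------------------------------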
KmsResponse(BaseResponse): try: return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) def list_key_policies(self): key_id = self.parameters.get('KeyId') @@ -235,9 +217,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.describe_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'Truncated': False, 'PolicyNames': ['default']}) @@ -249,11 +230,17 @@ class KmsResponse(BaseResponse): value = self.parameters.get("Plaintext") if isinstance(value, six.text_type): value = value.encode('utf-8') - return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")}) + return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'}) def decrypt(self): + # TODO refuse decode if EncryptionContext is not the same as when it was encrypted / generated + value = self.parameters.get("CiphertextBlob") - return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + try: + return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + except UnicodeDecodeError: + # Generate data key will produce random bytes which when decrypted is still returned as base64 + return json.dumps({"Plaintext": value}) def disable_key(self): key_id = self.parameters.get('KeyId') @@ -261,9 +248,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.disable_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def enable_key(self): @@ -272,9 +258,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.enable_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def cancel_key_deletion(self): @@ -283,9 +268,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.cancel_key_deletion(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'KeyId': key_id}) def schedule_key_deletion(self): 
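# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: encrypt/decrypt
# in this mock are a base64 round trip, and decrypt now also tolerates the
# random bytes produced by generate_data_key. Values are invented.
import boto3
from moto import mock_kms

@mock_kms
def encrypt_decrypt_round_trip():
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="example")["KeyMetadata"]["KeyId"]
    ciphertext = client.encrypt(KeyId=key_id, Plaintext=b"secret")["CiphertextBlob"]
    assert client.decrypt(CiphertextBlob=ciphertext)["Plaintext"] == b"secret"
# ---------------------------------------------------------------------------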
@@ -301,19 +285,62 @@ class KmsResponse(BaseResponse): 'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days) }) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) + + def generate_data_key(self): + key_id = self.parameters.get('KeyId') + encryption_context = self.parameters.get('EncryptionContext') + number_of_bytes = self.parameters.get('NumberOfBytes') + key_spec = self.parameters.get('KeySpec') + grant_tokens = self.parameters.get('GrantTokens') + + # Param validation + if key_id.startswith('alias'): + if self.kms_backend.get_key_id_from_alias(key_id) is None: + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format( + region=self.region, alias_name=key_id)) + else: + if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys: + raise NotFoundException('Invalid keyId') + + if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0): + raise ValidationException("1 validation error detected: Value '{number_of_bytes}' at 'numberOfBytes' failed " + "to satisfy constraint: Member must have value less than or " + "equal to 1024".format(number_of_bytes=number_of_bytes)) + + if key_spec and key_spec not in ('AES_256', 'AES_128'): + raise ValidationException("1 validation error detected: Value '{key_spec}' at 'keySpec' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[AES_256, AES_128]".format(key_spec=key_spec)) + if not key_spec and not number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + if key_spec and number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + + plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context, + number_of_bytes, key_spec, grant_tokens) + + plaintext = base64.b64encode(plaintext).decode() + + return json.dumps({ + 'CiphertextBlob': plaintext, + 'Plaintext': plaintext, + 'KeyId': key_arn # not alias + }) + + def generate_data_key_without_plaintext(self): + result = json.loads(self.generate_data_key()) + del result['Plaintext'] + + return json.dumps(result) def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): - raise JSONResponseError(404, 'Not Found', body={ - 'message': ' Invalid keyId', '__type': 'NotFoundException'}) + raise NotFoundException('Invalid keyId') def _assert_default_policy(policy_name): if policy_name != 'default': - raise JSONResponseError(404, 'Not Found', body={ - 'message': "No such policy exists", - '__type': 'NotFoundException'}) + raise NotFoundException("No such policy exists") diff --git a/moto/logs/models.py b/moto/logs/models.py index e105d4d14..a44b76812 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -137,6 +137,7 @@ class LogGroup: self.creationTime = unix_time_millis() self.tags = tags self.streams = dict() # {name: LogStream} + self.retentionInDays = None # AWS defaults to Never Expire for log group retention def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -201,14 +202,20 @@ class LogGroup: return events_page, next_token, searched_streams def to_describe_dict(self): - return { + log_group = { "arn": self.arn, "creationTime": 
self.creationTime, "logGroupName": self.name, "metricFilterCount": 0, - "retentionInDays": 30, "storedBytes": sum(s.storedBytes for s in self.streams.values()), } + # AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire) + if self.retentionInDays: + log_group["retentionInDays"] = self.retentionInDays + return log_group + + def set_retention_policy(self, retention_in_days): + self.retentionInDays = retention_in_days class LogsBackend(BaseBackend): @@ -289,5 +296,17 @@ class LogsBackend(BaseBackend): log_group = self.groups[log_group_name] return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + def put_retention_policy(self, log_group_name, retention_in_days): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(retention_in_days) + + def delete_retention_policy(self, log_group_name): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(None) + logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()} diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4bec86cb2..39f24a260 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -123,3 +123,14 @@ class LogsResponse(BaseResponse): "nextToken": next_token, "searchedLogStreams": searched_streams }) + + def put_retention_policy(self): + log_group_name = self._get_param('logGroupName') + retention_in_days = self._get_param('retentionInDays') + self.logs_backend.put_retention_policy(log_group_name, retention_in_days) + return '' + + def delete_retention_policy(self): + log_group_name = self._get_param('logGroupName') + self.logs_backend.delete_retention_policy(log_group_name) + return '' diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 9d5fe3886..91004b9ba 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -47,6 +47,7 @@ class FakeOrganization(BaseModel): class FakeAccount(BaseModel): def __init__(self, organization, **kwargs): + self.type = 'ACCOUNT' self.organization_id = organization.id self.master_account_id = organization.master_account_id self.create_account_status_id = utils.make_random_create_account_status_id() @@ -57,6 +58,7 @@ class FakeAccount(BaseModel): self.status = 'ACTIVE' self.joined_method = 'CREATED' self.parent_id = organization.root_id + self.attached_policies = [] @property def arn(self): @@ -103,6 +105,7 @@ class FakeOrganizationalUnit(BaseModel): self.name = kwargs.get('Name') self.parent_id = kwargs.get('ParentId') self._arn_format = utils.OU_ARN_FORMAT + self.attached_policies = [] @property def arn(self): @@ -134,6 +137,7 @@ class FakeRoot(FakeOrganizationalUnit): 'Status': 'ENABLED' }] self._arn_format = utils.ROOT_ARN_FORMAT + self.attached_policies = [] def describe(self): return { @@ -144,12 +148,52 @@ class FakeRoot(FakeOrganizationalUnit): } +class FakeServiceControlPolicy(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'POLICY' + self.content = kwargs.get('Content') + self.description = kwargs.get('Description') + self.name = kwargs.get('Name') + self.type = kwargs.get('Type') + self.id = utils.make_random_service_control_policy_id() + self.aws_managed = False + self.organization_id = organization.id + self.master_account_id = 
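# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: retention only
# appears in describe_log_groups once a policy is set, matching AWS's
# "Never Expire" default. The group name is invented for the example.
import boto3
from moto import mock_logs

@mock_logs
def retention_policy_round_trip():
    client = boto3.client("logs", region_name="us-east-1")
    client.create_log_group(logGroupName="example-group")
    assert "retentionInDays" not in client.describe_log_groups()["logGroups"][0]
    client.put_retention_policy(logGroupName="example-group", retentionInDays=7)
    assert client.describe_log_groups()["logGroups"][0]["retentionInDays"] == 7
    client.delete_retention_policy(logGroupName="example-group")
# ---------------------------------------------------------------------------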
organization.master_account_id + self._arn_format = utils.SCP_ARN_FORMAT + self.attachments = [] + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'Policy': { + 'PolicySummary': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'Description': self.description, + 'Type': self.type, + 'AwsManaged': self.aws_managed, + }, + 'Content': self.content + } + } + + class OrganizationsBackend(BaseBackend): def __init__(self): self.org = None self.accounts = [] self.ou = [] + self.policies = [] def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs['FeatureSet']) @@ -292,5 +336,108 @@ class OrganizationsBackend(BaseBackend): ] ) + def create_policy(self, **kwargs): + new_policy = FakeServiceControlPolicy(self.org, **kwargs) + self.policies.append(new_policy) + return new_policy.describe() + + def describe_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + return policy.describe() + + def attach_policy(self, **kwargs): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if (re.compile(utils.ROOT_ID_REGEX).match(kwargs['TargetId']) or + re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId'])): + ou = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if ou is not None: + if ou not in ou.attached_policies: + ou.attached_policies.append(policy) + policy.attachments.append(ou) + else: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + account = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if account is not None: + if account not in account.attached_policies: + account.attached_policies.append(policy) + policy.attachments.append(account) + else: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + + def list_policies(self, **kwargs): + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in self.policies + ]) + + def list_policies_for_target(self, **kwargs): + if re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId']): + obj = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + obj = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
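# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: creating a
# service control policy, attaching it to the root, and reading it back.
# Assumes the mock's existing list_roots call; the policy content is a
# minimal made-up SCP.
import json
import boto3
from moto import mock_organizations

@mock_organizations
def scp_round_trip():
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    root_id = client.list_roots()["Roots"][0]["Id"]
    policy_id = client.create_policy(
        Content=json.dumps({"Version": "2012-10-17", "Statement": [
            {"Effect": "Allow", "Action": "*", "Resource": "*"}]}),
        Description="example policy",
        Name="example-policy",
        Type="SERVICE_CONTROL_POLICY",
    )["Policy"]["PolicySummary"]["Id"]
    client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    targets = client.list_targets_for_policy(PolicyId=policy_id)["Targets"]
    assert targets[0]["TargetId"] == root_id
# ---------------------------------------------------------------------------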
+ ) + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies + ]) + + def list_targets_for_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + objects = [ + { + 'TargetId': obj.id, + 'Arn': obj.arn, + 'Name': obj.name, + 'Type': obj.type, + } for obj in policy.attachments + ] + return dict(Targets=objects) + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 966c3fbf3..814f30bad 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -85,3 +85,33 @@ class OrganizationsResponse(BaseResponse): return json.dumps( self.organizations_backend.list_children(**self.request_params) ) + + def create_policy(self): + return json.dumps( + self.organizations_backend.create_policy(**self.request_params) + ) + + def describe_policy(self): + return json.dumps( + self.organizations_backend.describe_policy(**self.request_params) + ) + + def attach_policy(self): + return json.dumps( + self.organizations_backend.attach_policy(**self.request_params) + ) + + def list_policies(self): + return json.dumps( + self.organizations_backend.list_policies(**self.request_params) + ) + + def list_policies_for_target(self): + return json.dumps( + self.organizations_backend.list_policies_for_target(**self.request_params) + ) + + def list_targets_for_policy(self): + return json.dumps( + self.organizations_backend.list_targets_for_policy(**self.request_params) + ) diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py index 007afa6ed..bde3660d2 100644 --- a/moto/organizations/utils.py +++ b/moto/organizations/utils.py @@ -10,6 +10,7 @@ MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}' ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}' ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}' OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}' +SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}' CHARSET = string.ascii_lowercase + string.digits ORG_ID_SIZE = 10 @@ -17,6 +18,15 @@ ROOT_ID_SIZE = 4 ACCOUNT_ID_SIZE = 12 OU_ID_SUFFIX_SIZE = 8 CREATE_ACCOUNT_STATUS_ID_SIZE = 8 +SCP_ID_SIZE = 8 + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE +SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE def make_random_org_id(): @@ -57,3 +67,10 @@ def make_random_create_account_status_id(): # "car-" followed by from 8 to 32 lower-case letters or digits. # e.g. 'car-35gxzwrp' return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE)) + + +def make_random_service_control_policy_id(): + # The regex pattern for a policy ID string requires "p-" followed by + # from 8 to 128 lower-case letters or digits. + # e.g. 
'p-k2av4a8a' + return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 70cbb95cb..64e5c5e35 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -531,14 +531,37 @@ class RedshiftBackend(BaseBackend): setattr(cluster, key, value) if new_cluster_identifier: - self.delete_cluster(cluster_identifier) + dic = { + "cluster_identifier": cluster_identifier, + "skip_final_snapshot": True, + "final_cluster_snapshot_identifier": None + } + self.delete_cluster(**dic) cluster.cluster_identifier = new_cluster_identifier self.clusters[new_cluster_identifier] = cluster return cluster - def delete_cluster(self, cluster_identifier): + def delete_cluster(self, **cluster_kwargs): + cluster_identifier = cluster_kwargs.pop("cluster_identifier") + cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot") + cluster_snapshot_identifier = cluster_kwargs.pop("final_cluster_snapshot_identifier") + if cluster_identifier in self.clusters: + if cluster_skip_final_snapshot is False and cluster_snapshot_identifier is None: + raise ClientError( + "InvalidParameterValue", + 'FinalSnapshotIdentifier is required for Snapshot copy ' + 'when SkipFinalSnapshot is False' + ) + elif cluster_skip_final_snapshot is False and cluster_snapshot_identifier is not None: # create snapshot + cluster = self.describe_clusters(cluster_identifier)[0] + self.create_cluster_snapshot( + cluster_identifier, + cluster_snapshot_identifier, + cluster.region, + cluster.tags) + return self.clusters.pop(cluster_identifier) raise ClusterNotFoundError(cluster_identifier) @@ -617,9 +640,12 @@ class RedshiftBackend(BaseBackend): def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None): if cluster_identifier: + cluster_snapshots = [] for snapshot in self.snapshots.values(): if snapshot.cluster.cluster_identifier == cluster_identifier: - return [snapshot] + cluster_snapshots.append(snapshot) + if cluster_snapshots: + return cluster_snapshots raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index 69fbac7c1..a7758febb 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -240,8 +240,13 @@ class RedshiftResponse(BaseResponse): def delete_cluster(self): - cluster_identifier = self._get_param("ClusterIdentifier") - cluster = self.redshift_backend.delete_cluster(cluster_identifier) + request_kwargs = { + "cluster_identifier": self._get_param("ClusterIdentifier"), + "final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"), + "skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot") + } + + cluster = self.redshift_backend.delete_cluster(**request_kwargs) return self.get_response({ "DeleteClusterResponse": { diff --git a/moto/resourcegroups/__init__.py b/moto/resourcegroups/__init__.py new file mode 100644 index 000000000..74b0eb598 --- /dev/null +++ b/moto/resourcegroups/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import resourcegroups_backends +from ..core.models import base_decorator + +resourcegroups_backend = resourcegroups_backends['us-east-1'] +mock_resourcegroups = base_decorator(resourcegroups_backends) diff --git a/moto/resourcegroups/exceptions.py b/moto/resourcegroups/exceptions.py new file mode 100644 index 000000000..a8e542979 --- /dev/null +++ b/moto/resourcegroups/exceptions.py @@ -0,0 +1,13 @@ +from __future__ 
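# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the diff above: delete_cluster
# now honors SkipFinalClusterSnapshot, creating a final snapshot when asked.
# Identifiers and credentials are invented for the example.
import boto3
from moto import mock_redshift

@mock_redshift
def delete_with_final_snapshot():
    client = boto3.client("redshift", region_name="us-east-1")
    client.create_cluster(
        ClusterIdentifier="my-cluster",
        NodeType="ds2.xlarge",
        MasterUsername="admin",
        MasterUserPassword="Password1",
    )
    client.delete_cluster(
        ClusterIdentifier="my-cluster",
        SkipFinalClusterSnapshot=False,
        FinalClusterSnapshotIdentifier="my-final-snapshot",
    )
    snapshots = client.describe_cluster_snapshots()["Snapshots"]
    assert snapshots[0]["SnapshotIdentifier"] == "my-final-snapshot"
# ---------------------------------------------------------------------------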
import unicode_literals +import json + +from werkzeug.exceptions import HTTPException + + +class BadRequestException(HTTPException): + code = 400 + + def __init__(self, message, **kwargs): + super(BadRequestException, self).__init__( + description=json.dumps({"Message": message, "Code": "BadRequestException"}), **kwargs + ) diff --git a/moto/resourcegroups/models.py b/moto/resourcegroups/models.py new file mode 100644 index 000000000..6734bd48a --- /dev/null +++ b/moto/resourcegroups/models.py @@ -0,0 +1,338 @@ +from __future__ import unicode_literals +from builtins import str + +import boto3 +import json +import re + +from moto.core import BaseBackend, BaseModel +from .exceptions import BadRequestException + + +class FakeResourceGroup(BaseModel): + def __init__(self, name, resource_query, description=None, tags=None): + self.errors = [] + description = description or "" + tags = tags or {} + if self._validate_description(value=description): + self._description = description + if self._validate_name(value=name): + self._name = name + if self._validate_resource_query(value=resource_query): + self._resource_query = resource_query + if self._validate_tags(value=tags): + self._tags = tags + self._raise_errors() + self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name) + + @staticmethod + def _format_error(key, value, constraint): + return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format( + constraint=constraint, + key=key, + value=value, + ) + + def _raise_errors(self): + if self.errors: + errors_len = len(self.errors) + plural = "s" if len(self.errors) > 1 else "" + errors = "; ".join(self.errors) + raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format( + errors_len=errors_len, plural=plural, errors=errors, + )) + + def _validate_description(self, value): + errors = [] + if len(value) > 511: + errors.append(self._format_error( + key="description", + value=value, + constraint="Member must have length less than or equal to 512", + )) + if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_name(self, value): + errors = [] + if len(value) > 128: + errors.append(self._format_error( + key="name", + value=value, + constraint="Member must have length less than or equal to 128", + )) + # Note \ is a character to match not an escape. + if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_resource_query(self, value): + errors = [] + if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}: + errors.append(self._format_error( + key="resourceQuery.type", + value=value, + constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]", + )) + if len(value["Query"]) > 2048: + errors.append(self._format_error( + key="resourceQuery.query", + value=value, + constraint="Member must have length less than or equal to 2048", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_tags(self, value): + errors = [] + # AWS only outputs one error for all keys and one for all values. 
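+ # Illustration: with tags={'key!': 'a', 'other!': 'b'} both keys fail the pattern, but only one key error is recorded; oversize or invalid values collapse into a single value error the same way.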
+ error_keys = None + error_values = None + # Python's re module has no \p{...} Unicode classes; approximate AWS's pattern with \w and \s. + regex = re.compile(r"^[\w\s.:/=+\-@]*$") + for tag_key, tag_value in value.items(): + # Validation for len(tag_key) >= 1 is done by botocore. + if len(tag_key) > 128 or not regex.match(tag_key): + error_keys = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 128, " + "Member must have length greater than or equal to 1, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + # Validation for len(tag_value) >= 0 is nonsensical. + if len(tag_value) > 256 or not regex.match(tag_value): + error_values = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 256, " + "Member must have length greater than or equal to 0, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + if error_keys: + errors.append(error_keys) + if error_values: + errors.append(error_values) + if errors: + self.errors += errors + return False + return True + + @property + def description(self): + return self._description + + @description.setter + def description(self, value): + if not self._validate_description(value=value): + self._raise_errors() + self._description = value + + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + if not self._validate_name(value=value): + self._raise_errors() + self._name = value + + @property + def resource_query(self): + return self._resource_query + + @resource_query.setter + def resource_query(self, value): + if not self._validate_resource_query(value=value): + self._raise_errors() + self._resource_query = value + + @property + def tags(self): + return self._tags + + @tags.setter + def tags(self, value): + if not self._validate_tags(value=value): + self._raise_errors() + self._tags = value + + +class ResourceGroups(): + def __init__(self): + self.by_name = {} + self.by_arn = {} + + def __contains__(self, item): + return item in self.by_name + + def append(self, resource_group): + self.by_name[resource_group.name] = resource_group + self.by_arn[resource_group.arn] = resource_group + + def delete(self, name): + group = self.by_name[name] + del self.by_name[name] + del self.by_arn[group.arn] + return group + + +class ResourceGroupsBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsBackend, self).__init__() + self.region_name = region_name + self.groups = ResourceGroups() + + @staticmethod + def _validate_resource_query(resource_query): + type = resource_query["Type"] + query = json.loads(resource_query["Query"]) + query_keys = set(query.keys()) + invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax") + if not isinstance(query["ResourceTypeFilters"], list): + raise invalid_json_exception + if type == "CLOUDFORMATION_STACK_1_0": + if query_keys != {"ResourceTypeFilters", "StackIdentifier"}: + raise invalid_json_exception + stack_identifier = query["StackIdentifier"] + if not isinstance(stack_identifier, str): + raise invalid_json_exception + if not re.match( + r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$", + stack_identifier, + ): + raise BadRequestException( + "Invalid query: Verify that the specified ARN is formatted correctly."
+ ) + # Once checking other resources is implemented. + # if stack_identifier not in self.cloudformation_backend.stacks: + # raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.") + if type == "TAG_FILTERS_1_0": + if query_keys != {"ResourceTypeFilters", "TagFilters"}: + raise invalid_json_exception + tag_filters = query["TagFilters"] + if not isinstance(tag_filters, list): + raise invalid_json_exception + if not tag_filters or len(tag_filters) > 50: + raise BadRequestException( + "Invalid query: The TagFilters list must contain between 1 and 50 elements" + ) + for tag_filter in tag_filters: + if not isinstance(tag_filter, dict): + raise invalid_json_exception + if set(tag_filter.keys()) != {"Key", "Values"}: + raise invalid_json_exception + key = tag_filter["Key"] + if not isinstance(key, str): + raise invalid_json_exception + if not key: + raise BadRequestException( + "Invalid query: The TagFilter element cannot have empty or null Key field" + ) + if len(key) > 128: + raise BadRequestException("Invalid query: The maximum length for a tag Key is 128") + values = tag_filter["Values"] + if not isinstance(values, list): + raise invalid_json_exception + if len(values) > 20: + raise BadRequestException( + "Invalid query: The TagFilter Values list must contain between 0 and 20 elements" + ) + for value in values: + if not isinstance(value, str): + raise invalid_json_exception + if len(value) > 256: + raise BadRequestException( + "Invalid query: The maximum length for a tag Value is 256" + ) + + @staticmethod + def _validate_tags(tags): + for tag in tags: + if tag.lower().startswith('aws:'): + raise BadRequestException("Tag keys must not start with 'aws:'") + + def create_group(self, name, resource_query, description=None, tags=None): + tags = tags or {} + group = FakeResourceGroup( + name=name, + resource_query=resource_query, + description=description, + tags=tags, + ) + if name in self.groups: + raise BadRequestException("Cannot create group: group already exists") + if name.upper().startswith("AWS"): + raise BadRequestException("Group name must not start with 'AWS'") + self._validate_tags(tags) + self._validate_resource_query(resource_query) + self.groups.append(group) + return group + + def delete_group(self, group_name): + return self.groups.delete(name=group_name) + + def get_group(self, group_name): + return self.groups.by_name[group_name] + + def get_tags(self, arn): + return self.groups.by_arn[arn].tags + + # def list_group_resources(self): + # ... + + def list_groups(self, filters=None, max_results=None, next_token=None): + return self.groups.by_name + + # def search_resources(self): + # ... 
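For orientation, here is a minimal usage sketch of the new resource-groups mock; the group name, description, and TAG_FILTERS_1_0 query below are illustrative values, not taken from the change itself:

import json
import boto3
from moto import mock_resourcegroups

@mock_resourcegroups
def create_example_group():
    # Hypothetical example data; any name/query accepted by the validators above works.
    client = boto3.client("resource-groups", region_name="us-east-1")
    response = client.create_group(
        Name="example-group",
        Description="EC2 instances tagged as prod",
        ResourceQuery={
            "Type": "TAG_FILTERS_1_0",
            "Query": json.dumps({
                "ResourceTypeFilters": ["AWS::EC2::Instance"],
                "TagFilters": [{"Key": "stage", "Values": ["prod"]}],
            }),
        },
    )
    return response["Group"]["GroupArn"]

print(create_example_group())  # FakeResourceGroup hardcodes arn:aws:resource-groups:us-west-1:123456789012:example-group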
+ + def tag(self, arn, tags): + all_tags = self.groups.by_arn[arn].tags + all_tags.update(tags) + self._validate_tags(all_tags) + self.groups.by_arn[arn].tags = all_tags + + def untag(self, arn, keys): + group = self.groups.by_arn[arn] + for key in keys: + del group.tags[key] + + def update_group(self, group_name, description=None): + if description: + self.groups.by_name[group_name].description = description + return self.groups.by_name[group_name] + + def update_group_query(self, group_name, resource_query): + self._validate_resource_query(resource_query) + self.groups.by_name[group_name].resource_query = resource_query + return self.groups.by_name[group_name] + + +available_regions = boto3.session.Session().get_available_regions("resource-groups") +resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions} diff --git a/moto/resourcegroups/responses.py b/moto/resourcegroups/responses.py new file mode 100644 index 000000000..02ea14c1a --- /dev/null +++ b/moto/resourcegroups/responses.py @@ -0,0 +1,162 @@ +from __future__ import unicode_literals +import json + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import resourcegroups_backends + + +class ResourceGroupsResponse(BaseResponse): + SERVICE_NAME = 'resource-groups' + + @property + def resourcegroups_backend(self): + return resourcegroups_backends[self.region] + + def create_group(self): + name = self._get_param("Name") + description = self._get_param("Description") + resource_query = self._get_param("ResourceQuery") + tags = self._get_param("Tags") + group = self.resourcegroups_backend.create_group( + name=name, + description=description, + resource_query=resource_query, + tags=tags, + ) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + "ResourceQuery": group.resource_query, + "Tags": group.tags + }) + + def delete_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.delete_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def get_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } + }) + + def get_group_query(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": group.resource_query, + } + }) + + def get_tags(self): + arn = unquote(self._get_param("Arn")) + return json.dumps({ + "Arn": arn, + "Tags": self.resourcegroups_backend.get_tags(arn=arn) + }) + + def list_group_resources(self): + raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented') + + def list_groups(self): + filters = self._get_param("Filters") + if filters: + raise NotImplementedError( + 'ResourceGroups.list_groups with filter parameter is not yet implemented' + ) + max_results = self._get_int_param("MaxResults", 50) + next_token = self._get_param("NextToken") + groups = self.resourcegroups_backend.list_groups( + filters=filters, + max_results=max_results, + next_token=next_token + ) + return json.dumps({ + 
"GroupIdentifiers": [{ + "GroupName": group.name, + "GroupArn": group.arn, + } for group in groups.values()], + "Groups": [{ + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } for group in groups.values()], + "NextToken": next_token, + }) + + def search_resources(self): + raise NotImplementedError('ResourceGroups.search_resources is not yet implemented') + + def tag(self): + arn = unquote(self._get_param("Arn")) + tags = self._get_param("Tags") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.tag(arn=arn, tags=tags) + return json.dumps({ + "Arn": arn, + "Tags": tags + }) + + def untag(self): + arn = unquote(self._get_param("Arn")) + keys = self._get_param("Keys") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.untag(arn=arn, keys=keys) + return json.dumps({ + "Arn": arn, + "Keys": keys + }) + + def update_group(self): + group_name = self._get_param("GroupName") + description = self._get_param("Description", "") + group = self.resourcegroups_backend.update_group(group_name=group_name, description=description) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def update_group_query(self): + group_name = self._get_param("GroupName") + resource_query = self._get_param("ResourceQuery") + group = self.resourcegroups_backend.update_group_query( + group_name=group_name, + resource_query=resource_query + ) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": resource_query + } + }) diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py new file mode 100644 index 000000000..518dde766 --- /dev/null +++ b/moto/resourcegroups/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsResponse + +url_bases = [ + "https?://resource-groups(-fips)?.(.+).amazonaws.com", +] + +url_paths = { + '{0}/groups$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)/query$': ResourceGroupsResponse.dispatch, + '{0}/groups-list$': ResourceGroupsResponse.dispatch, + '{0}/resources/(?P[^/]+)/tags$': ResourceGroupsResponse.dispatch, +} diff --git a/moto/route53/models.py b/moto/route53/models.py index 3760d3817..d70307036 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -165,6 +165,12 @@ class RecordSet(BaseModel): hosted_zone.delete_rrset_by_name(self.name) +def reverse_domain_name(domain_name): + if domain_name.endswith('.'): # normalize without trailing dot + domain_name = domain_name[:-1] + return '.'.join(reversed(domain_name.split('.'))) + + class FakeZone(BaseModel): def __init__(self, name, id_, private_zone, comment=None): @@ -200,12 +206,15 @@ class FakeZone(BaseModel): def get_record_sets(self, start_type, start_name): record_sets = list(self.rrsets) # Copy the list + if start_name: + record_sets = [ + record_set + for record_set in record_sets + if reverse_domain_name(record_set.name) >= reverse_domain_name(start_name) + ] if start_type: record_sets = [ record_set for record_set in record_sets if record_set.type_ >= start_type] - if start_name: - record_sets = [ - record_set for record_set in record_sets if 
record_set.name >= start_name] return record_sets diff --git a/moto/s3/models.py b/moto/s3/models.py index 37fed3335..7488114e3 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -17,8 +17,11 @@ import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \ - EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys +from .exceptions import ( + BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest, + EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, + InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted +) from .utils import clean_key_name, _VersionedKeyStore MAX_BUCKET_NAME_LENGTH = 63 @@ -87,10 +90,13 @@ class FakeKey(BaseModel): new_value = new_value.encode(DEFAULT_TEXT_ENCODING) self._value_buffer.write(new_value) - def copy(self, new_name=None): + def copy(self, new_name=None, new_is_versioned=None): r = copy.deepcopy(self) if new_name is not None: r.name = new_name + if new_is_versioned is not None: + r._is_versioned = new_is_versioned + r.refresh_version() return r def set_metadata(self, metadata, replace=False): @@ -460,6 +466,7 @@ class FakeBucket(BaseModel): self.cors = [] self.logging = {} self.notification_configuration = None + self.accelerate_configuration = None @property def location(self): @@ -554,7 +561,6 @@ class FakeBucket(BaseModel): self.rules = [] def set_cors(self, rules): - from moto.s3.exceptions import InvalidRequest, MalformedXML self.cors = [] if len(rules) > 100: @@ -604,7 +610,6 @@ class FakeBucket(BaseModel): self.logging = {} return - from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted # Target bucket must exist in the same account (assuming all moto buckets are in the same account): if not bucket_backend.buckets.get(logging_config["TargetBucket"]): raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") @@ -652,6 +657,13 @@ class FakeBucket(BaseModel): if region != self.region_name: raise InvalidNotificationDestination() + def set_accelerate_configuration(self, accelerate_config): + if self.accelerate_configuration is None and accelerate_config == 'Suspended': + # Cannot "suspend" a not active acceleration. 
Leaves it undefined + return + + self.accelerate_configuration = accelerate_config + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -854,6 +866,15 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) + def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration): + if accelerate_configuration not in ['Enabled', 'Suspended']: + raise MalformedXML() + + bucket = self.get_bucket(bucket_name) + if bucket.name.find('.') != -1: + raise InvalidRequest('PutBucketAccelerateConfiguration') + bucket.set_accelerate_configuration(accelerate_configuration) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) @@ -891,12 +912,11 @@ class S3Backend(BaseBackend): return multipart.set_part(part_id, value) def copy_part(self, dest_bucket_name, multipart_id, part_id, - src_bucket_name, src_key_name, start_byte, end_byte): - src_key_name = clean_key_name(src_key_name) - src_bucket = self.get_bucket(src_bucket_name) + src_bucket_name, src_key_name, src_version_id, start_byte, end_byte): dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = src_bucket.keys[src_key_name].value + + src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value if start_byte is not None: src_value = src_value[start_byte:end_byte + 1] return multipart.set_part(part_id, src_value) @@ -973,17 +993,15 @@ class S3Backend(BaseBackend): dest_bucket = self.get_bucket(dest_bucket_name) key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) - if dest_key_name != src_key_name: - key = key.copy(dest_key_name) - dest_bucket.keys[dest_key_name] = key - # By this point, the destination key must exist, or KeyError - if dest_bucket.is_versioned: - dest_bucket.keys[dest_key_name].refresh_version() + new_key = key.copy(dest_key_name, dest_bucket.is_versioned) + if storage is not None: - key.set_storage_class(storage) + new_key.set_storage_class(storage) if acl is not None: - key.set_acl(acl) + new_key.set_acl(acl) + + dest_bucket.keys[dest_key_name] = new_key def set_bucket_acl(self, bucket_name, acl): bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py old mode 100755 new mode 100644 index 856178941..e03666666 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) return template.render(bucket=bucket) + elif "accelerate" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if bucket.accelerate_configuration is None: + template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET) + return 200, {}, template.render() + template = self.response_template(S3_BUCKET_ACCELERATE) + return template.render(bucket=bucket) elif 'versions' in querystring: delimiter = querystring.get('delimiter', [None])[0] @@ -442,6 +449,15 @@ class ResponseObject(_TemplateEnvironmentMixin): raise MalformedXML() except Exception as e: raise e + elif "accelerate" in querystring: + try: + accelerate_status = self._accelerate_config_from_xml(body) + self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: 
+ raise e else: if body: @@ -691,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) + + src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -700,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin): except ValueError: start_byte, end_byte = None, None - key = self.backend.copy_part( - bucket_name, upload_id, part_number, src_bucket, - src_key, start_byte, end_byte) + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + key = self.backend.copy_part( + bucket_name, upload_id, part_number, src_bucket, + src_key, src_version_id, start_byte, end_byte) + else: + return 404, response_headers, "" + template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE) response = template.render(part=key) else: @@ -741,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin): lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] - self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, - storage=storage_class, acl=acl, src_version_id=src_version_id) + + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, + storage=storage_class, acl=acl, src_version_id=src_version_id) + else: + return 404, response_headers, "" + new_key = self.backend.get_key(bucket_name, key_name) mdirective = request.headers.get('x-amz-metadata-directive') if mdirective is not None and mdirective == 'REPLACE': @@ -1034,6 +1061,11 @@ class ResponseObject(_TemplateEnvironmentMixin): return parsed_xml["NotificationConfiguration"] + def _accelerate_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + config = parsed_xml['AccelerateConfiguration'] + return config['Status'] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1686,3 +1718,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endfor %} """ + +S3_BUCKET_ACCELERATE = """ + + {{ bucket.accelerate_configuration }} + +""" + +S3_BUCKET_ACCELERATE_NOT_SET = """ + +""" diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index a72a32645..fa81b6d8b 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -27,3 +27,18 @@ class InvalidParameterException(SecretsManagerClientError): super(InvalidParameterException, self).__init__( 'InvalidParameterException', message) + + +class ResourceExistsException(SecretsManagerClientError): + def __init__(self, message): + super(ResourceExistsException, self).__init__( + 'ResourceExistsException', + message + ) + + +class InvalidRequestException(SecretsManagerClientError): + def __init__(self, message): + super(InvalidRequestException, self).__init__( + 'InvalidRequestException', + message) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 1350ab469..3e0424b6b 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import time import json import uuid +import datetime import boto3 @@ -10,6 +11,8 @@ from moto.core import BaseBackend, BaseModel from .exceptions import ( ResourceNotFoundException, 
InvalidParameterException, + ResourceExistsException, + InvalidRequestException, ClientError ) from .utils import random_password, secret_arn @@ -36,48 +39,130 @@ class SecretsManagerBackend(BaseBackend): def _is_valid_identifier(self, identifier): return identifier in self.secrets + def _unix_time_secs(self, dt): + epoch = datetime.datetime.utcfromtimestamp(0) + return (dt - epoch).total_seconds() + def get_secret_value(self, secret_id, version_id, version_stage): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException() - secret = self.secrets[secret_id] + if not version_id and version_stage: + # set version_id to match version_stage + versions_dict = self.secrets[secret_id]['versions'] + for ver_id, ver_val in versions_dict.items(): + if version_stage in ver_val['version_stages']: + version_id = ver_id + break + if not version_id: + raise ResourceNotFoundException() - response = json.dumps({ + # TODO check this part + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + secret = self.secrets[secret_id] + version_id = version_id or secret['default_version_id'] + + secret_version = secret['versions'][version_id] + + response_data = { "ARN": secret_arn(self.region, secret['secret_id']), "Name": secret['name'], - "VersionId": secret['version_id'], - "SecretString": secret['secret_string'], - "VersionStages": [ - "AWSCURRENT", - ], - "CreatedDate": secret['createdate'] - }) + "VersionId": secret_version['version_id'], + "VersionStages": secret_version['version_stages'], + "CreatedDate": secret_version['createdate'], + } + + if 'secret_string' in secret_version: + response_data["SecretString"] = secret_version['secret_string'] + + if 'secret_binary' in secret_version: + response_data["SecretBinary"] = secret_version['secret_binary'] + + response = json.dumps(response_data) return response - def create_secret(self, name, secret_string, tags, **kwargs): + def create_secret(self, name, secret_string=None, secret_binary=None, tags=[], **kwargs): - generated_version_id = str(uuid.uuid4()) + # error if secret exists + if name in self.secrets.keys(): + raise ResourceExistsException('A resource with the ID you requested already exists.') - secret = { - 'secret_string': secret_string, - 'secret_id': name, - 'name': name, - 'createdate': int(time.time()), - 'rotation_enabled': False, - 'rotation_lambda_arn': '', - 'auto_rotate_after_days': 0, - 'version_id': generated_version_id, - 'tags': tags - } - - self.secrets[name] = secret + version_id = self._add_secret(name, secret_string=secret_string, secret_binary=secret_binary, tags=tags) response = json.dumps({ "ARN": secret_arn(self.region, name), "Name": name, - "VersionId": generated_version_id, + "VersionId": version_id, + }) + + return response + + def _add_secret(self, secret_id, secret_string=None, secret_binary=None, tags=[], version_id=None, version_stages=None): + + if version_stages is None: + version_stages = ['AWSCURRENT'] + + if not version_id: + version_id = str(uuid.uuid4()) + + secret_version = { + 'createdate': int(time.time()), + 'version_id': version_id, + 'version_stages': version_stages, + } + + if secret_string is not None: + secret_version['secret_string'] = secret_string + + if secret_binary is not None: + secret_version['secret_binary'] = secret_binary + + if secret_id in self.secrets: + # remove all old 
AWSPREVIOUS stages + for secret_version_to_look_at in self.secrets[secret_id]['versions'].values(): + if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']: + secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS') + + # set old AWSCURRENT secret to AWSPREVIOUS + previous_current_version_id = self.secrets[secret_id]['default_version_id'] + self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS'] + + self.secrets[secret_id]['versions'][version_id] = secret_version + self.secrets[secret_id]['default_version_id'] = version_id + else: + self.secrets[secret_id] = { + 'versions': { + version_id: secret_version + }, + 'default_version_id': version_id, + } + + secret = self.secrets[secret_id] + secret['secret_id'] = secret_id + secret['name'] = secret_id + secret['rotation_enabled'] = False + secret['rotation_lambda_arn'] = '' + secret['auto_rotate_after_days'] = 0 + secret['tags'] = tags + + return version_id + + def put_secret_value(self, secret_id, secret_string, version_stages): + + version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages) + + response = json.dumps({ + 'ARN': secret_arn(self.region, secret_id), + 'Name': secret_id, + 'VersionId': version_id, + 'VersionStages': version_stages + }) + + return response @@ -101,7 +186,7 @@ class SecretsManagerBackend(BaseBackend): "LastRotatedDate": None, "LastChangedDate": None, "LastAccessedDate": None, - "DeletedDate": None, + "DeletedDate": secret.get('deleted_date', None), "Tags": secret['tags'] }) @@ -115,6 +200,12 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + if client_request_token: token_length = len(client_request_token) if token_length < 32 or token_length > 64: @@ -144,17 +235,24 @@ secret = self.secrets[secret_id] - secret['version_id'] = client_request_token or '' + old_secret_version = secret['versions'][secret['default_version_id']] + new_version_id = client_request_token or str(uuid.uuid4()) + + self._add_secret(secret_id, old_secret_version['secret_string'], tags=secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT']) + secret['rotation_lambda_arn'] = rotation_lambda_arn or '' if rotation_rules: secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0) if secret['auto_rotate_after_days'] > 0: secret['rotation_enabled'] = True + if 'AWSCURRENT' in old_secret_version['version_stages']: + old_secret_version['version_stages'].remove('AWSCURRENT') + response = json.dumps({ "ARN": secret_arn(self.region, secret['secret_id']), "Name": secret['name'], - "VersionId": secret['version_id'] + "VersionId": new_version_id }) return response @@ -188,6 +286,111 @@ class SecretsManagerBackend(BaseBackend): return response + def list_secret_version_ids(self, secret_id): + secret = self.secrets[secret_id] + + version_list = [] + for version_id, version in secret['versions'].items(): + version_list.append({ + 'CreatedDate': int(time.time()), + 'LastAccessedDate': int(time.time()), + 'VersionId': version_id, + 'VersionStages': version['version_stages'], + }) + + response = json.dumps({ + 'ARN': secret['secret_id'], + 'Name': secret['name'], + 'NextToken': '', + 'Versions': version_list, + }) + + return response + + def list_secrets(self, max_results, next_token): + # TODO implement pagination and limits + + secret_list = [] + for secret in self.secrets.values(): + + versions_to_stages = {} + for version_id, version in secret['versions'].items(): + versions_to_stages[version_id] = version['version_stages'] + + secret_list.append({ + "ARN": secret_arn(self.region, secret['secret_id']), + "DeletedDate": secret.get('deleted_date', None), + "Description": "", + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret['name'], + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "SecretVersionsToStages": versions_to_stages, + "Tags": secret['tags'] + }) + + return secret_list, None + + def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + if recovery_window_in_days and force_delete_without_recovery: + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \ + use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays." + ) + + if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30): + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \ + RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
+ ) + + deletion_date = datetime.datetime.utcnow() + + if force_delete_without_recovery: + secret = self.secrets.pop(secret_id, None) + else: + deletion_date += datetime.timedelta(days=recovery_window_in_days or 30) + self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date) + secret = self.secrets.get(secret_id, None) + + if not secret: + raise ResourceNotFoundException + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name, self._unix_time_secs(deletion_date) + + def restore_secret(self, secret_id): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + self.secrets[secret_id].pop('deleted_date', None) + + secret = self.secrets[secret_id] + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name + available_regions = ( boto3.session.Session().get_available_regions("secretsmanager") diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index 932e7bfd7..090688351 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -4,6 +4,8 @@ from moto.core.responses import BaseResponse from .models import secretsmanager_backends +import json + class SecretsManagerResponse(BaseResponse): @@ -19,10 +21,12 @@ class SecretsManagerResponse(BaseResponse): def create_secret(self): name = self._get_param('Name') secret_string = self._get_param('SecretString') + secret_binary = self._get_param('SecretBinary') tags = self._get_param('Tags', if_none=[]) return secretsmanager_backends[self.region].create_secret( name=name, secret_string=secret_string, + secret_binary=secret_binary, tags=tags ) @@ -64,3 +68,46 @@ class SecretsManagerResponse(BaseResponse): rotation_lambda_arn=rotation_lambda_arn, rotation_rules=rotation_rules ) + + def put_secret_value(self): + secret_id = self._get_param('SecretId', if_none='') + secret_string = self._get_param('SecretString', if_none='') + version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT']) + return secretsmanager_backends[self.region].put_secret_value( + secret_id=secret_id, + secret_string=secret_string, + version_stages=version_stages, + ) + + def list_secret_version_ids(self): + secret_id = self._get_param('SecretId', if_none='') + return secretsmanager_backends[self.region].list_secret_version_ids( + secret_id=secret_id + ) + + def list_secrets(self): + max_results = self._get_int_param("MaxResults") + next_token = self._get_param("NextToken") + secret_list, next_token = secretsmanager_backends[self.region].list_secrets( + max_results=max_results, + next_token=next_token, + ) + return json.dumps(dict(SecretList=secret_list, NextToken=next_token)) + + def delete_secret(self): + secret_id = self._get_param("SecretId") + recovery_window_in_days = self._get_param("RecoveryWindowInDays") + force_delete_without_recovery = self._get_param("ForceDeleteWithoutRecovery") + arn, name, deletion_date = secretsmanager_backends[self.region].delete_secret( + secret_id=secret_id, + recovery_window_in_days=recovery_window_in_days, + force_delete_without_recovery=force_delete_without_recovery, + ) + return json.dumps(dict(ARN=arn, Name=name, DeletionDate=deletion_date)) + + def restore_secret(self): + secret_id = self._get_param("SecretId") + arn, name = secretsmanager_backends[self.region].restore_secret( + secret_id=secret_id, + ) + return json.dumps(dict(ARN=arn, Name=name)) diff --git a/moto/sns/models.py b/moto/sns/models.py index 41e83aba4..c764cb25f 100644 --- 
a/moto/sns/models.py +++ b/moto/sns/models.py @@ -255,7 +255,7 @@ class SNSBackend(BaseBackend): return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): - if next_token is None: + if next_token is None or not next_token: next_token = 0 next_token = int(next_token) values = list(values_map.values())[ diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b4f64b14e..5ddaf8849 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - {{ requestid }} + """ @@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - {{ requestid }} + """ @@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_QUEUE_RESPONSE = """ - {{ requestid }} + """ @@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - {{ requestid }} + """ @@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - {{ requestid }} + """ @@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - {{ requestid }} + """ @@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ - {{ requestid }} + """ diff --git a/scripts/update_managed_policies.py b/scripts/update_managed_policies.py index 5b60660f6..de7058fd7 100755 --- a/scripts/update_managed_policies.py +++ b/scripts/update_managed_policies.py @@ -48,7 +48,8 @@ for policy_name in policies: PolicyArn=policies[policy_name]['Arn'], VersionId=policies[policy_name]['DefaultVersionId']) for key in response['PolicyVersion']: - policies[policy_name][key] = response['PolicyVersion'][key] + if key != "CreateDate": # the policy's CreateDate should not be overwritten by its version's CreateDate + policies[policy_name][key] = response['PolicyVersion'][key] with open(output_file, 'w') as f: triple_quote = '\"\"\"' diff --git a/setup.py b/setup.py index 99be632db..593d248e9 100755 --- a/setup.py +++ b/setup.py @@ -18,12 +18,22 @@ def read(*parts): return fp.read() +def get_version(): + version_file = read('moto', '__init__.py') + version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', + version_file, re.MULTILINE) + if version_match: + return version_match.group(1) + raise RuntimeError('Unable to find version string.') + + install_requires = [ - "Jinja2>=2.7.3", + "Jinja2>=2.10.1", "boto>=2.36.0", "boto3>=1.9.86", "botocore>=1.12.86", "cryptography>=2.3.0", + "datetime", "requests>=2.5", "xmltodict", "six>1.9", @@ -39,6 +49,7 @@ install_requires = [ "responses>=0.9.0", "idna<2.9,>=2.5", "cfn-lint", + "sshpubkeys>=3.1.0,<4.0" ] extras_require = { @@ -55,7 +66,7 @@ else: setup( name='moto', - version='1.3.7', + version=get_version(), description='A library that allows your python tests to easily' ' mock out the boto library', long_description=read('README.md'), diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b1a65fb7e..750605c07 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -32,7 +32,7 @@ def test_create_autoscaling_group(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 
'us-east-1b'], + availability_zones=['us-east-1a', 'us-east-1b'], default_cooldown=60, desired_capacity=2, health_check_period=100, @@ -42,7 +42,10 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier=mocked_networking['subnet1'], + vpc_zone_identifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -57,12 +60,15 @@ def test_create_autoscaling_group(): group = conn.get_all_groups()[0] group.name.should.equal('tester_group') set(group.availability_zones).should.equal( - set(['us-east-1c', 'us-east-1b'])) + set(['us-east-1a', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) + group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + )) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults(): group.launch_config_name.should.equal('tester') # Defaults - list(group.availability_zones).should.equal([]) + list(group.availability_zones).should.equal(['us-east-1a']) # subnet1 group.desired_capacity.should.equal(2) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) @@ -217,7 +223,6 @@ def test_autoscaling_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], desired_capacity=2, max_size=2, min_size=2, @@ -227,13 +232,16 @@ def test_autoscaling_update(): conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] + group.availability_zones.should.equal(['us-east-1a']) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) - group.vpc_zone_identifier = 'subnet-5678efgh' + group.availability_zones = ['us-east-1b'] + group.vpc_zone_identifier = mocked_networking['subnet2'] group.update() group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-5678efgh') + group.availability_zones.should.equal(['us-east-1b']) + group.vpc_zone_identifier.should.equal(mocked_networking['subnet2']) @mock_autoscaling_deprecated @@ -249,7 +257,7 @@ def test_autoscaling_tags_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -309,7 +317,7 @@ def test_autoscaling_group_delete(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): mocked_networking = setup_networking_deprecated() - conn = boto.connect_autoscale() + conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( name='tester', image_id='ami-abcd1234', @@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances(): instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = [instance.instance_id for instance in instances] - ec2_conn = boto.connect_ec2() + ec2_conn = boto.ec2.connect_to_region('us-east-1') reservations = ec2_conn.get_all_instances() instances = reservations[0].instances instances.should.have.length_of(2) @@ -355,7 
+363,7 @@ def test_set_desired_capacity_up(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -391,7 +399,7 @@ def test_set_desired_capacity_down(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -543,6 +551,7 @@ def test_describe_load_balancers(): ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + assert response['ResponseMetadata']['RequestId'] list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') @@ -738,8 +747,12 @@ def test_describe_autoscaling_groups_boto3(): response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) group = response['AutoScalingGroups'][0] group['AutoScalingGroupName'].should.equal('test_asg') + group['AvailabilityZones'].should.equal(['us-east-1a']) + group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1']) group['NewInstancesProtectedFromScaleIn'].should.equal(True) - group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True) + for instance in group['Instances']: + instance['AvailabilityZone'].should.equal('us-east-1a') + instance['ProtectedFromScaleIn'].should.equal(True) @mock_autoscaling @@ -770,6 +783,7 @@ def test_describe_autoscaling_instances_boto3(): response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) for instance in response['AutoScalingInstances']: instance['AutoScalingGroupName'].should.equal('test_asg') + instance['AvailabilityZone'].should.equal('us-east-1a') instance['ProtectedFromScaleIn'].should.equal(True) @@ -793,6 +807,10 @@ def test_update_autoscaling_group_boto3(): _ = client.update_auto_scaling_group( AutoScalingGroupName='test_asg', MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), NewInstancesProtectedFromScaleIn=False, ) @@ -801,6 +819,7 @@ def test_update_autoscaling_group_boto3(): ) group = response['AutoScalingGroups'][0] group['MinSize'].should.equal(1) + set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'}) group['NewInstancesProtectedFromScaleIn'].should.equal(False) diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 3a50484c1..a142fd133 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -106,7 +106,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=mocked_networking['vpc']) + VPCZoneIdentifier=mocked_networking['subnet1']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py index b167ba5f5..ebbffbed3 100644 --- a/tests/test_autoscaling/utils.py +++ b/tests/test_autoscaling/utils.py @@ -1,5 +1,6 @@ import boto import boto3 +from boto import vpc as boto_vpc from moto import mock_ec2, mock_ec2_deprecated @@ -19,9 +20,14 @@ def setup_networking(): @mock_ec2_deprecated def 
setup_networking_deprecated(): - conn = boto.connect_vpc() + conn = boto_vpc.connect_to_region('us-east-1') vpc = conn.create_vpc("10.11.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24") - subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24") + subnet1 = conn.create_subnet( + vpc.id, + "10.11.1.0/24", + availability_zone='us-east-1a') + subnet2 = conn.create_subnet( + vpc.id, + "10.11.2.0/24", + availability_zone='us-east-1b') return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} - diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 7f3b44b79..9ef6fdb0d 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -282,7 +282,7 @@ def test_create_function_from_aws_bucket(): result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -291,7 +291,7 @@ def test_create_function_from_aws_bucket(): 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], @@ -327,7 +327,7 @@ def test_create_function_from_zipfile(): result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -336,7 +336,7 @@ def test_create_function_from_zipfile(): 'Timeout': 3, 'MemorySize': 128, 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": [], "SubnetIds": [], @@ -398,6 +398,8 @@ def test_get_function(): # Test get function with result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST') + # Test get function when can't find function name with assert_raises(ClientError): @@ -464,14 +466,15 @@ def test_publish(): Description='test lambda function', Timeout=3, MemorySize=128, - Publish=True, + Publish=False, ) function_list = conn.list_functions() function_list['Functions'].should.have.length_of(1) latest_arn = function_list['Functions'][0]['FunctionArn'] - conn.publish_version(FunctionName='testFunction') + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 function_list = conn.list_functions() function_list['Functions'].should.have.length_of(2) @@ -484,7 +487,7 @@ def test_publish(): function_list = conn.list_functions() function_list['Functions'].should.have.length_of(1) - function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction') @mock_lambda @@ -527,7 +530,7 @@ def test_list_create_list_get_delete_list(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 
'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.lambda_handler", "MemorySize": 128, @@ -700,7 +703,7 @@ def test_invoke_async_function(): ) success_result = conn.invoke_async( - FunctionName='testFunction', + FunctionName='testFunction', InvokeArgs=json.dumps({'test': 'event'}) ) @@ -740,7 +743,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, @@ -841,7 +844,7 @@ def test_list_versions_by_function(): conn.create_function( FunctionName='testFunction', Runtime='python2.7', - Role='test-iam-role', + Role='arn:aws:iam::123456789012:role/test-iam-role', Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', @@ -853,11 +856,31 @@ def test_list_versions_by_function(): Publish=True, ) - conn.publish_version(FunctionName='testFunction') - + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 versions = conn.list_versions_by_function(FunctionName='testFunction') - + assert len(versions['Versions']) == 3 assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST' + assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1' + assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2' + + conn.create_function( + FunctionName='testFunction_2', + Runtime='python2.7', + Role='arn:aws:iam::123456789012:role/test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=False, + ) + versions = conn.list_versions_by_function(FunctionName='testFunction_2') + assert len(versions['Versions']) == 1 + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST' @mock_lambda diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 2511de6da..d05bc1b53 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -184,6 +184,423 @@ dummy_import_template_json = json.dumps(dummy_import_template) dummy_redrive_template_json = json.dumps(dummy_redrive_template) +@mock_cloudformation +def test_boto3_describe_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + 
use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance'].should.have.key('Region').which.should.equal('us-west-2') + usw2_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + use1_instance['StackInstance'].should.have.key('Region').which.should.equal('us-east-1') + use1_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + + +@mock_cloudformation +def test_boto3_list_stacksets_length(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_set( + StackSetName="test_stack_set2", + TemplateBody=dummy_template_yaml, + ) + stacksets = cf_conn.list_stack_sets() + stacksets.should.have.length_of(2) + + +@mock_cloudformation +def test_boto3_list_stacksets_contents(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + stacksets = cf_conn.list_stack_sets() + stacksets['Summaries'][0].should.have.key('StackSetName').which.should.equal('test_stack_set') + stacksets['Summaries'][0].should.have.key('Status').which.should.equal('ACTIVE') + + +@mock_cloudformation +def test_boto3_stop_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + list_operation = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set" + ) + list_operation['Summaries'][-1]['Status'].should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_describe_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.describe_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id, + ) + + response['StackSetOperation']['Status'].should.equal('STOPPED') + response['StackSetOperation']['Action'].should.equal('CREATE') + + +@mock_cloudformation +def test_boto3_list_stack_set_operation_results(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + + 
cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.list_stack_set_operation_results( + StackSetName="test_stack_set", + OperationId=operation_id, + ) + + response['Summaries'].should.have.length_of(3) + response['Summaries'][0].should.have.key('Account').which.should.equal('123456789012') + response['Summaries'][1].should.have.key('Status').which.should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_update_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-west-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + usw1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-1', + ) + use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + use1_instance['StackInstance']['ParameterOverrides'].should.be.empty + + +@mock_cloudformation +def test_boto3_delete_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + cf_conn.delete_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1'], + RetainStacks=False, + ) + + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(1) + 
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Region'].should.equal( + 'us-west-2') + + +@mock_cloudformation +def test_boto3_create_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(2) + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Account'].should.equal( + '123456789012') + + +@mock_cloudformation +def test_boto3_create_stack_instances_with_param_overrides(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + +@mock_cloudformation +def test_update_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.update_stack_set( + StackSetName='test_stack_set', + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param_overrides, + ) + stackset = cf_conn.describe_stack_set(StackSetName='test_stack_set') + + stackset['StackSet']['Parameters'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + stackset['StackSet']['Parameters'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + stackset['StackSet']['Parameters'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + stackset['StackSet']['Parameters'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + + +@mock_cloudformation +def test_boto3_list_stack_set_operations(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + 
StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set") + list_operation['Summaries'].should.have.length_of(2) + list_operation['Summaries'][-1]['Action'].should.equal('UPDATE') + + +@mock_cloudformation +def test_boto3_delete_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.delete_stack_set(StackSetName='test_stack_set') + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['Status'].should.equal( + 'DELETED') + + +@mock_cloudformation +def test_boto3_create_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml) + + +@mock_cloudformation +@mock_s3 +def test_create_stack_set_from_s3_url(): + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + bucket = s3_conn.create_bucket(Bucket="foobar") + + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) + + cf_conn = boto3.client('cloudformation', region_name='us-west-1') + cf_conn.create_stack_set( + StackSetName='stack_from_url', + TemplateURL=key_url, + ) + cf_conn.describe_stack_set(StackSetName="stack_from_url")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_ref_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml_with_ref) + + +@mock_cloudformation +def test_boto3_describe_stack_set_params(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['Parameters'].should.equal( + params) + @mock_cloudformation def test_boto3_create_stack(): diff --git 
a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 449fde4ce..42ddd2351 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import json import base64 +from decimal import Decimal + import boto import boto.cloudformation import boto.datapipeline @@ -22,6 +24,7 @@ from moto import ( mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, + mock_dynamodb2, mock_ec2, mock_ec2_deprecated, mock_elb, @@ -39,6 +42,7 @@ from moto import ( mock_sqs, mock_sqs_deprecated, mock_elbv2) +from moto.dynamodb2.models import Table from .fixtures import ( ec2_classic_eip, @@ -2085,7 +2089,7 @@ def test_stack_kms(): def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2169,7 +2173,7 @@ def test_stack_spot_fleet(): def test_stack_spot_fleet_should_figure_out_default_price(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2433,3 +2437,131 @@ def test_stack_elbv2_resources_integration(): dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) + + +@mock_dynamodb2 +@mock_cloudformation +def test_stack_dynamodb_resources_integration(): + dynamodb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "Album", + "AttributeType": "S" + }, + { + "AttributeName": "Artist", + "AttributeType": "S" + }, + { + "AttributeName": "Sales", + "AttributeType": "N" + }, + { + "AttributeName": "NumberOfSongs", + "AttributeType": "N" + } + ], + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + }, + "TableName": "myTableName", + "GlobalSecondaryIndexes": [{ + "IndexName": "myGSI", + "KeySchema": [ + { + "AttributeName": "Sales", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","NumberOfSongs"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }, + { + "IndexName": "myGSI2", + "KeySchema": [ + { + "AttributeName": "NumberOfSongs", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","Artist"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }], + "LocalSecondaryIndexes":[{ + "IndexName": "myLSI", + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + 
"Projection": { + "NonKeyAttributes": ["Artist","NumberOfSongs"], + "ProjectionType": "INCLUDE" + } + }] + } + } + } + } + + dynamodb_template_json = json.dumps(dynamodb_template) + + cfn_conn = boto3.client('cloudformation', 'us-east-1') + cfn_conn.create_stack( + StackName='dynamodb_stack', + TemplateBody=dynamodb_template_json, + ) + + dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb_conn.Table('myTableName') + table.name.should.equal('myTableName') + + table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5}) + + response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"}) + + response['Item']['Album'].should.equal('myAlbum') + response['Item']['Sales'].should.equal(Decimal('10')) + response['Item']['NumberOfSongs'].should.equal(Decimal('5')) + response['Item']['Album'].should.equal('myAlbum') diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d25c69cf1..25242e352 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -83,6 +83,18 @@ get_availability_zones_output = { } } +parameters = { + "Parameters": { + "Param": { + "Type": "String", + }, + "NoEchoParam": { + "Type": "String", + "NoEcho": True + } + } +} + split_select_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -157,6 +169,9 @@ get_attribute_outputs_template = dict( get_availability_zones_template = dict( list(dummy_template.items()) + list(get_availability_zones_output.items())) +parameters_template = dict( + list(dummy_template.items()) + list(parameters.items())) + dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) output_type_template_json = json.dumps(outputs_template) @@ -165,6 +180,7 @@ get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) get_availability_zones_template_json = json.dumps( get_availability_zones_template) +parameters_template_json = json.dumps(parameters_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = json.dumps(sub_template) export_value_template_json = json.dumps(export_value_template) @@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs(): "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) +def test_parse_stack_with_parameters(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=parameters_template_json, + parameters={"Param": "visible value", "NoEchoParam": "hidden value"}, + region_name='us-west-1') + + stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam") + stack.resource_map.no_echo_parameter_keys.should_not.have("Param") + + def test_parse_equals_condition(): parse_condition( condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, @@ -448,8 +476,8 @@ def test_short_form_func_in_yaml_teamplate(): KeySplit: !Split [A, B] KeySub: !Sub A """ - yaml.add_multi_constructor('', yaml_tag_constructor) - template_dict = yaml.load(template) + yaml.add_multi_constructor('', yaml_tag_constructor, Loader=yaml.Loader) + template_dict = yaml.load(template, Loader=yaml.Loader) key_and_expects = [ ['KeyRef', {'Ref': 'foo'}], ['KeyB64', {'Fn::Base64': 'valueToEncode'}], diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 0ef082d5c..1483fcd0e 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py 
+++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -484,6 +484,82 @@ def test_describe_identity_providers(): result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) +@mock_cognitoidp +def test_update_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderDetails={ + "thing": new_value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value) + + +@mock_cognitoidp +def test_update_identity_provider_no_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + new_value = str(uuid.uuid4()) + + with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId="foo", + ProviderName="bar", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp +def test_update_identity_provider_no_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName="foo", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + @mock_cognitoidp def test_delete_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") @@ -1086,3 +1162,53 @@ def test_confirm_forgot_password(): ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) + +@mock_cognitoidp +def test_admin_update_user_attributes(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'John', + } + ] + ) + + conn.admin_update_user_attributes( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'Jane', + } + ] + ) + + user = conn.admin_get_user( + UserPoolId=user_pool_id, + Username=username + ) + attributes = user['UserAttributes'] + attributes.should.be.a(list) + for attr in attributes: + 
val = attr['Value'] + if attr['Name'] == 'family_name': + val.should.equal('Doe') + elif attr['Name'] == 'given_name': + val.should.equal('Jane') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index c3cc27aef..d0f672ab8 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -2,7 +2,9 @@ from __future__ import unicode_literals import sure # noqa -from moto.core.responses import AWSServiceSpec +from botocore.awsrequest import AWSPreparedRequest + +from moto.core.responses import AWSServiceSpec, BaseResponse from moto.core.responses import flatten_json_request_body @@ -79,3 +81,9 @@ def test_flatten_json_request_body(): i += 1 key = keyfmt.format(idx + 1, i) props.should.equal(body['Configurations'][idx]['Properties']) + + +def test_parse_qs_unicode_decode_error(): + body = b'{"key": "%D0"}, "C": "#0 = :0"}' + request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False) + BaseResponse().setup_class(request, request.url, request.headers) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 3d6f1de65..faa467aab 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -452,6 +452,90 @@ def test_basic_projection_expressions(): assert 'body' in results['Items'][1] assert 'forum_name' in results['Items'][1] +@mock_dynamodb2 +def test_basic_projection_expressions_using_scan(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a scan returning all items + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body, subject' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '1234', + 'body': 'yet another test message' + }) + + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body' + ) + + assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] + assert 'forum_name' not in results['Items'][1] + + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + @mock_dynamodb2 def test_basic_projection_expressions_with_attr_expression_names(): @@ -519,6 +603,84 @@ def test_basic_projection_expressions_with_attr_expression_names(): assert 'attachment' in 
results['Items'][0] assert results['Items'][0]['attachment'] == 'something' +@mock_dynamodb2 +def test_basic_projection_expressions_using_scan_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message', + 'attachment': 'something' + }) + # Test a scan returning all items + + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert 'attachment' in results['Items'][0] + assert 'subject' in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + + # Test without a FilterExpression + results = table.scan( + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert 'attachment' in results['Items'][0] + assert 'subject' in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + @mock_dynamodb2 def test_put_item_returns_consumed_capacity(): @@ -949,6 +1111,33 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') +@mock_dynamodb2 +def test_create_table_pay_per_request(): + client = boto3.client('dynamodb', region_name='us-east-1') + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + BillingMode="PAY_PER_REQUEST" + ) + + +@mock_dynamodb2 +def test_create_table_error_pay_per_request_with_provisioned_param(): + client = boto3.client('dynamodb', region_name='us-east-1') + + try: + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}, + BillingMode="PAY_PER_REQUEST" + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationException') + + @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -1504,6 +1693,7 @@ def test_dynamodb_streams_2(): } assert 'LatestStreamLabel' in resp['TableDescription'] assert 'LatestStreamArn' in resp['TableDescription'] + @mock_dynamodb2 def test_condition_expressions(): @@ -1612,3 +1802,112 @@ def test_condition_expressions(): ':match': {'S': 'match2'} } ) + + +@mock_dynamodb2 +def test_query_gsi_with_range_key(): + dynamodb =
boto3.client('dynamodb', region_name='us-east-1') + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_hash_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_hash_key', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'gsi_range_key', + 'KeyType': 'RANGE' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + } + ) + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test2'}, + 'gsi_hash_key': {'S': 'key1'}, + } + ) + + res = dynamodb.query(TableName='test', IndexName='test_gsi', + KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key', + ExpressionAttributeValues={ + ':gsi_hash_key': {'S': 'key1'}, + ':gsi_range_key': {'S': 'range1'} + }) + res.should.have.key("Count").equal(1) + res.should.have.key("Items") + res['Items'][0].should.equal({ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + }) + + +@mock_dynamodb2 +def test_scan_by_non_exists_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + with assert_raises(ClientError) as ex: + dynamodb.scan(TableName='test', IndexName='non_exists_index') + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'The table does not have the specified index: non_exists_index' + ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index a9ab298b7..e64d7d196 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1344,6 +1344,34 @@ def test_update_item_add_value_string_set(): 'subject': '123', }) +@mock_dynamodb2 +def test_update_item_delete_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'DELETE', + 'Value': set(['str2']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in 
table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1']), + 'forum_name': 'the-key', + 'subject': '123', + }) @mock_dynamodb2 def test_update_item_add_value_does_not_exist_is_created(): @@ -1961,3 +1989,113 @@ def test_query_pagination(): results = page1['Items'] + page2['Items'] subjects = set([int(r['subject']) for r in results]) subjects.should.equal(set(range(10))) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'range_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'range_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'}, + {'AttributeName': 'lsi_range_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + {'AttributeName': 'gsi_col', 'KeyType': 'HASH'}, + {'AttributeName': 'gsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ], + LocalSecondaryIndexes=[ + { + 'IndexName': 'test_lsi', + 'KeySchema': [ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'lsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '1'}, + 'lsi_range_key': {'S': '1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '2'}, + 'lsi_range_key': {'S': '2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == '1' + assert last_eval_key['gsi_range_key']['S'] == '1' + + res = dynamodb.scan(TableName='test', IndexName='test_lsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_lsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['range_key']['S'] == '1' + assert last_eval_key['lsi_range_key']['S'] == '1' diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 874804db0..1880c7cab 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -829,3 +829,77 @@ def 
test_scan_pagination(): results = page1['Items'] + page2['Items'] usernames = set([r['username'] for r in results]) usernames.should.equal(set(expected_usernames)) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': 'gsi_val1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': 'gsi_val2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == 'gsi_val1' diff --git a/tests/test_ec2/__init__.py b/tests/test_ec2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_ec2/helpers.py b/tests/test_ec2/helpers.py new file mode 100644 index 000000000..94c9c10cb --- /dev/null +++ b/tests/test_ec2/helpers.py @@ -0,0 +1,15 @@ +import six + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa + + +def rsa_check_private_key(private_key_material): + assert isinstance(private_key_material, six.string_types) + + private_key = serialization.load_pem_private_key( + data=private_key_material.encode('ascii'), + backend=default_backend(), + password=None) + assert isinstance(private_key, rsa.RSAPrivateKey) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 8f4a00b13..ab5b31ba0 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -16,7 +16,7 @@ from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() @@ -52,7 +52,7 @@ def test_create_and_delete_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') @@ -63,7 +63,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def 
test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: @@ -79,7 +79,7 @@ def test_create_encrypted_volume(): @mock_ec2_deprecated def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(80, "us-east-1a") volume2 = conn.create_volume(36, "us-east-1b") volume3 = conn.create_volume(20, "us-east-1c") @@ -99,7 +99,7 @@ def test_filter_volume_by_id(): @mock_ec2_deprecated def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -196,7 +196,7 @@ def test_volume_filters(): @mock_ec2_deprecated def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] volume = conn.create_volume(80, "us-east-1a") @@ -252,7 +252,7 @@ def test_volume_attach_and_detach(): @mock_ec2_deprecated def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") with assert_raises(EC2ResponseError) as ex: @@ -291,7 +291,7 @@ def test_create_snapshot(): @mock_ec2_deprecated def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -306,7 +306,7 @@ def test_create_encrypted_snapshot(): @mock_ec2_deprecated def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(36, "us-east-1a") snap1 = volume1.create_snapshot('a test snapshot 1') volume2 = conn.create_volume(42, 'us-east-1a') @@ -333,7 +333,7 @@ def test_filter_snapshot_by_id(): @mock_ec2_deprecated def test_snapshot_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) @@ -394,12 +394,17 @@ def test_snapshot_filters(): set([snap.id for snap in snapshots_by_encrypted] ).should.equal({snapshot3.id}) + snapshots_by_owner_id = conn.get_all_snapshots( + filters={'owner-id': '123456789012'}) + set([snap.id for snap in snapshots_by_owner_id] + ).should.equal({snapshot1.id, snapshot2.id, snapshot3.id}) + @mock_ec2_deprecated def test_snapshot_attribute(): import copy - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot() @@ -502,7 +507,7 @@ def test_snapshot_attribute(): @mock_ec2_deprecated def test_create_volume_from_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot('a test snapshot') @@ -524,7 +529,7 @@ def test_create_volume_from_snapshot(): @mock_ec2_deprecated def test_create_volume_from_encrypted_snapshot(): 
- conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') @@ -569,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping(): @mock_ec2_deprecated def test_volume_tag_escaping(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") vol = conn.create_volume(10, 'us-east-1a') snapshot = conn.create_snapshot(vol.id, 'Desc') diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index c0f0eea4d..f14f85721 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -42,7 +42,7 @@ def test_add_servers(): @freeze_time("2014-01-01 05:00:00") @mock_ec2_deprecated def test_instance_launch_and_terminate(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: reservation = conn.run_instances('ami-1234abcd', dry_run=True) @@ -820,7 +820,7 @@ def test_run_instance_with_instance_type(): @mock_ec2_deprecated def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 0a7fb9f76..dfe6eabdf 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -4,12 +4,46 @@ import tests.backport_assert_raises from nose.tools import assert_raises import boto -import six import sure # noqa from boto.exception import EC2ResponseError from moto import mock_ec2_deprecated +from .helpers import rsa_check_private_key + + +RSA_PUBLIC_KEY_OPENSSH = b"""\ +ssh-rsa \ +AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\ +6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\ +8kweyMQrhrt6HaKGgromRiz37LQx\ +4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\ +JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\ +A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\ +qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \ +moto@github.com""" + +RSA_PUBLIC_KEY_RFC4716 = b"""\ +---- BEGIN SSH2 PUBLIC KEY ---- +AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO +Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023 +mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS +VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct +FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ +wJlpGt2R+0qN7nKnPl2+hx +---- END SSH2 PUBLIC KEY ---- +""" + +RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd" + +DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \ +AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\ +OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\ +D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\ ++s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\ +uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\ +ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \ +moto@github.com""" + @mock_ec2_deprecated def
test_key_pairs_empty(): @@ -33,14 +67,15 @@ def test_key_pairs_create(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - kp = conn.create_key_pair('foo', dry_run=True) + conn.create_key_pair('foo', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + rsa_check_private_key(kp.material) + kps = conn.get_all_key_pairs() assert len(kps) == 1 assert kps[0].name == 'foo' @@ -49,13 +84,19 @@ def test_key_pairs_create(): @mock_ec2_deprecated def test_key_pairs_create_two(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - kp = conn.create_key_pair('bar') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + + kp1 = conn.create_key_pair('foo') + rsa_check_private_key(kp1.material) + + kp2 = conn.create_key_pair('bar') + rsa_check_private_key(kp2.material) + + assert kp1.material != kp2.material + kps = conn.get_all_key_pairs() kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') + assert {i.name for i in kps} == {'foo', 'bar'} + kps = conn.get_all_key_pairs('foo') kps.should.have.length_of(1) kps[0].name.should.equal('foo') @@ -64,8 +105,7 @@ def test_key_pairs_create_two(): @mock_ec2_deprecated def test_key_pairs_create_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + conn.create_key_pair('foo') assert len(conn.get_all_key_pairs()) == 1 with assert_raises(EC2ResponseError) as cm: @@ -105,23 +145,30 @@ def test_key_pairs_import(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) + conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' + kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) + assert kp1.name == 'foo' + assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + + kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716) + assert kp2.name == 'foo2' + assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' + assert len(kps) == 2 + assert kps[0].name == kp1.name + assert kps[1].name == kp2.name @mock_ec2_deprecated def test_key_pairs_import_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.import_key_pair('foo', b'content') + kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) assert kp.name == 'foo' assert len(conn.get_all_key_pairs()) == 1 @@ -132,6 +179,32 @@ def test_key_pairs_import_exist(): cm.exception.request_id.should_not.be.none +@mock_ec2_deprecated +def test_key_pairs_invalid(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'') + 
ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'garbage') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', DSA_PUBLIC_KEY_OPENSSH) + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + @mock_ec2_deprecated def test_key_pair_filters(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 9c92c949e..1c69624bf 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import boto import boto3 import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError from moto import mock_ec2_deprecated, mock_ec2 @@ -28,12 +30,12 @@ def test_new_subnet_associates_with_default_network_acl(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.get_all_vpcs()[0] - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + subnet = conn.create_subnet(vpc.id, "172.31.112.0/20") all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(1) acl = all_network_acls[0] - acl.associations.should.have.length_of(4) + acl.associations.should.have.length_of(7) [a.subnet_id for a in acl.associations].should.contain(subnet.id) @@ -214,3 +216,37 @@ def test_default_network_acl_default_entries(): unique_entries.append(entry) unique_entries.should.have.length_of(4) + + +@mock_ec2 +def test_delete_default_network_acl_default_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + first_default_network_acl_entry = default_network_acl.entries[0] + + default_network_acl.delete_entry(Egress=first_default_network_acl_entry['Egress'], + RuleNumber=first_default_network_acl_entry['RuleNumber']) + + default_network_acl.entries.should.have.length_of(3) + + +@mock_ec2 +def test_duplicate_network_acl_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + rule_number = 200 + egress = True + default_network_acl.create_entry(CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="allow", RuleNumber=rule_number) + + with assert_raises(ClientError) as ex: + default_network_acl.create_entry(CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="deny", RuleNumber=rule_number) + str(ex.exception).should.equal( + "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " + "operation: The network acl entry identified by {} already exists.".format(rule_number)) + + diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 1e87b253c..f94c78eaf 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -68,8 +68,10 @@ def 
test_create_autoscaling_group(): image_id='ami-abcd1234', instance_type='m1.small', ) - us_conn.create_launch_configuration(config) + x = us_conn.create_launch_configuration(config) + us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0] + ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0] group = boto.ec2.autoscale.AutoScalingGroup( name='us_tester_group', availability_zones=['us-east-1c'], @@ -82,7 +84,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["us_test_lb"], placement_group="us_test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=us_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) us_conn.create_auto_scaling_group(group) @@ -107,7 +109,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["ap_test_lb"], placement_group="ap_test_placement", - vpc_zone_identifier='subnet-5678efgh', + vpc_zone_identifier=ap_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) ap_conn.create_auto_scaling_group(group) @@ -121,7 +123,7 @@ def test_create_autoscaling_group(): us_group.desired_capacity.should.equal(2) us_group.max_size.should.equal(2) us_group.min_size.should.equal(2) - us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') + us_group.vpc_zone_identifier.should.equal(us_subnet_id) us_group.launch_config_name.should.equal('us_tester') us_group.default_cooldown.should.equal(60) us_group.health_check_period.should.equal(100) @@ -137,7 +139,7 @@ def test_create_autoscaling_group(): ap_group.desired_capacity.should.equal(2) ap_group.max_size.should.equal(2) ap_group.min_size.should.equal(2) - ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') + ap_group.vpc_zone_identifier.should.equal(ap_subnet_id) ap_group.launch_config_name.should.equal('ap_tester') ap_group.default_cooldown.should.equal(60) ap_group.health_check_period.should.equal(100) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index b27484468..de33b3f7a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -6,6 +6,7 @@ from nose.tools import assert_raises import boto import boto3 from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError import sure # noqa from moto import mock_ec2, mock_ec2_deprecated @@ -528,3 +529,26 @@ def test_network_acl_tagging(): if na.id == route_table.id) test_route_table.tags.should.have.length_of(1) test_route_table.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_create_route_with_invalid_destination_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.reload() + + internet_gateway = ec2.create_internet_gateway() + vpc.attach_internet_gateway(InternetGatewayId=internet_gateway.id) + internet_gateway.reload() + + destination_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + route = route_table.create_route(DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateRoute " + "operation: Value ({}) for parameter destinationCidrBlock is invalid. 
This is not a valid CIDR block.".format(destination_cidr_block)) \ No newline at end of file diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index d843087a6..c09b1e8f4 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -501,7 +501,7 @@ def test_sec_group_rule_limit_vpc(): ec2_conn = boto.connect_ec2() vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc('10.0.0.0/8') + vpc = vpc_conn.create_vpc('10.0.0.0/16') sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 190f3b1f1..6221d633f 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -7,7 +7,7 @@ from moto import mock_ec2 def get_subnet_id(conn): - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 05f8ee88f..ab08d392c 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -17,7 +17,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds @mock_ec2 def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 99e6d45d8..38c36f682 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -7,7 +7,7 @@ import boto3 import boto import boto.vpc from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError +from botocore.exceptions import ParamValidationError, ClientError import json import sure # noqa @@ -84,7 +84,7 @@ def test_default_subnet(): default_vpc.is_default.should.be.ok subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + VpcId=default_vpc.id, CidrBlock='172.31.48.0/20', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -118,7 +118,7 @@ def test_boto3_non_default_subnet(): @mock_ec2 -def test_modify_subnet_attribute(): +def test_modify_subnet_attribute_public_ip_on_launch(): ec2 = boto3.resource('ec2', region_name='us-west-1') client = boto3.client('ec2', region_name='us-west-1') @@ -126,7 +126,7 @@ def test_modify_subnet_attribute(): vpc = list(ec2.vpcs.all())[0] subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action subnet.reload() @@ -145,6 +145,34 @@ def test_modify_subnet_attribute(): subnet.map_public_ip_on_launch.should.be.ok +@mock_ec2 +def test_modify_subnet_attribute_assign_ipv6_address_on_creation(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + # Get the default VPC + vpc = list(ec2.vpcs.all())[0] 
+ + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='172.31.112.0/20', AvailabilityZone='us-west-1a') + + # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action + subnet.reload() + + # For non default subnet, attribute value should be 'False' + subnet.assign_ipv6_address_on_creation.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, AssignIpv6AddressOnCreation={'Value': False}) + subnet.reload() + subnet.assign_ipv6_address_on_creation.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, AssignIpv6AddressOnCreation={'Value': True}) + subnet.reload() + subnet.assign_ipv6_address_on_creation.should.be.ok + + @mock_ec2 def test_modify_subnet_attribute_validation(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -289,3 +317,130 @@ def test_subnet_tags_through_cloudformation(): subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] subnet.tags["foo"].should.equal("bar") subnet.tags["blah"].should.equal("baz") + + +@mock_ec2 +def test_create_subnet_response_fields(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = client.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')['Subnet'] + + subnet.should.have.key('AvailabilityZone') + subnet.should.have.key('AvailabilityZoneId') + subnet.should.have.key('AvailableIpAddressCount') + subnet.should.have.key('CidrBlock') + subnet.should.have.key('State') + subnet.should.have.key('SubnetId') + subnet.should.have.key('VpcId') + subnet.shouldnt.have.key('Tags') + subnet.should.have.key('DefaultForAz').which.should.equal(False) + subnet.should.have.key('MapPublicIpOnLaunch').which.should.equal(False) + subnet.should.have.key('OwnerId') + subnet.should.have.key('AssignIpv6AddressOnCreation').which.should.equal(False) + + subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format(region=subnet['AvailabilityZone'][0:-1], + owner_id=subnet['OwnerId'], + subnet_id=subnet['SubnetId']) + subnet.should.have.key('SubnetArn').which.should.equal(subnet_arn) + subnet.should.have.key('Ipv6CidrBlockAssociationSet').which.should.equal([]) + + +@mock_ec2 +def test_describe_subnet_response_fields(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet_object = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + + subnets = client.describe_subnets(SubnetIds=[subnet_object.id])['Subnets'] + subnets.should.have.length_of(1) + subnet = subnets[0] + + subnet.should.have.key('AvailabilityZone') + subnet.should.have.key('AvailabilityZoneId') + subnet.should.have.key('AvailableIpAddressCount') + subnet.should.have.key('CidrBlock') + subnet.should.have.key('State') + subnet.should.have.key('SubnetId') + subnet.should.have.key('VpcId') + subnet.shouldnt.have.key('Tags') + subnet.should.have.key('DefaultForAz').which.should.equal(False) + subnet.should.have.key('MapPublicIpOnLaunch').which.should.equal(False) + subnet.should.have.key('OwnerId') + subnet.should.have.key('AssignIpv6AddressOnCreation').which.should.equal(False) + + subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format(region=subnet['AvailabilityZone'][0:-1], + owner_id=subnet['OwnerId'], + subnet_id=subnet['SubnetId']) + 
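# Aside: the expected ARN above recovers the region by dropping the
# availability zone's trailing letter ('us-west-1a' -> 'us-west-1'),
# mirroring the test's own format string. Stand-alone illustration; the
# owner id and subnet id below are made-up placeholders:
availability_zone = 'us-west-1a'
region = availability_zone[:-1]
subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format(
    region=region, owner_id='123456789012', subnet_id='subnet-0123456789abcdef0')
assert subnet_arn == 'arn:aws:ec2:us-west-1:123456789012:subnet/subnet-0123456789abcdef0'
# The slicing is a test convenience that works because current AZ names end
# in a single letter; it is not a general region parser.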
subnet.should.have.key('SubnetArn').which.should.equal(subnet_arn) + subnet.should.have.key('Ipv6CidrBlockAssociationSet').which.should.equal([]) + + +@mock_ec2 +def test_create_subnet_with_invalid_availability_zone(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + subnet_availability_zone = 'asfasfas' + with assert_raises(ClientError) as ex: + subnet = client.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone=subnet_availability_zone) + assert str(ex.exception).startswith( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: ".format(subnet_availability_zone)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnets_with_overlapping_cidr_blocks(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.0.0.0/24' + with assert_raises(ClientError) as ex: + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " + "operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block)) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index c92a4f81f..2294979ba 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -5,6 +5,7 @@ import itertools import boto import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError from boto.ec2.instance import Reservation import sure # noqa @@ -451,3 +452,31 @@ def test_create_snapshot_with_tags(): }] assert snapshot['Tags'] == expected_tags + + +@mock_ec2 +def test_create_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # create tag with empty resource + with assert_raises(ClientError) as ex: + client.create_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') + + +@mock_ec2 +def test_delete_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # delete tag with empty resource + with assert_raises(ClientError) as ex: + client.delete_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py index ef540e193..49192dc79 100644 --- a/tests/test_ec2/test_utils.py +++ b/tests/test_ec2/test_utils.py @@ -1,8 +1,12 @@ from moto.ec2 import utils +from .helpers import rsa_check_private_key + def test_random_key_pair(): key_pair = utils.random_key_pair() - assert len(key_pair['fingerprint']) == 59 - assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') - assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') + rsa_check_private_key(key_pair['material']) + + # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1 + # fingerprints with 59 characters. 
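# Aside: the arithmetic behind 47 vs 59: an MD5 digest is 16 bytes, so a
# colon-separated hex rendering is 16*2 chars + 15 colons = 47; a SHA1
# digest is 20 bytes, giving 20*2 + 19 = 59. Illustrative computation with
# hashlib (the key material here is a placeholder, not what AWS hashes):
import hashlib

digest = hashlib.md5(b'placeholder key material').hexdigest()  # 32 hex chars
fingerprint = ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
assert len(fingerprint) == 47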
+ assert len(key_pair['fingerprint']) == 47 diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 082499a72..edfbfb3c2 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -107,14 +107,19 @@ def test_vpc_peering_connections_cross_region(): ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') # create peering - vpc_pcx = ec2_usw1.create_vpc_peering_connection( + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-1', ) - vpc_pcx.status['Code'].should.equal('initiating-request') - vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) - vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + vpc_pcx_usw1.status['Code'].should.equal('initiating-request') + vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id) + # test cross region vpc peering connection exist + vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id) + vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id) + vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id) @mock_ec2 @@ -131,3 +136,148 @@ def test_vpc_peering_connections_cross_region_fail(): PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-2') cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # accept peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active') + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active') + des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = 
ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + rej_pcx_apn1['Return'].should.equal(True) + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected') + des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_delete(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + del_pcx_apn1['Return'].should.equal(True) + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted') + des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + + # accept wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be ' \ + 'accepted in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with 
assert_raises(ClientError) as cm: + ec2_usw1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be accepted or ' \ + 'rejected in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 318491b44..ad17deb3c 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -539,3 +539,27 @@ def test_ipv6_cidr_block_association_filters(): filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', 'Values': ['associated']}])) filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateVpc " + "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(vpc_cidr_block)) + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '10.1.0.0/29' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidVpc.Range) when calling the CreateVpc " + "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block)) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index c0cef81a9..ec0e4e732 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -3,6 +3,8 @@ from __future__ import unicode_literals import hashlib import json from datetime import datetime +from freezegun import freeze_time +import os from random import random import re @@ -13,6 +15,7 @@ from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr +from nose import SkipTest def _create_image_digest(contents=None): @@ -198,6 +201,42 @@ def test_put_image(): response['image']['repositoryName'].should.equal('test_repository') response['image']['registryId'].should.equal('012345678910') + +@mock_ecr +def test_put_image_with_push_date(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + with freeze_time('2018-08-28 00:00:00'): + image1_date = datetime.now() + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + with freeze_time('2019-05-31 00:00:00'): + image2_date = datetime.now() + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(2) + + set([describe_response['imageDetails'][0]['imagePushedAt'], + 
describe_response['imageDetails'][1]['imagePushedAt']]).should.equal(set([image1_date, image2_date])) + + @mock_ecr def test_put_image_with_multiple_tags(): client = boto3.client('ecr', region_name='us-east-1') @@ -240,6 +279,7 @@ def test_put_image_with_multiple_tags(): len(response2['imageDetails'][0]['imageTags']).should.be(2) response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) + @mock_ecr def test_list_images(): client = boto3.client('ecr', region_name='us-east-1') @@ -695,3 +735,347 @@ def test_batch_get_image_no_tags(): client.batch_get_image.when.called_with( repositoryName='test_repository').should.throw( ParamValidationError, error_msg) + + +@mock_ecr +def test_batch_delete_image_by_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag, + ) + + describe_response1 = client.describe_images(repositoryName='test_repository') + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'latest' + }, + ], + ) + + describe_response2 = client.describe_images(repositoryName='test_repository') + + type(describe_response1['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response1['imageDetails'][0]['imageTags']).should.be(3) + + type(describe_response2['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response2['imageDetails'][0]['imageTags']).should.be(2) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(1) + + batch_delete_response['imageIds'][0]['imageTag'].should.equal("latest") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_delete_last_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1', + ) + + describe_response1 = client.describe_images(repositoryName='test_repository') + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v1' + }, + ], + ) + + describe_response2 = client.describe_images(repositoryName='test_repository') + + type(describe_response1['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response1['imageDetails'][0]['imageTags']).should.be(1) + + type(describe_response2['imageDetails']).should.be(list) + len(describe_response2['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(1) + + batch_delete_response['imageIds'][0]['imageTag'].should.equal("v1") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_nonexistent_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + 
repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag, + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + missing_tag = "missing-tag" + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': missing_tag + }, + ], + ) + + type(describe_response['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response['imageDetails'][0]['imageTags']).should.be(3) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + batch_delete_response['failures'][0]['imageId']['imageTag'].should.equal(missing_tag) + batch_delete_response['failures'][0]['failureCode'].should.equal("ImageNotFound") + batch_delete_response['failures'][0]['failureReason'].should.equal("Requested image not found") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + +@mock_ecr +def test_batch_delete_image_by_digest(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest + }, + ], + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(3) + + batch_delete_response['imageIds'][0]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][1]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][2]['imageDigest'].should.equal(image_digest) + + set([ + batch_delete_response['imageIds'][0]['imageTag'], + batch_delete_response['imageIds'][1]['imageTag'], + batch_delete_response['imageIds'][2]['imageTag']]).should.equal(set(tags)) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_invalid_digest(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + invalid_image_digest = 'sha256:invalid-digest' + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': invalid_image_digest + }, + ], + ) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + 
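# Aside: the failure reason asserted just below quotes the digest pattern
# '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'. Checking the test's inputs against an
# equivalent pattern (dash escaped for clarity) shows why
# 'sha256:invalid-digest' fails: everything after the colon must be hex.
import re

DIGEST_PATTERN = re.compile(r'[a-zA-Z0-9\-_+.]+:[a-fA-F0-9]+')
assert DIGEST_PATTERN.fullmatch('sha256:' + 'a' * 64) is not None
assert DIGEST_PATTERN.fullmatch('sha256:invalid-digest') is None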
batch_delete_response['failures'][0]['imageId']['imageDigest'].should.equal(invalid_image_digest) + batch_delete_response['failures'][0]['failureCode'].should.equal("InvalidImageDigest") + batch_delete_response['failures'][0]['failureReason'].should.equal("Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'") + + +@mock_ecr +def test_batch_delete_image_with_missing_parameters(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + }, + ], + ) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + batch_delete_response['failures'][0]['failureCode'].should.equal("MissingDigestAndTag") + batch_delete_response['failures'][0]['failureReason'].should.equal("Invalid request parameters: both tag and digest cannot be null") + + +@mock_ecr +def test_batch_delete_image_with_matching_digest_and_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest, + 'imageTag': 'v1' + }, + ], + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(3) + + batch_delete_response['imageIds'][0]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][1]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][2]['imageDigest'].should.equal(image_digest) + + set([ + batch_delete_response['imageIds'][0]['imageTag'], + batch_delete_response['imageIds'][1]['imageTag'], + batch_delete_response['imageIds'][2]['imageTag']]).should.equal(set(tags)) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_mismatched_digest_and_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest, + 'imageTag': 'v2' + }, + ], + ) + + 
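# Aside: this case pairs a digest that exists with a tag ('v2') that was
# never pushed, and the asserts below expect a single ImageNotFound failure:
# both halves of an imageId must match the same image. A rough sketch of
# that rule (illustrative, not moto's actual implementation):
def image_matches(image, image_id):
    if 'imageDigest' in image_id and image_id['imageDigest'] != image['imageDigest']:
        return False
    if 'imageTag' in image_id and image_id['imageTag'] not in image['imageTags']:
        return False
    return True

image = {'imageDigest': 'sha256:' + 'a' * 64, 'imageTags': ['v1', 'latest']}
assert image_matches(image, {'imageDigest': image['imageDigest'], 'imageTag': 'v1'})
assert not image_matches(image, {'imageDigest': image['imageDigest'], 'imageTag': 'v2'})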
type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + batch_delete_response['failures'][0]['imageId']['imageDigest'].should.equal(image_digest) + batch_delete_response['failures'][0]['imageId']['imageTag'].should.equal("v2") + batch_delete_response['failures'][0]['failureCode'].should.equal("ImageNotFound") + batch_delete_response['failures'][0]['failureReason'].should.equal("Requested image not found") diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index a0d470935..b147c4159 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -47,6 +47,15 @@ def test_list_clusters(): 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') +@mock_ecs +def test_describe_clusters(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.describe_clusters(clusters=["some-cluster"]) + response['failures'].should.contain({ + 'arn': 'arn:aws:ecs:us-east-1:012345678910:cluster/some-cluster', + 'reason': 'MISSING' + }) + @mock_ecs def test_delete_cluster(): client = boto3.client('ecs', region_name='us-east-1') @@ -379,23 +388,32 @@ def test_list_services(): cluster='test_ecs_cluster', serviceName='test_ecs_service1', taskDefinition='test_ecs_task', + schedulingStrategy='REPLICA', desiredCount=2 ) _ = client.create_service( cluster='test_ecs_cluster', serviceName='test_ecs_service2', taskDefinition='test_ecs_task', + schedulingStrategy='DAEMON', desiredCount=2 ) - response = client.list_services( + unfiltered_response = client.list_services( cluster='test_ecs_cluster' ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( + len(unfiltered_response['serviceArns']).should.equal(2) + unfiltered_response['serviceArns'][0].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( + unfiltered_response['serviceArns'][1].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + filtered_response = client.list_services( + cluster='test_ecs_cluster', + schedulingStrategy='REPLICA' + ) + len(filtered_response['serviceArns']).should.equal(1) + filtered_response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') @mock_ecs def test_describe_services(): diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index a67508430..447896f15 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -21,7 +21,7 @@ from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @mock_ec2_deprecated def test_create_load_balancer(): conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') + ec2 = boto.ec2.connect_to_region("us-east-1") security_group = ec2.create_security_group('sg-abc987', 'description') diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index b58345fdb..03273ad3a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -27,7 +27,7 @@ def test_create_load_balancer(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -69,7 +69,7 @@ def test_describe_load_balancers(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( 
VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( @@ -112,7 +112,7 @@ def test_add_remove_tags(): vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -234,7 +234,7 @@ def test_create_elb_in_multiple_region(): InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone=region + 'a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -275,7 +275,7 @@ def test_create_target_group_and_listeners(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -434,7 +434,7 @@ def test_create_target_group_without_non_required_parameters(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -538,7 +538,7 @@ def test_describe_paginated_balancers(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') for i in range(51): @@ -573,7 +573,7 @@ def test_delete_load_balancer(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -606,7 +606,7 @@ def test_register_targets(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( @@ -682,7 +682,7 @@ def test_target_group_attributes(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -773,7 +773,7 @@ def test_handle_listener_rules(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1078,7 +1078,7 @@ def test_describe_invalid_target_group(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1124,7 +1124,7 @@ def test_describe_target_groups_no_arguments(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1188,7 +1188,7 @@ def test_set_ip_address_type(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1238,7 +1238,7 @@ def test_set_security_groups(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = 
client.create_load_balancer( @@ -1275,11 +1275,11 @@ def test_set_subnets(): vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.64/26', AvailabilityZone='us-east-1b') subnet3 = ec2.create_subnet( VpcId=vpc.id, @@ -1332,7 +1332,7 @@ def test_set_subnets(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1421,7 +1421,7 @@ def test_modify_listener_http_to_https(): AvailabilityZone='eu-central-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='eu-central-1b') response = client.create_load_balancer( @@ -1586,3 +1586,143 @@ def test_create_target_groups_through_cloudformation(): assert len( [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] ) == 2 + + +@mock_elbv2 +@mock_ec2 +def test_redirect_action_listener_rule(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.128/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_listener(LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[ + {'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + }}]) + + listener = response.get('Listeners')[0] + expected_default_actions = [{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + } + }] + listener.get('DefaultActions').should.equal(expected_default_actions) + listener_arn = listener.get('ListenerArn') + + describe_rules_response = conn.describe_rules(ListenerArn=listener_arn) + describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions) + + describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ]) + describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions'] + describe_listener_actions.should.equal(expected_default_actions) + + modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81) + modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions'] + modify_listener_actions.should.equal(expected_default_actions) + + +@mock_elbv2 +@mock_cloudformation +def test_redirect_action_listener_rule_cloudformation(): + cnf_conn = boto3.client('cloudformation', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": 
"ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + } + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [{ + "Type": "redirect", + "RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + } + }] + } + + } + } + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb',]) + describe_load_balancers_response['LoadBalancers'].should.have.length_of(1) + load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn'] + + describe_listeners_response = elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn) + + describe_listeners_response['Listeners'].should.have.length_of(1) + describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301', + } + },]) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 237ff8bba..b9a5025d9 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -432,6 +432,47 @@ def test_run_job_flow_with_instance_groups(): x['BidPrice'].should.equal(y['BidPrice']) +@mock_emr +def test_run_job_flow_with_custom_ami(): + client = boto3.client('emr', region_name='us-east-1') + + with assert_raises(ClientError) as ex: + # CustomAmiId available in Amazon EMR 5.7.0 and later + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['ReleaseLabel'] = 'emr-5.6.0' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.equal('Custom AMI is not allowed') + + with assert_raises(ClientError) as ex: + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['AmiVersion'] = '3.8.1' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.equal( + 'Custom AMI is not supported in this version of EMR') + + with assert_raises(ClientError) as ex: + # AMI version and release label exception raises before CustomAmi exception + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['ReleaseLabel'] = 'emr-5.6.0' + args['AmiVersion'] = '3.8.1' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.contain( + 'Only one AMI version and release label may be specified.') + + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomAmi' + 
args['ReleaseLabel'] = 'emr-5.7.0' + cluster_id = client.run_job_flow(**args)['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['CustomAmiId'].should.equal('MyEmrCustomAmi') + + @mock_emr def test_set_termination_protection(): client = boto3.client('emr', region_name='us-east-1') diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index a457d5127..232ab3019 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -209,6 +209,47 @@ def test_get_table_when_database_not_exits(): exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') +@mock_glue +def test_delete_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.delete_table(DatabaseName=database_name, Name=table_name) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + +@mock_glue +def test_batch_delete_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.batch_delete_table(DatabaseName=database_name, TablesToDelete=[table_name]) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + + @mock_glue def test_get_partitions_empty(): client = boto3.client('glue', region_name='us-east-1') @@ -289,6 +330,72 @@ def test_get_partition_not_found(): exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') exc.exception.response['Error']['Message'].should.match('partition') +@mock_glue +def test_batch_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(20) + + for idx, partition in enumerate(partitions): + 
partition_input = partition_inputs[idx] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(partition_input['StorageDescriptor']) + partition['Values'].should.equal(partition_input['Values']) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_batch_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + partition_input = helpers.create_partition_input(database_name, table_name, values=values) + + response = client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=[partition_input] + ) + + response.should.have.key('Errors') + response['Errors'].should.have.length_of(1) + response['Errors'][0]['PartitionValues'].should.equal(values) + response['Errors'][0]['ErrorDetail']['ErrorCode'].should.equal('AlreadyExistsException') + @mock_glue def test_get_partition(): @@ -424,3 +531,112 @@ def test_update_partition_move(): partition['TableName'].should.equal(table_name) partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + +@mock_glue +def test_delete_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + client.delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + partitions = response['Partitions'] + partitions.should.be.empty + +@mock_glue +def test_delete_partition_bad_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + client.delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + +@mock_glue +def test_batch_delete_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + partition_values = [{"Values": p["Values"]} for p in partition_inputs] + + response = client.batch_delete_partition( + DatabaseName=database_name, + 
TableName=table_name, + PartitionsToDelete=partition_values, + ) + + response.should_not.have.key('Errors') + +@mock_glue +def test_batch_delete_partition_with_bad_partitions(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + partition_values = [{"Values": p["Values"]} for p in partition_inputs] + + partition_values.insert(5, {"Values": ["2018-11-01"]}) + partition_values.insert(10, {"Values": ["2018-11-02"]}) + partition_values.insert(15, {"Values": ["2018-11-03"]}) + + response = client.batch_delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionsToDelete=partition_values, + ) + + response.should.have.key('Errors') + response['Errors'].should.have.length_of(3) + error_partitions = map(lambda x: x['PartitionValues'], response['Errors']) + ['2018-11-01'].should.be.within(error_partitions) + ['2018-11-02'].should.be.within(error_partitions) + ['2018-11-03'].should.be.within(error_partitions) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index ceec5e06a..0d96fd1b1 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import base64 +import json import boto import boto3 @@ -29,6 +30,44 @@ FyDHrtlrS80dPUQWNYHw++oACDpWO01LGLPPrGmuO/7cOdojPEd852q5gd+7W9xt 8vUH+pBa6IBLbvBp+szli51V3TLSWcoyy4ceJNQU2vCkTLoFdS0RLd/7tQ== -----END CERTIFICATE-----""" +MOCK_POLICY = """ +{ + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + +MOCK_POLICY_2 = """ +{ + "Version": "2012-10-17", + "Id": "2", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + +MOCK_POLICY_3 = """ +{ + "Version": "2012-10-17", + "Id": "3", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + @mock_iam_deprecated() def test_get_all_server_certs(): @@ -128,7 +167,6 @@ def test_create_role_and_instance_profile(): profile = conn.create_instance_profile('my-other-profile') profile.path.should.equal('/') - @mock_iam_deprecated() def test_remove_role_from_instance_profile(): conn = boto.connect_iam() @@ -244,12 +282,12 @@ def test_list_instance_profiles_for_role(): def test_list_role_policies(): conn = boto.connect_iam() conn.create_role("my-role") - conn.put_role_policy("my-role", "test policy", "my policy") + conn.put_role_policy("my-role", "test policy", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy") - conn.put_role_policy("my-role", "test policy 2", "another policy") + conn.put_role_policy("my-role", "test policy 2", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(2) @@ -267,7 +305,7 @@ def test_put_role_policy(): conn = boto.connect_iam() conn.create_role( "my-role", assume_role_policy_document="some 
policy", path="my-path") - conn.put_role_policy("my-role", "test policy", "my policy") + conn.put_role_policy("my-role", "test policy", MOCK_POLICY) policy = conn.get_role_policy( "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] policy.should.equal("test policy") @@ -287,7 +325,7 @@ def test_create_policy(): conn = boto3.client('iam', region_name='us-east-1') response = conn.create_policy( PolicyName="TestCreatePolicy", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") @@ -300,11 +338,20 @@ def test_create_policy_versions(): PolicyDocument='{"some":"policy"}') conn.create_policy( PolicyName="TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + PolicyDocument=MOCK_POLICY, + SetAsDefault=True) + version.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) + version.get('PolicyVersion').get('VersionId').should.equal("v2") + conn.delete_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + VersionId="v1") + version = conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument=MOCK_POLICY) + version.get('PolicyVersion').get('VersionId').should.equal("v3") @mock_iam @@ -312,10 +359,21 @@ def test_get_policy(): conn = boto3.client('iam', region_name='us-east-1') response = conn.create_policy( PolicyName="TestGetPolicy", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) policy = conn.get_policy( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") - response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") + policy['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") + + +@mock_iam +def test_get_aws_managed_policy(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/IAMUserChangePassword' + managed_policy_create_date = datetime.strptime("2016-11-15T00:25:16+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + policy = conn.get_policy( + PolicyArn=managed_policy_arn) + policy['Policy']['Arn'].should.equal(managed_policy_arn) + policy['Policy']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_create_date) @mock_iam @@ -323,10 +381,10 @@ def test_get_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.get_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", @@ -334,7 +392,39 @@ def test_get_policy_version(): retrieved = conn.get_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", VersionId=version.get('PolicyVersion').get('VersionId')) - retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + retrieved.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) + + +@mock_iam +def 
test_get_aws_managed_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' + managed_policy_version_create_date = datetime.strptime("2015-04-09T15:03:43+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + with assert_raises(ClientError): + conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId='v2-does-not-exist') + retrieved = conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId="v1") + retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) + retrieved['PolicyVersion']['Document'].should.be.an(dict) + + +@mock_iam +def test_get_aws_managed_policy_v4_version(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/job-function/SystemAdministrator' + managed_policy_version_create_date = datetime.strptime("2018-10-08T21:33:45+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + with assert_raises(ClientError): + conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId='v2-does-not-exist') + retrieved = conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId="v4") + retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) + retrieved['PolicyVersion']['Document'].should.be.an(dict) @mock_iam @@ -345,22 +435,22 @@ def test_list_policy_versions(): PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") conn.create_policy( PolicyName="TestListPolicyVersions", - PolicyDocument='{"first":"policy"}') + PolicyDocument=MOCK_POLICY) versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") versions.get('Versions')[0].get('VersionId').should.equal('v1') - + conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"second":"policy"}') + PolicyDocument=MOCK_POLICY_2) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"third":"policy"}') + PolicyDocument=MOCK_POLICY_3) versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") print(versions.get('Versions')) - versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) - versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'}) + versions.get('Versions')[1].get('Document').should.equal(json.loads(MOCK_POLICY_2)) + versions.get('Versions')[2].get('Document').should.equal(json.loads(MOCK_POLICY_3)) @mock_iam @@ -368,10 +458,10 @@ def test_delete_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestDeletePolicyVersion", - PolicyDocument='{"first":"policy"}') + PolicyDocument=MOCK_POLICY) conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - PolicyDocument='{"second":"policy"}') + PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", @@ -401,6 +491,19 @@ def test_get_user(): conn.get_user('my-user') +@mock_iam() +def test_update_user(): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.update_user(UserName='my-user') + conn.create_user(UserName='my-user') + conn.update_user(UserName='my-user', NewPath='/new-path/', 
NewUserName='new-user') + response = conn.get_user(UserName='new-user') + response['User'].get('Path').should.equal('/new-path/') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.get_user(UserName='my-user') + + @mock_iam_deprecated() def test_get_current_user(): """If no user is specified, IAM returns the current user""" @@ -425,22 +528,20 @@ def test_list_users(): @mock_iam() def test_user_policies(): policy_name = 'UserManagedPolicy' - policy_document = "{'mypolicy': 'test'}" user_name = 'my-user' conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName=user_name) conn.put_user_policy( UserName=user_name, PolicyName=policy_name, - PolicyDocument=policy_document + PolicyDocument=MOCK_POLICY ) policy_doc = conn.get_user_policy( UserName=user_name, PolicyName=policy_name ) - test = policy_document in policy_doc['PolicyDocument'] - test.should.equal(True) + policy_doc['PolicyDocument'].should.equal(json.loads(MOCK_POLICY)) policies = conn.list_user_policies(UserName=user_name) len(policies['PolicyNames']).should.equal(1) @@ -601,7 +702,7 @@ def test_managed_policy(): conn = boto.connect_iam() conn.create_policy(policy_name='UserManagedPolicy', - policy_document={'mypolicy': 'test'}, + policy_document=MOCK_POLICY, path='/mypolicy/', description='my user managed policy') @@ -702,7 +803,7 @@ def test_attach_detach_user_policy(): policy_name = 'UserAttachedPolicy' policy = iam.create_policy(PolicyName=policy_name, - PolicyDocument='{"mypolicy": "test"}', + PolicyDocument=MOCK_POLICY, Path='/mypolicy/', Description='my user attached policy') @@ -758,7 +859,6 @@ def test_get_access_key_last_used(): @mock_iam def test_get_account_authorization_details(): - import json test_policy = json.dumps({ "Version": "2012-10-17", "Statement": [ @@ -1190,7 +1290,6 @@ def test_update_role(): @mock_iam() def test_list_entities_for_policy(): - import json test_policy = json.dumps({ "Version": "2012-10-17", "Statement": [ @@ -1265,4 +1364,29 @@ def test_list_entities_for_policy(): assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] + +@mock_iam() +def test_create_role_no_path(): + conn = boto3.client('iam', region_name='us-east-1') + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test') + resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role') + resp.get('Role').should_not.have.key('PermissionsBoundary') + + +@mock_iam() +def test_create_role_with_permissions_boundary(): + conn = boto3.client('iam', region_name='us-east-1') + boundary = 'arn:aws:iam::123456789012:policy/boundary' + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary) + expected = { + 'PermissionsBoundaryType': 'PermissionsBoundaryPolicy', + 'PermissionsBoundaryArn': boundary + } + resp.get('Role').get('PermissionsBoundary').should.equal(expected) + + invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary' + with assert_raises(ClientError): + conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn) + + # Ensure the PermissionsBoundary is included in role listing as well + conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 0d4756f75..1ca9f2512 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -10,6
+10,18 @@ from nose.tools import assert_raises from boto.exception import BotoServerError from moto import mock_iam, mock_iam_deprecated +MOCK_POLICY = """ +{ + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + @mock_iam_deprecated() def test_create_group(): @@ -101,7 +113,7 @@ def test_get_groups_for_user(): def test_put_group_policy(): conn = boto.connect_iam() conn.create_group('my-group') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) @mock_iam @@ -131,7 +143,7 @@ def test_get_group_policy(): with assert_raises(BotoServerError): conn.get_group_policy('my-group', 'my-policy') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) conn.get_group_policy('my-group', 'my-policy') @@ -141,7 +153,7 @@ def test_get_all_group_policies(): conn.create_group('my-group') policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] assert policies == [] - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] assert policies == ['my-policy'] @@ -151,5 +163,5 @@ def test_list_group_policies(): conn = boto3.client('iam', region_name='us-east-1') conn.create_group(GroupName='my-group') conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty - conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') + conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument=MOCK_POLICY) conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py new file mode 100644 index 000000000..e1924a559 --- /dev/null +++ b/tests/test_iam/test_iam_policies.py @@ -0,0 +1,1861 @@ +import json + +import boto3 +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_iam + +invalid_policy_document_test_cases = [ + { + "document": "This is not a json document", + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2008-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2013-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": ["afd"] + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + "Extra field": "value" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Extra field": "value" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": ["cd3a324d2343d942772346-34234234423404-4c2242343242349d1642ee"], + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": {}, + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "invalid", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "a a:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Vendor a a is not valid' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:List:Bucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s:3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "invalid resource" + } + }, + "error_message": 'Resource invalid resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableDisableHongKong", + "Effect": "Allow", + "Action": [ + "account:EnableRegion", + "account:DisableRegion" + ], + "Resource": "", + "Condition": { + "StringEquals": {"account:TargetRegion": "ap-east-1"} + } + }, + { + "Sid": "ViewConsole", + "Effect": "Allow", + "Action": [ + "aws-portal:ViewAccount", + "account:ListRegions" + ], + "Resource": "" + } + ] + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s:3:ListBucket", + "Resource": "sdfsadf" + } + }, + "error_message": 'Resource sdfsadf must be in ARN format or "*".' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["adf"] + } + }, + "error_message": 'Resource adf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "" + } + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:bsdfdsafsad" + } + }, + "error_message": 'Partition "bsdfdsafsad" is not valid for resource "arn:bsdfdsafsad:*:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:cadfsdf" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:cadfsdf:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:c:d:e:f:g:h" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:c:d:e:f:g:h".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "aws:s3:::example_bucket" + } + }, + "error_message": 'Partition "s3" is not valid for resource "arn:s3:::example_bucket:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:error:s3:::example_bucket", + "arn:error:s3::example_bucket" + ] + } + }, + "error_message": 'Partition "error" is not valid for resource "arn:error:s3:::example_bucket".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [] + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Policy statement must contain actions.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::example_bucket" + } + }, + "error_message": 'IAM resource path must either be "*" or start with user/, federated-user/, role/, group/, instance-profile/, mfa/, server-certificate/, policy/, sms-mfa/, saml-provider/, oidc-provider/, report/, access-report/.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws" + } + }, + "error_message": 'Resource vendor must be fully qualified and cannot contain regexes.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { + "a": "arn:aws:s3:::example_bucket" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:ListBucket", + "Resource": ["adfdf", {}] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": [[]], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": {}, + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": "a" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": [] + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + "a": "1" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue::StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [ + {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + ] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:us-east-1::example_bucket" + } + }, + "error_message": 'IAM resource arn:aws:iam:us-east-1::example_bucket cannot contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'Resource arn:aws:s3:us-east-1::example_bucket can not contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": {}, + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": [], + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Statement IDs (SID) in a single policy must be unique.' + }, + { + "document": { + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Action": "iam:dsf", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": "*" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws::::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "allow", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "aLLow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "NotResource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "234-13" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+1" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.1999999999+10:59" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + 
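# 9223372036854775808 == 2**63, larger than any 64-bit epoch timestamp, hence the expected legacy-parsing failure +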
"DateLessThan": { + "a": "9223372036854775808" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:error:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "sdfdsf" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws::fdsasf" + } + }, + "error_message": 'The policy failed legacy parsing' + } +] + +valid_policy_documents = [ + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "iam: asdf safdsf af ", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket", + "*" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "service-prefix:action-name", + "Resource": "*", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "fsx:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::user/example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s33:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:fdsasf" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:cloudwatch:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:ec2:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": 
"arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": []} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Sid": "dsfsdfsdfsdfsdfsadfsd", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleDisplay", + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:GetUser", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListUsers", + "iam:ListUserTags" + ], + "Resource": "*" + }, + { + "Sid": "AddTag", + "Effect": "Allow", + "Action": [ + "iam:TagUser", + "iam:TagRole" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/CostCenter": [ + "A-123", + "B-456" + ] + }, + "ForAllValues:StringEquals": {"aws:TagKeys": "CostCenter"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:*", + "NotResource": [ + "arn:aws:s3:::HRBucket/Payroll", + "arn:aws:s3:::HRBucket/Payroll/*" + ] + } + }, + { + "Version": "2012-10-17", + "Id": "sdfsdfsdf", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "aaaaaadsfdsafsadfsadfaaaaa:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3-s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3.s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "NotResource": "*" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "01T" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + }, + "y": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "StringEqualsIfExists": { + "a": "asf" + } + } + 
} + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue:StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2019-07-01T13:20:15Z" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13T21:20:37.593194+00:00" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+23" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "-292275054" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:ListVirtualMFADevices" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnVirtualMFADevice", + "Effect": "Allow", + "Action": [ + "iam:CreateVirtualMFADevice", + "iam:DeleteVirtualMFADevice" + ], + "Resource": "arn:aws:iam::*:mfa/${aws:username}" + }, + { + "Sid": "AllowManageOwnUserMFA", + "Effect": "Allow", + "Action": [ + "iam:DeactivateMFADevice", + "iam:EnableMFADevice", + "iam:ListMFADevices", + "iam:ResyncMFADevice" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "DenyAllExceptListedIfNoMFA", + "Effect": "Deny", + "NotAction": [ + "iam:CreateVirtualMFADevice", + "iam:EnableMFADevice", + "iam:GetUser", + "iam:ListMFADevices", + "iam:ListVirtualMFADevices", + "iam:ResyncMFADevice", + "sts:GetSessionToken" + ], + "Resource": "*", + "Condition": { + "BoolIfExists": { + "aws:MultiFactorAuthPresent": "false" + } + } + } + ] + }, + { + 
"Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListAndDescribe", + "Effect": "Allow", + "Action": [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ], + "Resource": "*" + }, + { + "Sid": "SpecificTable", + "Effect": "Allow", + "Action": [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ], + "Resource": "arn:aws:dynamodb:*:*:table/MyTable" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:instance/*" + ], + "Condition": { + "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/Department": "Development"} + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:volume/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/VolumeUser": "${aws:username}"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "StartStopIfTags", + "Effect": "Allow", + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:DescribeTags" + ], + "Resource": "arn:aws:ec2:region:account-id:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Project": "DataAnalytics", + "aws:PrincipalTag/Department": "Data" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListYourObjects", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"], + "Condition": { + "StringLike": { + "s3:prefix": ["cognito/application-name/${cognito-identity.amazonaws.com:sub}"] + } + } + }, + { + "Sid": "ReadWriteDeleteYourObjects", + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}", + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::bucket-name", + "Condition": { + "StringLike": { + "s3:prefix": [ + "", + "home/", + "home/${aws:userid}/*" + ] + } + } + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::bucket-name/home/${aws:userid}", + "arn:aws:s3:::bucket-name/home/${aws:userid}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleAccess", + "Effect": "Allow", + "Action": [ + "s3:GetAccountPublicAccessBlock", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "s3:GetBucketPolicyStatus", + "s3:GetBucketPublicAccessBlock", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": 
"AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "ec2:*", + "Resource": "*", + "Effect": "Allow", + "Condition": { + "StringEquals": { + "ec2:Region": "region" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "rds:*", + "Resource": ["arn:aws:rds:region:*:*"] + }, + { + "Effect": "Allow", + "Action": ["rds:Describe*"], + "Resource": ["*"] + } + ] + } +] + + +def test_create_policy_with_invalid_policy_documents(): + for test_case in invalid_policy_document_test_cases: + yield check_create_policy_with_invalid_policy_document, test_case + + +def test_create_policy_with_valid_policy_documents(): + for valid_policy_document in valid_policy_documents: + yield check_create_policy_with_valid_policy_document, valid_policy_document + + +@mock_iam +def check_create_policy_with_invalid_policy_document(test_case): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError) as ex: + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(test_case["document"])) + ex.exception.response['Error']['Code'].should.equal('MalformedPolicyDocument') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal(test_case["error_message"]) + + +@mock_iam +def check_create_policy_with_valid_policy_document(valid_policy_document): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(valid_policy_document)) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 826d2c56b..a580f56d1 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -228,7 +228,7 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): @mock_iot def test_certs(): - client = 
boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot', region_name='us-east-1') cert = client.create_keys_and_certificate(setAsActive=True) cert.should.have.key('certificateArn').which.should_not.be.none cert.should.have.key('certificateId').which.should_not.be.none @@ -245,6 +245,29 @@ def test_certs(): cert_desc.should.have.key('certificateId').which.should_not.be.none cert_desc.should.have.key('certificatePem').which.should_not.be.none cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_pem = cert_desc['certificatePem'] + + res = client.list_certificates() + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + # Test register_certificate flow + cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificateArn').which.should_not.be.none + cert_id = cert['certificateId'] res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(1) @@ -256,11 +279,12 @@ def test_certs(): client.update_certificate(certificateId=cert_id, newStatus='REVOKED') cert = client.describe_certificate(certificateId=cert_id) - cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') client.delete_certificate(certificateId=cert_id) res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(0) + res.should.have.key('certificates') @mock_iot diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index c70236978..e2de866fc 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals -import boto.kinesis -from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException -import boto3 -import sure # noqa import datetime import time +import boto.kinesis +import boto3 +from boto.kinesis.exceptions import ResourceNotFoundException, \ + InvalidArgumentException + from moto import mock_kinesis, mock_kinesis_deprecated @@ -14,7 +15,7 @@ from moto import mock_kinesis, mock_kinesis_deprecated def test_create_cluster(): conn = boto.kinesis.connect_to_region("us-west-2") - conn.create_stream("my_stream", 2) + conn.create_stream("my_stream", 3) stream_response = conn.describe_stream("my_stream") @@ -26,7 +27,7 @@ def test_create_cluster(): stream["StreamStatus"].should.equal("ACTIVE") shards = stream['Shards'] - shards.should.have.length_of(2) + shards.should.have.length_of(3) @mock_kinesis_deprecated @@ -73,6 +74,23 @@ def test_list_many_streams(): has_more_streams.should.equal(False) +@mock_kinesis +def test_describe_stream_summary(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = 'my_stream_summary' + shard_count = 5 
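+ # describe_stream_summary returns only aggregate figures such as OpenShardCount, so no per-shard data is asserted below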
+ conn.create_stream(StreamName=stream_name, ShardCount=shard_count) + + resp = conn.describe_stream_summary(StreamName=stream_name) + stream = resp["StreamDescriptionSummary"] + + stream["StreamName"].should.equal(stream_name) + stream["OpenShardCount"].should.equal(shard_count) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:{}".format(stream_name)) + stream["StreamStatus"].should.equal("ACTIVE") + + @mock_kinesis_deprecated def test_basic_shard_iterator(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -100,7 +118,8 @@ def test_get_invalid_shard_iterator(): conn.create_stream(stream_name, 1) conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + stream_name, "123", 'TRIM_HORIZON').should.throw( + ResourceNotFoundException) @mock_kinesis_deprecated @@ -354,8 +373,8 @@ def test_get_records_timestamp_filtering(): timestamp = datetime.datetime.utcnow() conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') + Data='1', + PartitionKey='1') response = conn.describe_stream(StreamName=stream_name) shard_id = response['StreamDescription']['Shards'][0]['ShardId'] @@ -368,7 +387,7 @@ def test_get_records_timestamp_filtering(): response = conn.get_records(ShardIterator=shard_iterator) response['Records'].should.have.length_of(1) response['Records'][0]['PartitionKey'].should.equal('1') - response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ + response['Records'][0]['ApproximateArrivalTimestamp'].should.be. \ greater_than(timestamp) response['MillisBehindLatest'].should.equal(0) @@ -461,7 +480,8 @@ def test_invalid_shard_iterator_type(): response = conn.describe_stream(stream_name) shard_id = response['StreamDescription']['Shards'][0]['ShardId'] response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) + stream_name, shard_id, 'invalid-type').should.throw( + InvalidArgumentException) @mock_kinesis_deprecated @@ -549,7 +569,8 @@ def test_split_shard(): shard_range = shards[0]['HashKeyRange'] new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -562,7 +583,8 @@ def test_split_shard(): shard_range = shards[2]['HashKeyRange'] new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -592,7 +614,8 @@ def test_merge_shards(): shards.should.have.length_of(4) conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + stream_name, 'shardId-000000000000', + 'shardId-000000000002').should.throw(InvalidArgumentException) stream_response = conn.describe_stream(stream_name) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 0f7bab4cd..f0d77d3e9 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -2,26 +2,30 @@ from __future__ import unicode_literals import os, re import boto3 import boto.kms +import botocore.exceptions from boto.exception import JSONResponseError from 
boto.kms.exceptions import AlreadyExistsException, NotFoundException + +from moto.kms.exceptions import NotFoundException as MotoNotFoundException import sure # noqa from moto import mock_kms, mock_kms_deprecated from nose.tools import assert_raises from freezegun import freeze_time -from datetime import datetime, timedelta -from dateutil.tz import tzlocal +from datetime import datetime +from dateutil.tz import tzutc @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") + with freeze_time("2015-01-01 00:00:00"): + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['CreationDate'].should.equal("1420070400") @mock_kms_deprecated @@ -127,7 +131,7 @@ def test_enable_key_rotation_via_arn(): def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.enable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -142,7 +146,7 @@ def test_enable_key_rotation_with_alias_name_should_fail(): alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) conn.enable_key_rotation.when.called_with( - 'alias/my-alias').should.throw(JSONResponseError) + 'alias/my-alias').should.throw(NotFoundException) @mock_kms_deprecated @@ -171,6 +175,7 @@ def test_encrypt(): conn = boto.kms.connect_to_region("us-west-2") response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') + response['KeyId'].should.equal('key_id') @mock_kms_deprecated @@ -184,14 +189,14 @@ def test_decrypt(): def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.disable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.get_key_rotation_status.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -277,7 +282,7 @@ def test_put_key_policy_via_alias_should_not_update(): target_key_id=key['KeyMetadata']['KeyId']) conn.put_key_policy.when.called_with( - 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + 'alias/my-key-alias', 'default', 'new policy').should.throw(NotFoundException) policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') policy['Policy'].should.equal('my policy') @@ -597,9 +602,9 @@ def test__assert_valid_key_id(): import uuid _assert_valid_key_id.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(MotoNotFoundException) _assert_valid_key_id.when.called_with( - str(uuid.uuid4())).should_not.throw(JSONResponseError) + str(uuid.uuid4())).should_not.throw(MotoNotFoundException) @mock_kms_deprecated @@ -607,9 +612,9 @@ def test__assert_default_policy(): from moto.kms.responses import 
_assert_default_policy _assert_default_policy.when.called_with( - "not-default").should.throw(JSONResponseError) + "not-default").should.throw(MotoNotFoundException) _assert_default_policy.when.called_with( - "default").should_not.throw(JSONResponseError) + "default").should_not.throw(MotoNotFoundException) @mock_kms @@ -660,7 +665,7 @@ def test_schedule_key_deletion(): KeyId=key['KeyMetadata']['KeyId'] ) assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzutc()) else: # Can't manipulate time in server mode response = client.schedule_key_deletion( @@ -685,7 +690,7 @@ def test_schedule_key_deletion_custom(): PendingWindowInDays=7 ) assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzutc()) else: # Can't manipulate time in server mode response = client.schedule_key_deletion( @@ -773,3 +778,206 @@ def test_list_resource_tags(): response = client.list_resource_tags(KeyId=keyid) assert response['Tags'][0]['TagKey'] == 'string' assert response['Tags'][0]['TagValue'] == 'string' + + +@mock_kms +def test_generate_data_key_sizes(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128' + ) + resp3 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=64 + ) + + assert len(resp1['Plaintext']) == 32 + assert len(resp2['Plaintext']) == 16 + assert len(resp3['Plaintext']) == 64 + + +@mock_kms +def test_generate_data_key_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.decrypt( + CiphertextBlob=resp1['CiphertextBlob'] + ) + + assert resp1['Plaintext'] == resp2['Plaintext'] + + +@mock_kms +def test_generate_data_key_invalid_size_params(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_257' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128', + NumberOfBytes=16 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=2048 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + +@mock_kms +def test_generate_data_key_invalid_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId='alias/randomnonexistantkey', + KeySpec='AES_256' + ) + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + '4', + 
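# the appended '4' turns the valid key id into one that cannot exist +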
KeySpec='AES_256' + ) + + +@mock_kms +def test_generate_data_key_without_plaintext_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key_without_plaintext( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + + assert 'Plaintext' not in resp1 + + +@mock_kms +def test_enable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_enable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_cancel_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.cancel_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_schedule_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.schedule_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_rotation_status_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_rotation_status( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_policy( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02', + PolicyName='default' + ) + + +@mock_kms +def test_list_key_policies_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.list_key_policies( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_put_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.put_key_policy( + KeyId='00000000-0000-0000-0000-000000000000', + PolicyName='default', + Policy='new policy' + ) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e3d46fd87..7048061f0 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -17,6 +17,8 @@ def test_log_group_create(): response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) assert len(response['logGroups']) == 1 + # AWS defaults to Never Expire for log group retention + assert response['logGroups'][0].get('retentionInDays') == None response = conn.delete_log_group(logGroupName=log_group_name) @@ -126,3 +128,37 @@ def 
test_filter_logs_interleaved(): resulting_event['timestamp'].should.equal(original_message['timestamp']) resulting_event['message'].should.equal(original_message['message']) +@mock_logs +def test_put_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_log_group(logGroupName=log_group_name) + +@mock_logs +def test_delete_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_retention_policy(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == None + + response = conn.delete_log_group(logGroupName=log_group_name) + diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 6548b1830..36933d41a 100644 --- a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -5,38 +5,36 @@ import sure # noqa import datetime from moto.organizations import utils -EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" -ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE -ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE -OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) -ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE -CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE - def test_make_random_org_id(): org_id = utils.make_random_org_id() - org_id.should.match(ORG_ID_REGEX) + org_id.should.match(utils.ORG_ID_REGEX) def test_make_random_root_id(): root_id = utils.make_random_root_id() - root_id.should.match(ROOT_ID_REGEX) + root_id.should.match(utils.ROOT_ID_REGEX) def test_make_random_ou_id(): root_id = utils.make_random_root_id() ou_id = utils.make_random_ou_id(root_id) - ou_id.should.match(OU_ID_REGEX) + ou_id.should.match(utils.OU_ID_REGEX) def test_make_random_account_id(): account_id = utils.make_random_account_id() - account_id.should.match(ACCOUNT_ID_REGEX) + account_id.should.match(utils.ACCOUNT_ID_REGEX) def test_make_random_create_account_status_id(): create_account_status_id = utils.make_random_create_account_status_id() - create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def test_make_random_service_control_policy_id(): + service_control_policy_id = utils.make_random_service_control_policy_id() + service_control_policy_id.should.match(utils.SCP_ID_REGEX) def validate_organization(response): @@ -50,7 +48,7 @@ def validate_organization(response): 'MasterAccountEmail', 'MasterAccountId', ]) - org['Id'].should.match(ORG_ID_REGEX) + 
org['Id'].should.match(utils.ORG_ID_REGEX) org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( org['MasterAccountId'], @@ -72,7 +70,7 @@ def validate_roots(org, response): response.should.have.key('Roots').should.be.a(list) response['Roots'].should_not.be.empty root = response['Roots'][0] - root.should.have.key('Id').should.match(ROOT_ID_REGEX) + root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX) root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], @@ -87,7 +85,7 @@ def validate_roots(org, response): def validate_organizational_unit(org, response): response.should.have.key('OrganizationalUnit').should.be.a(dict) ou = response['OrganizationalUnit'] - ou.should.have.key('Id').should.match(OU_ID_REGEX) + ou.should.have.key('Id').should.match(utils.OU_ID_REGEX) ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], @@ -106,13 +104,13 @@ def validate_account(org, account): 'Name', 'Status', ]) - account['Id'].should.match(ACCOUNT_ID_REGEX) + account['Id'].should.match(utils.ACCOUNT_ID_REGEX) account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( org['MasterAccountId'], org['Id'], account['Id'], )) - account['Email'].should.match(EMAIL_REGEX) + account['Email'].should.match(utils.EMAIL_REGEX) account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) account['Name'].should.be.a(six.string_types) @@ -128,9 +126,27 @@ def validate_create_account_status(create_status): 'RequestedTimestamp', 'State', ]) - create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) + create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX) create_status['AccountName'].should.be.a(six.string_types) create_status['State'].should.equal('SUCCEEDED') create_status['RequestedTimestamp'].should.be.a(datetime.datetime) create_status['CompletedTimestamp'].should.be.a(datetime.datetime) + +def validate_policy_summary(org, summary): + summary.should.be.a(dict) + summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX) + summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + summary['Id'], + )) + summary.should.have.key('Name').should.be.a(six.string_types) + summary.should.have.key('Description').should.be.a(six.string_types) + summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + summary.should.have.key('AwsManaged').should.be.a(bool) + +def validate_service_control_policy(org, response): + response.should.have.key('PolicySummary').should.be.a(dict) + response.should.have.key('Content').should.be.a(six.string_types) + validate_policy_summary(org, response['PolicySummary']) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index dfac5feeb..05f831e62 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1,6 +1,8 @@ from __future__ import unicode_literals import boto3 +import json +import six import sure # noqa from botocore.exceptions import ClientError from nose.tools import assert_raises @@ -13,6 +15,8 @@ from .organizations_test_utils import ( validate_organizational_unit, validate_account, 
validate_create_account_status, + validate_service_control_policy, + validate_policy_summary, ) @@ -320,3 +324,271 @@ def test_list_children_exception(): ex.operation_name.should.equal('ListChildren') ex.response['Error']['Code'].should.equal('400') ex.response['Error']['Message'].should.contain('InvalidInputException') + + +# Service Control Policies +policy_doc01 = dict( + Version='2012-10-17', + Statement=[dict( + Sid='MockPolicyStatement', + Effect='Allow', + Action='s3:*', + Resource='*', + )] +) + +@mock_organizations +def test_create_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + policy = client.describe_policy(PolicyId=policy_id)['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_attach_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + 
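+ # The same SCP attaches cleanly at every level: the root above, and the OU and account below.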
response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_organizations +def test_attach_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + root_id = 'r-dj873' + ou_id = 'ou-gi99-i7r8eh2i2' + account_id = '126644886543' + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_policies(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(0, 4): + client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy' + str(i), + Type='SERVICE_CONTROL_POLICY' + ) + response = client.list_policies(Filter='SERVICE_CONTROL_POLICY') + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + 
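+ # Repeat the attach-and-list check with an individual account as the target.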
client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId='meaninglessstring', + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_targets_for_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_targets_for_policy(PolicyId=policy_id) + for target in response['Targets']: + target.should.be.a(dict) + target.should.have.key('Name').should.be.a(six.string_types) + target.should.have.key('Arn').should.be.a(six.string_types) + target.should.have.key('TargetId').should.be.a(six.string_types) + target.should.have.key('Type').should.be.within( + ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT'] + ) + + +@mock_organizations +def test_list_targets_for_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId='meaninglessstring') + ex = e.exception + 
ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 5bf733dc6..af330e672 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -174,8 +174,8 @@ def test_add_security_group_to_database(): def test_add_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24") subnet_ids = [subnet1.id, subnet2.id] conn = boto.rds.connect_to_region("us-west-2") @@ -191,7 +191,7 @@ def test_add_database_subnet_group(): def test_describe_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -209,7 +209,7 @@ def test_describe_database_subnet_group(): def test_delete_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -227,7 +227,7 @@ def test_delete_database_subnet_group(): def test_create_database_in_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index cf9805444..a25b53196 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1045,9 +1045,9 @@ def test_create_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.2.0/24')['Subnet'] subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] conn = boto3.client('rds', region_name='us-west-2') @@ -1069,7 +1069,7 @@ def test_create_database_in_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', @@ -1094,7 +1094,7 @@ def test_describe_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], 
CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", @@ -1123,7 +1123,7 @@ def test_delete_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1149,7 +1149,7 @@ def test_list_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1176,7 +1176,7 @@ def test_add_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1207,7 +1207,7 @@ def test_remove_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 9208c92dd..541614788 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -9,7 +9,7 @@ from boto.redshift.exceptions import ( ClusterParameterGroupNotFound, ClusterSecurityGroupNotFound, ClusterSubnetGroupNotFound, - InvalidSubnet, + InvalidSubnet ) from botocore.exceptions import ( ClientError @@ -177,30 +177,29 @@ def test_default_cluster_attributes(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName="my_subnet_group", + Description="This is my subnet group", + SubnetIds=[subnet.id], ) - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', ) 
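+ # The described cluster should report the subnet group it was launched into.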
- cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') @@ -339,7 +338,7 @@ def test_create_cluster_with_vpc_security_groups_boto3(): @mock_redshift def test_create_cluster_with_iam_roles(): - iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role', ] client = boto3.client('redshift', region_name='us-east-1') cluster_id = 'my_cluster' client.create_cluster( @@ -385,29 +384,41 @@ def test_describe_non_existent_cluster(): conn.describe_clusters.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) - @mock_redshift_deprecated def test_delete_cluster(): conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' + cluster_identifier = "my_cluster" + snapshot_identifier = "my_snapshot" conn.create_cluster( cluster_identifier, - node_type='single-node', + node_type="single-node", master_username="username", master_user_password="password", ) + conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw(AttributeError) + clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(1) - conn.delete_cluster(cluster_identifier) + conn.delete_cluster( + cluster_identifier=cluster_identifier, + skip_final_cluster_snapshot=False, + final_cluster_snapshot_identifier=snapshot_identifier + ) clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(0) + snapshots = conn.describe_cluster_snapshots()["DescribeClusterSnapshotsResponse"][ + "DescribeClusterSnapshotsResult"]["Snapshots"] + list(snapshots).should.have.length_of(1) + + assert snapshot_identifier in snapshots[0]["SnapshotIdentifier"] + # Delete invalid id conn.delete_cluster.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) @@ -464,28 +475,26 @@ def test_modify_cluster(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet1.id, subnet2.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + subnets_response = client.describe_cluster_subnet_groups( + ClusterSubnetGroupName="my_subnet_group") + my_subnet = 
subnets_response['ClusterSubnetGroups'][0] - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet_group") my_subnet['Description'].should.equal("This is my subnet group") subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']] @@ -510,35 +519,33 @@ def test_describe_non_existent_subnet_group(): "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(1) - redshift_conn.delete_cluster_subnet_group("my_subnet") + client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group") - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(0) # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + client.delete_cluster_subnet_group.when.called_with( + ClusterSubnetGroupName="not-a-subnet-group").should.throw(ClientError) @mock_redshift_deprecated @@ -643,7 +650,6 @@ def test_delete_cluster_parameter_group(): "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - @mock_redshift def test_create_cluster_snapshot_of_non_existent_cluster(): client = boto3.client('redshift', region_name='us-east-1') @@ -688,7 +694,8 @@ def test_create_cluster_snapshot(): def test_describe_cluster_snapshots(): client = boto3.client('redshift', region_name='us-east-1') cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' + snapshot_identifier_1 = 'my_snapshot_1' + snapshot_identifier_2 = 'my_snapshot_2' client.create_cluster( DBName='test-db', @@ -700,19 +707,33 @@ def test_describe_cluster_snapshots(): ) client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, + SnapshotIdentifier=snapshot_identifier_1, + ClusterIdentifier=cluster_identifier, + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier_2, ClusterIdentifier=cluster_identifier, ) + resp_snap_1 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_1) + snapshot_1 = resp_snap_1['Snapshots'][0] + snapshot_1['SnapshotIdentifier'].should.equal(snapshot_identifier_1) + 
snapshot_1['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_1['NumberOfNodes'].should.equal(1) + snapshot_1['NodeType'].should.equal('ds2.xlarge') + snapshot_1['MasterUsername'].should.equal('username') + + resp_snap_2 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_2) + snapshot_2 = resp_snap_2['Snapshots'][0] + snapshot_2['SnapshotIdentifier'].should.equal(snapshot_identifier_2) + snapshot_2['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_2['NumberOfNodes'].should.equal(1) + snapshot_2['NodeType'].should.equal('ds2.xlarge') + snapshot_2['MasterUsername'].should.equal('username') + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') + resp_clust['Snapshots'][0].should.equal(resp_snap_1['Snapshots'][0]) + resp_clust['Snapshots'][1].should.equal(resp_snap_2['Snapshots'][0]) @mock_redshift diff --git a/tests/test_resourcegroups/__init__.py b/tests/test_resourcegroups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroups/test_resourcegroups.py b/tests/test_resourcegroups/test_resourcegroups.py new file mode 100644 index 000000000..bb3624413 --- /dev/null +++ b/tests/test_resourcegroups/test_resourcegroups.py @@ -0,0 +1,165 @@ +from __future__ import unicode_literals + +import boto3 +import json +import sure # noqa + +from moto import mock_resourcegroups + + +@mock_resourcegroups +def test_create_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = resource_groups.create_group( + Name="test_resource_group", + Description="description", + ResourceQuery={ + "Type": "TAG_FILTERS_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "TagFilters": [ + {"Key": "resources_tag_key", "Values": ["resources_tag_value"]} + ], + } + ), + }, + Tags={"resource_group_tag_key": "resource_group_tag_value"} + ) + response["Group"]["Name"].should.contain("test_resource_group") + response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + +@mock_resourcegroups +def test_delete_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.delete_group(GroupName="test_resource_group") + response["Group"]["Name"].should.contain("test_resource_group") + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(0) + response["Groups"].should.have.length_of(0) + + +@mock_resourcegroups +def test_get_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description") + + return response + + +@mock_resourcegroups +def test_get_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response 
= resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + + +@mock_resourcegroups +def test_get_tags(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_group() + + response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"]) + response["Tags"].should.have.length_of(1) + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + return response + + +@mock_resourcegroups +def test_list_groups(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(1) + response["Groups"].should.have.length_of(1) + + +@mock_resourcegroups +def test_tag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.tag( + Arn=response["Arn"], + Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"} + ) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(2) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + +@mock_resourcegroups +def test_untag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.untag(Arn=response["Arn"], Keys=["resource_group_tag_key"]) + response["Keys"].should.contain("resource_group_tag_key") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(0) + + +@mock_resourcegroups +def test_update_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_get_group() + + response = resource_groups.update_group( + GroupName="test_resource_group", + Description="description_2", + ) + response["Group"]["Description"].should.contain("description_2") + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description_2") + + +@mock_resourcegroups +def test_update_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.update_group_query( + GroupName="test_resource_group", + ResourceQuery={ + "Type": "CLOUDFORMATION_STACK_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "StackIdentifier": ( + "arn:aws:cloudformation:eu-west-1:012345678912:stack/" + "test_stack/c223eca0-e744-11e8-8910-500c41f59083" + ) + } + ), + }, + ) + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 759063329..8015472bf 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -239,7 +239,7 @@ def test_get_resources_elbv2(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - 
CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index d730f8dcf..e174e1c26 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -123,12 +123,12 @@ def test_rrset(): rrsets.should.have.length_of(2) rrsets = conn.get_all_rrsets( - zoneid, name="foo.bar.testdns.aws.com", type="A") + zoneid, name="bar.foo.testdns.aws.com", type="A") rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') + rrsets[0].resource_records[0].should.equal('5.6.7.8') rrsets = conn.get_all_rrsets( - zoneid, name="bar.foo.testdns.aws.com", type="A") + zoneid, name="foo.bar.testdns.aws.com", type="A") rrsets.should.have.length_of(2) resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] resource_records.should.contain('1.2.3.4') diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index cf45822b5..f26964ab7 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1529,6 +1529,45 @@ def test_boto3_copy_object_with_versioning(): # Version should be different to previous version obj2_version_new.should_not.equal(obj2_version) + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test2', 'VersionId': obj2_version}, Bucket='blah', Key='test3') + obj3_version_new = client.get_object(Bucket='blah', Key='test3')['VersionId'] + obj3_version_new.should_not.equal(obj2_version_new) + + # Copy file that doesn't exist + with assert_raises(ClientError) as e: + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test4', 'VersionId': obj2_version}, Bucket='blah', Key='test5') + e.exception.response['Error']['Code'].should.equal('404') + + response = client.create_multipart_upload(Bucket='blah', Key='test4') + upload_id = response['UploadId'] + response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, + UploadId=upload_id, PartNumber=1) + etag = response["CopyPartResult"]["ETag"] + client.complete_multipart_upload( + Bucket='blah', Key='test4', UploadId=upload_id, + MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': 1}]}) + + response = client.get_object(Bucket='blah', Key='test4') + data = response["Body"].read() + data.should.equal(b'test2') + + +@mock_s3 +def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='src', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.create_bucket(Bucket='dest', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='dest', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='src', Key='test', Body=b'content') + + obj2_version_new = client.copy_object(CopySource={'Bucket': 'src', 'Key': 'test'}, Bucket='dest', Key='test') \ + .get('VersionId') + + # VersionId should be present in the response + obj2_version_new.should_not.equal(None) + @mock_s3 def test_boto3_deleted_versionings_list(): @@ -2745,6 +2784,7 @@ def test_boto3_multiple_delete_markers(): latest['Key'].should.equal('key-with-versions-and-unicode-ó') oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + @mock_s3 def test_get_stream_gzipped(): payload = b"this is some stuff here" @@ -2803,3 +2843,80 @@ def test_boto3_bucket_name_too_short(): with assert_raises(ClientError) as exc: 
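# AWS enforces a minimum bucket-name length of 3 characters, so the 2-character name below must be rejected.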
s3.create_bucket(Bucket='x'*2) exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_accelerated_none_when_unspecified(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_can_enable_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Enabled') + +@mock_s3 +def test_can_suspend_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Suspended') + +@mock_s3 +def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_accelerate_configuration_status_validation(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'bad_status'}, + ) + exc.exception.response['Error']['Code'].should.equal('MalformedXML') + +@mock_s3 +def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): + bucket_name = 'some.bucket.with.dots' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + exc.exception.response['Error']['Code'].should.equal('InvalidRequest') diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 9c8252a04..b179a2329 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -15,6 +15,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. 
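+ # (Werkzeug's test client appears to need an explicit zero Content-Length on bodyless requests; hence the line above.)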
return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 434110e87..f6238dd28 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -13,6 +13,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 169282421..78b95ee6a 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -4,10 +4,15 @@ import boto3 from moto import mock_secretsmanager from botocore.exceptions import ClientError -import sure # noqa import string import unittest +import pytz +from datetime import datetime from nose.tools import assert_raises +from six import b + +DEFAULT_SECRET_NAME = 'test-secret' + @mock_secretsmanager def test_get_secret_value(): @@ -18,6 +23,15 @@ def test_get_secret_value(): result = conn.get_secret_value(SecretId='java-util-test-password') assert result['SecretString'] == 'foosecret' +@mock_secretsmanager +def test_get_secret_value_binary(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + create_secret = conn.create_secret(Name='java-util-test-password', + SecretBinary=b("foosecret")) + result = conn.get_secret_value(SecretId='java-util-test-password') + assert result['SecretBinary'] == b('foosecret') + @mock_secretsmanager def test_get_secret_that_does_not_exist(): conn = boto3.client('secretsmanager', region_name='us-west-2') @@ -34,6 +48,20 @@ def test_get_secret_that_does_not_match(): with assert_raises(ClientError): result = conn.get_secret_value(SecretId='i-dont-match') + +@mock_secretsmanager +def test_get_secret_value_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + @mock_secretsmanager def test_create_secret(): conn = boto3.client('secretsmanager', region_name='us-east-1') @@ -61,6 +89,98 @@ def test_create_secret_with_tags(): secret_details = conn.describe_secret(SecretId=secret_name) assert secret_details['Tags'] == [{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}] + +@mock_secretsmanager +def test_delete_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + assert deleted_secret['ARN'] + assert deleted_secret['Name'] == 'test-secret' + assert deleted_secret['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + + secret_details = conn.describe_secret(SecretId='test-secret') + + assert secret_details['ARN'] + assert secret_details['Name'] == 'test-secret' + assert secret_details['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + +@mock_secretsmanager +def test_delete_secret_force(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + 
SecretString='foosecret') + + result = conn.delete_secret(SecretId='test-secret', ForceDeleteWithoutRecovery=True) + + assert result['ARN'] + assert result['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + assert result['Name'] == 'test-secret' + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + +@mock_secretsmanager +def test_delete_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='i-dont-exist', ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_fails_with_both_force_delete_flag_and_recovery_window_flag(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=1, ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_short(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=6) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=31) + + +@mock_secretsmanager +def test_delete_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret') + + @mock_secretsmanager def test_get_random_password_default_length(): conn = boto3.client('secretsmanager', region_name='us-west-2') @@ -203,40 +323,128 @@ def test_describe_secret_that_does_not_match(): with assert_raises(ClientError): result = conn.get_secret_value(SecretId='i-dont-match') + @mock_secretsmanager -def test_rotate_secret(): - secret_name = 'test-secret' +def test_list_secrets_empty(): conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + + secrets = conn.list_secrets() + + assert secrets['SecretList'] == [] + + +@mock_secretsmanager +def test_list_secrets(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', SecretString='foosecret') - rotated_secret = conn.rotate_secret(SecretId=secret_name) + conn.create_secret(Name='test-secret-2', + SecretString='barsecret', + Tags=[{ + 'Key': 'a', + 'Value': '1' + }]) + + secrets = conn.list_secrets() + + assert secrets['SecretList'][0]['ARN'] is not None + assert secrets['SecretList'][0]['Name'] == 'test-secret' + assert secrets['SecretList'][1]['ARN'] is not None + assert secrets['SecretList'][1]['Name'] == 'test-secret-2' + assert secrets['SecretList'][1]['Tags'] == [{ + 'Key': 'a', + 'Value': '1' + }] + + +@mock_secretsmanager +def test_restore_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + 
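+ # Soft-delete the secret first; restore_secret below should then clear its DeletedDate.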
conn.delete_secret(SecretId='test-secret') + + described_secret_before = conn.describe_secret(SecretId='test-secret') + assert described_secret_before['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + described_secret_after = conn.describe_secret(SecretId='test-secret') + assert 'DeletedDate' not in described_secret_after + + +@mock_secretsmanager +def test_restore_secret_that_is_not_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + +@mock_secretsmanager +def test_restore_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.restore_secret(SecretId='i-dont-exist') + + +@mock_secretsmanager +def test_rotate_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_secret assert rotated_secret['ARN'] != '' # Test arn not empty - assert rotated_secret['Name'] == secret_name + assert rotated_secret['Name'] == DEFAULT_SECRET_NAME assert rotated_secret['VersionId'] != '' @mock_secretsmanager def test_rotate_secret_enable_rotation(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') - initial_description = conn.describe_secret(SecretId=secret_name) + initial_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert initial_description assert initial_description['RotationEnabled'] is False assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 - conn.rotate_secret(SecretId=secret_name, + conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules={'AutomaticallyAfterDays': 42}) - rotated_description = conn.describe_secret(SecretId=secret_name) + rotated_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_description assert rotated_description['RotationEnabled'] is True assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='test-secret') + + @mock_secretsmanager def test_rotate_secret_that_does_not_exist(): conn = boto3.client('secretsmanager', 'us-west-2') @@ -262,9 +470,8 @@ def test_rotate_secret_client_request_token_too_short(): @mock_secretsmanager def test_rotate_secret_client_request_token_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') client_request_token = ( @@ -272,19 +479,18 @@ def test_rotate_secret_client_request_token_too_long(): 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' ) with 
assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, ClientRequestToken=client_request_token) @mock_secretsmanager def test_rotate_secret_rotation_lambda_arn_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationLambdaARN=rotation_lambda_arn) @mock_secretsmanager @@ -296,12 +502,78 @@ def test_rotate_secret_rotation_period_zero(): @mock_secretsmanager def test_rotate_secret_rotation_period_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_rules = {'AutomaticallyAfterDays': 1001} with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules=rotation_rules) + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='foosecret', + VersionStages=['AWSCURRENT']) + version_id = put_secret_value_dict['VersionId'] + + get_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=version_id, + VersionStage='AWSCURRENT') + + assert get_secret_value_dict + assert get_secret_value_dict['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='first_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='second_secret', + VersionStages=['AWSCURRENT']) + + first_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=first_version_id) + first_secret_value = first_secret_value_dict['SecretString'] + + assert first_secret_value == 'first_secret' + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + assert first_version_id != second_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = 
+                                                  SecretString='dupe_secret',
+                                                  VersionStages=['AWSCURRENT'])
+    second_version_id = put_secret_value_dict['VersionId']
+
+    versions_list = conn.list_secret_version_ids(SecretId=DEFAULT_SECRET_NAME)
+
+    returned_version_ids = [v['VersionId'] for v in versions_list['Versions']]
+
+    # Compare with sorted(); list.sort() sorts in place and returns None,
+    # which would make this assertion vacuously true.
+    assert sorted([first_version_id, second_version_id]) == sorted(returned_version_ids)
+
diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py
index d0f495f57..23d823239 100644
--- a/tests/test_secretsmanager/test_server.py
+++ b/tests/test_secretsmanager/test_server.py
@@ -10,6 +10,8 @@ from moto import mock_secretsmanager
 Test the different server responses for secretsmanager
 '''
 
+DEFAULT_SECRET_NAME = 'test-secret'
+
 
 @mock_secretsmanager
 def test_get_secret_value():
@@ -18,19 +20,20 @@ def test_get_secret_value():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foo-secret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"},
                     )
     get_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "VersionStage": "AWSCURRENT"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                    )
+                                  data={"SecretId": DEFAULT_SECRET_NAME,
+                                        "VersionStage": "AWSCURRENT"},
+                                  headers={
+                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                  )
 
     json_data = json.loads(get_secret.data.decode("utf-8"))
+    assert json_data['SecretString'] == 'foo-secret'
 
 
 @mock_secretsmanager
@@ -55,7 +58,7 @@ def test_get_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foo-secret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"},
@@ -165,7 +168,7 @@ def test_describe_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -188,7 +191,7 @@ def test_rotate_secret():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -197,7 +200,7 @@ def test_rotate_secret():
     client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2"
     rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
+                        data={"SecretId": DEFAULT_SECRET_NAME,
                               "ClientRequestToken": client_request_token},
                         headers={
                             "X-Amz-Target": "secretsmanager.RotateSecret"
@@ -207,7 +210,7 @@ def test_rotate_secret():
     json_data = json.loads(rotate_secret.data.decode("utf-8"))
     assert json_data   # Returned dict is not empty
     assert json_data['ARN'] != ''
-    assert json_data['Name'] == 'test-secret'
+    assert json_data['Name'] == DEFAULT_SECRET_NAME
     assert json_data['VersionId'] == client_request_token
 
 # @mock_secretsmanager
@@ -289,7 +292,7 @@ def test_rotate_secret_that_does_not_match():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -313,7 +316,7 @@ def test_rotate_secret_client_request_token_too_short():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -322,7 +325,7 @@ def test_rotate_secret_client_request_token_too_short():
     client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
     rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
+                        data={"SecretId": DEFAULT_SECRET_NAME,
                               "ClientRequestToken": client_request_token},
                         headers={
                             "X-Amz-Target": "secretsmanager.RotateSecret"
@@ -339,7 +342,7 @@ def test_rotate_secret_client_request_token_too_long():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -351,7 +354,7 @@ def test_rotate_secret_client_request_token_too_long():
     client_request_token = (
         'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
         'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
     )
     rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
+                        data={"SecretId": DEFAULT_SECRET_NAME,
                               "ClientRequestToken": client_request_token},
                         headers={
                             "X-Amz-Target": "secretsmanager.RotateSecret"
@@ -368,7 +371,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long():
     test_client = backend.test_client()
 
     create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
+                        data={"Name": DEFAULT_SECRET_NAME,
                               "SecretString": "foosecret"},
                         headers={
                             "X-Amz-Target": "secretsmanager.CreateSecret"
@@ -377,7 +380,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long():
     rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
     rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
+                        data={"SecretId": DEFAULT_SECRET_NAME,
                               "RotationLambdaARN": rotation_lambda_arn},
                         headers={
                             "X-Amz-Target": "secretsmanager.RotateSecret"
@@ -389,7 +392,165 @@ def test_rotate_secret_rotation_lambda_arn_too_long():
     assert json_data['__type'] == 'InvalidParameterException'
 
 
-#
+
+
+
+@mock_secretsmanager
+def test_put_secret_value_puts_new_secret():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    test_client.post('/',
+                     data={
+                         "SecretId": DEFAULT_SECRET_NAME,
+                         "SecretString": "foosecret",
+                         "VersionStages": ["AWSCURRENT"]},
+                     headers={
+                         "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                     )
+
+    put_second_secret_value_json = test_client.post('/',
+                                                    data={
+                                                        "SecretId": DEFAULT_SECRET_NAME,
+                                                        "SecretString": "foosecret",
+                                                        "VersionStages": ["AWSCURRENT"]},
+                                                    headers={
+                                                        "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                    )
+    second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8"))
+
+    version_id = second_secret_json_data['VersionId']
+
+    secret_value_json = test_client.post('/',
+                                         data={
+                                             "SecretId": DEFAULT_SECRET_NAME,
+                                             "VersionId": version_id,
+                                             "VersionStage": 'AWSCURRENT'},
+                                         headers={
+                                             "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                         )
+
+    second_secret_json_data = json.loads(secret_value_json.data.decode("utf-8"))
+
+    assert second_secret_json_data
+    assert second_secret_json_data['SecretString'] == 'foosecret'
+
+
+@mock_secretsmanager
+def test_put_secret_value_can_get_first_version_if_put_twice():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    first_secret_string = 'first_secret'
+    second_secret_string = 'second_secret'
+
+    put_first_secret_value_json = test_client.post('/',
+                                                   data={
+                                                       "SecretId": DEFAULT_SECRET_NAME,
+                                                       "SecretString": first_secret_string,
+                                                       "VersionStages": ["AWSCURRENT"]},
+                                                   headers={
+                                                       "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                   )
+
+    first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8"))
+
+    first_secret_version_id = first_secret_json_data['VersionId']
+
+    test_client.post('/',
+                     data={
+                         "SecretId": DEFAULT_SECRET_NAME,
+                         "SecretString": second_secret_string,
+                         "VersionStages": ["AWSCURRENT"]},
+                     headers={
+                         "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                     )
+
+    get_first_secret_value_json = test_client.post('/',
+                                                   data={
+                                                       "SecretId": DEFAULT_SECRET_NAME,
+                                                       "VersionId": first_secret_version_id,
+                                                       "VersionStage": 'AWSCURRENT'},
+                                                   headers={
+                                                       "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                                   )
+
+    get_first_secret_json_data = json.loads(get_first_secret_value_json.data.decode("utf-8"))
+
+    assert get_first_secret_json_data
+    assert get_first_secret_json_data['SecretString'] == first_secret_string
+
+
+@mock_secretsmanager
+def test_put_secret_value_versions_differ_if_same_secret_put_twice():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    put_first_secret_value_json = test_client.post('/',
+                                                   data={
+                                                       "SecretId": DEFAULT_SECRET_NAME,
+                                                       "SecretString": "secret",
+                                                       "VersionStages": ["AWSCURRENT"]},
+                                                   headers={
+                                                       "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                   )
+    first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8"))
+    first_secret_version_id = first_secret_json_data['VersionId']
+
+    put_second_secret_value_json = test_client.post('/',
+                                                    data={
+                                                        "SecretId": DEFAULT_SECRET_NAME,
+                                                        "SecretString": "secret",
+                                                        "VersionStages": ["AWSCURRENT"]},
+                                                    headers={
+                                                        "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                    )
+    second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8"))
+    second_secret_version_id = second_secret_json_data['VersionId']
+
+    assert first_secret_version_id != second_secret_version_id
+
+
+@mock_secretsmanager
+def test_can_list_secret_version_ids():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    put_first_secret_value_json = test_client.post('/',
+                                                   data={
+                                                       "SecretId": DEFAULT_SECRET_NAME,
+                                                       "SecretString": "secret",
+                                                       "VersionStages": ["AWSCURRENT"]},
+                                                   headers={
+                                                       "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                   )
+    first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8"))
+    first_secret_version_id = first_secret_json_data['VersionId']
+    put_second_secret_value_json = test_client.post('/',
+                                                    data={
+                                                        "SecretId": DEFAULT_SECRET_NAME,
+                                                        "SecretString": "secret",
+                                                        "VersionStages": ["AWSCURRENT"]},
+                                                    headers={
+                                                        "X-Amz-Target": "secretsmanager.PutSecretValue"},
+                                                    )
+    second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8"))
+    second_secret_version_id = second_secret_json_data['VersionId']
+
+    list_secret_versions_json = test_client.post('/',
+                                                 data={
+                                                     "SecretId": DEFAULT_SECRET_NAME, },
+                                                 headers={
+                                                     "X-Amz-Target": "secretsmanager.ListSecretVersionIds"},
+                                                 )
+
+    versions_list = json.loads(list_secret_versions_json.data.decode("utf-8"))
+
+    returned_version_ids = [v['VersionId'] for v in versions_list['Versions']]
+
+    # Compare with sorted(); list.sort() returns None and would always pass.
+    assert sorted([first_secret_version_id, second_secret_version_id]) == sorted(returned_version_ids)
+
+#
 # The following tests should work, but fail on the embedded dict in
 # RotationRules. The error message suggests a problem deeper in the code, which
 # needs further investigation.
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 9beb9a3fa..d53ae50f7 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -416,7 +416,9 @@ def test_send_receive_message_timestamps():
     conn.create_queue(QueueName="test-queue")
     queue = sqs.Queue("test-queue")
 
-    queue.send_message(MessageBody="derp")
+    response = queue.send_message(MessageBody="derp")
+    assert response['ResponseMetadata']['RequestId']
+
     messages = conn.receive_message(
         QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages']
 
diff --git a/update_version_from_git.py b/update_version_from_git.py
new file mode 100644
index 000000000..355bc2ba9
--- /dev/null
+++ b/update_version_from_git.py
@@ -0,0 +1,118 @@
+"""
+Adapted from https://github.com/pygame/pygameweb/blob/master/pygameweb/builds/update_version_from_git.py
+
+For updating the version from git.
+__init__.py contains a __version__ field.
+Update that.
+If we are on master, we want to update the version as a pre-release.
+git describe --tags
+With these:
+    __init__.py
+        __version__ = '0.0.2'
+    git describe --tags
+        0.0.1-22-g729a5ae
+We want this:
+    __init__.py
+        __version__ = '0.0.2.dev22'
+Get the branch/tag name with this.
+    git symbolic-ref -q --short HEAD || git describe --tags --exact-match
+"""
+
+import io
+import os
+import re
+import subprocess
+
+
+def migrate_source_attribute(attr, to_this, target_file, regex):
+    """Updates __magic__ attributes in the source file"""
+    change_this = re.compile(regex, re.S)
+    new_file = []
+    found = False
+
+    with open(target_file, 'r') as fp:
+        lines = fp.readlines()
+
+    for line in lines:
+        if line.startswith(attr):
+            found = True
+            line = re.sub(change_this, to_this, line)
+        new_file.append(line)
+
+    if found:
+        with open(target_file, 'w') as fp:
+            fp.writelines(new_file)
+
+
+def migrate_version(target_file, new_version):
+    """Updates __version__ in the source file"""
+    regex = r"['\"](.*)['\"]"
+    migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex)
+
+
+def is_master_branch():
+    cmd = 'git rev-parse --abbrev-ref HEAD'
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    return tag_branch in [b'master\n']
+
+
+def git_tag_name():
+    cmd = 'git describe --tags'
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    tag_branch = tag_branch.decode().strip()
+    return tag_branch
+
+
+def get_git_version_info():
+    cmd = 'git describe --tags'
+    ver_str = subprocess.check_output(cmd, shell=True)
+    ver, commits_since, githash = ver_str.decode().strip().split('-')
+    return ver, commits_since, githash
+
+
+def prerelease_version():
+    """Return what the prerelease version should be, e.g. 0.0.2.dev22.
+    https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning
+    """
+    ver, commits_since, githash = get_git_version_info()
+    initpy_ver = get_version()
+
+    assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2 or 0.0.2.dev'
+    assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.'
+    return '{initpy_ver}.dev{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since)
+
+
+def read(*parts):
+    """Reads in a file from *parts.
+    """
+    try:
+        return io.open(os.path.join(*parts), 'r', encoding='utf-8').read()
+    except IOError:
+        return ''
+
+
+def get_version():
+    """Returns the version from moto/__init__.py
+    """
+    version_file = read('moto', '__init__.py')
+    version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
+                              version_file, re.MULTILINE)
+    if version_match:
+        return version_match.group(1)
+    raise RuntimeError('Unable to find version string.')
+
+
+def release_version_correct():
+    """Makes sure that:
+    - the prerelease version for master is correct.
+    - the release version is correct for tags.
+    """
+    if is_master_branch():
+        # Update to a pre-release version.
+        initpy = os.path.abspath("moto/__init__.py")
+
+        new_version = prerelease_version()
+        print('updating version in __init__.py to {new_version}'.format(new_version=new_version))
+        migrate_version(initpy, new_version)
+    else:
+        # Check that we are on a tag with the same version as in __init__.py.
+        assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __version__'
+
+
+if __name__ == '__main__':
+    release_version_correct()
diff --git a/wait_for.py b/wait_for.py
index d313ea5a9..1f291c16b 100755
--- a/wait_for.py
+++ b/wait_for.py
@@ -12,8 +12,9 @@ except ImportError:
     # py3
     import urllib.request as urllib
     from urllib.error import URLError
+    import socket
 
-    EXCEPTIONS = (URLError, ConnectionResetError)
+    EXCEPTIONS = (URLError, socket.timeout, ConnectionResetError)
 
 start_ts = time.time()
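As a sanity check on the scheme update_version_from_git.py implements, the pre-release computation reduces to a small pure function. A self-contained sketch, with an illustrative helper name and a made-up `git describe` output:

def compose_prerelease(initpy_version, describe_output):
    """Combine the __init__.py version with `git describe --tags` output,
    e.g. '0.0.2' and '0.0.1-22-g729a5ae' -> '0.0.2.dev22'."""
    ver, commits_since, githash = describe_output.split('-')
    return '{0}.dev{1}'.format(initpy_version, commits_since)


assert compose_prerelease('0.0.2', '0.0.1-22-g729a5ae') == '0.0.2.dev22'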