Merge branch 'master' into set-lambda-function-physical-resource-id

commit 8d06e6e93d
Steve Pulec, 2019-07-07 23:00:26 -05:00, committed by GitHub
152 changed files with 30319 additions and 2467 deletions

.gitignore

@@ -17,3 +17,5 @@ python_env
venv/
.python-version
.vscode/
tests/file.tmp
.eggs/

.travis.yml

@@ -13,9 +13,6 @@ env:
before_install:
- export BOTO_CONFIG=/dev/null
install:
# We build moto first so the docker container doesn't try to compile it as well, also note we don't use
# -d for docker run so the logs show up in travis
# Python images come from here: https://hub.docker.com/_/python/
- |
python setup.py sdist
@@ -35,3 +32,26 @@ script:
- make test
after_success:
- coveralls
before_deploy:
- git checkout $TRAVIS_BRANCH
- git fetch --unshallow
- python update_version_from_git.py
deploy:
- provider: pypi
distributions: sdist bdist_wheel
user: spulec
password:
secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
on:
branch:
- master
skip_cleanup: true
skip_existing: true
- provider: pypi
distributions: sdist bdist_wheel
user: spulec
password:
secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
on:
tags: true
skip_existing: true

AUTHORS.md

@@ -54,3 +54,6 @@ Moto is written by Steve Pulec with contributions from:
* [William Richard](https://github.com/william-richard)
* [Alex Casalboni](https://github.com/alexcasalboni)
* [Jon Beilke](https://github.com/jrbeilke)
* [Bendeguz Acs](https://github.com/acsbendi)
* [Craig Anderson](https://github.com/craiga)
* [Robert Lewis](https://github.com/ralewis85)

IMPLEMENTATION_COVERAGE.md

@@ -470,48 +470,55 @@
- [ ] upgrade_applied_schema
- [ ] upgrade_published_schema
## cloudformation - 21% implemented
## cloudformation - 65% implemented
- [ ] cancel_update_stack
- [ ] continue_update_rollback
- [X] create_change_set
- [X] create_stack
- [ ] create_stack_instances
- [ ] create_stack_set
- [ ] delete_change_set
- [X] create_stack_instances
- [X] create_stack_set
- [X] delete_change_set
- [X] delete_stack
- [ ] delete_stack_instances
- [ ] delete_stack_set
- [X] delete_stack_instances
- [X] delete_stack_set
- [ ] deploy
- [ ] describe_account_limits
- [ ] describe_change_set
- [ ] describe_stack_events
- [ ] describe_stack_instance
- [ ] describe_stack_resource
- [ ] describe_stack_resources
- [ ] describe_stack_set
- [ ] describe_stack_set_operation
- [X] describe_change_set
- [ ] describe_stack_drift_detection_status
- [X] describe_stack_events
- [X] describe_stack_instance
- [X] describe_stack_resource
- [ ] describe_stack_resource_drifts
- [X] describe_stack_resources
- [X] describe_stack_set
- [X] describe_stack_set_operation
- [X] describe_stacks
- [ ] detect_stack_drift
- [ ] detect_stack_resource_drift
- [ ] estimate_template_cost
- [X] execute_change_set
- [ ] get_stack_policy
- [ ] get_template
- [X] get_template
- [ ] get_template_summary
- [ ] list_change_sets
- [X] list_change_sets
- [X] list_exports
- [ ] list_imports
- [ ] list_stack_instances
- [X] list_stack_instances
- [X] list_stack_resources
- [ ] list_stack_set_operation_results
- [ ] list_stack_set_operations
- [ ] list_stack_sets
- [X] list_stack_set_operation_results
- [X] list_stack_set_operations
- [X] list_stack_sets
- [X] list_stacks
- [ ] package
- [ ] set_stack_policy
- [ ] signal_resource
- [ ] stop_stack_set_operation
- [X] stop_stack_set_operation
- [X] update_stack
- [ ] update_stack_instances
- [ ] update_stack_set
- [X] update_stack_instances
- [X] update_stack_set
- [ ] update_termination_protection
- [ ] validate_template
- [X] validate_template
- [ ] wait
## cloudfront - 0% implemented
- [ ] create_cloud_front_origin_access_identity
@@ -852,7 +859,7 @@
- [ ] admin_set_user_settings
- [ ] admin_update_auth_event_feedback
- [ ] admin_update_device_status
- [ ] admin_update_user_attributes
- [X] admin_update_user_attributes
- [ ] admin_user_global_sign_out
- [ ] associate_software_token
- [X] change_password
@@ -916,7 +923,7 @@
- [ ] update_auth_event_feedback
- [ ] update_device_status
- [ ] update_group
- [ ] update_identity_provider
- [x] update_identity_provider
- [ ] update_resource_server
- [ ] update_user_attributes
- [ ] update_user_pool
@@ -1466,7 +1473,7 @@
- [X] describe_spot_instance_requests
- [ ] describe_spot_price_history
- [ ] describe_stale_security_groups
- [ ] describe_subnets
- [X] describe_subnets
- [X] describe_tags
- [ ] describe_volume_attribute
- [ ] describe_volume_status
@@ -1574,9 +1581,9 @@
- [ ] update_security_group_rule_descriptions_egress
- [ ] update_security_group_rule_descriptions_ingress
## ecr - 31% implemented
## ecr - 36% implemented
- [ ] batch_check_layer_availability
- [ ] batch_delete_image
- [X] batch_delete_image
- [X] batch_get_image
- [ ] complete_layer_upload
- [X] create_repository
@@ -2005,23 +2012,23 @@
- [ ] upload_archive
- [ ] upload_multipart_part
## glue - 0% implemented
- [ ] batch_create_partition
## glue - 23% implemented
- [x] batch_create_partition
- [ ] batch_delete_connection
- [ ] batch_delete_partition
- [ ] batch_delete_table
- [x] batch_delete_partition
- [x] batch_delete_table
- [ ] batch_delete_table_version
- [ ] batch_get_partition
- [ ] batch_stop_job_run
- [ ] create_classifier
- [ ] create_connection
- [ ] create_crawler
- [ ] create_database
- [x] create_database
- [ ] create_dev_endpoint
- [ ] create_job
- [ ] create_partition
- [x] create_partition
- [ ] create_script
- [ ] create_table
- [x] create_table
- [ ] create_trigger
- [ ] create_user_defined_function
- [ ] delete_classifier
@@ -2030,8 +2037,8 @@
- [ ] delete_database
- [ ] delete_dev_endpoint
- [ ] delete_job
- [ ] delete_partition
- [ ] delete_table
- [x] delete_partition
- [x] delete_table
- [ ] delete_table_version
- [ ] delete_trigger
- [ ] delete_user_defined_function
@@ -2043,7 +2050,7 @@
- [ ] get_crawler
- [ ] get_crawler_metrics
- [ ] get_crawlers
- [ ] get_database
- [x] get_database
- [ ] get_databases
- [ ] get_dataflow_graph
- [ ] get_dev_endpoint
@@ -2053,13 +2060,13 @@
- [ ] get_job_runs
- [ ] get_jobs
- [ ] get_mapping
- [ ] get_partition
- [ ] get_partitions
- [x] get_partition
- [x] get_partitions
- [ ] get_plan
- [ ] get_table
- [ ] get_table_version
- [ ] get_table_versions
- [ ] get_tables
- [x] get_table
- [x] get_table_version
- [x] get_table_versions
- [x] get_tables
- [ ] get_trigger
- [ ] get_triggers
- [ ] get_user_defined_function
@@ -2080,8 +2087,8 @@
- [ ] update_database
- [ ] update_dev_endpoint
- [ ] update_job
- [ ] update_partition
- [ ] update_table
- [x] update_partition
- [x] update_table
- [ ] update_trigger
- [ ] update_user_defined_function
@@ -2208,7 +2215,7 @@
- [ ] describe_event_types
- [ ] describe_events
## iam - 48% implemented
## iam - 62% implemented
- [ ] add_client_id_to_open_id_connect_provider
- [X] add_role_to_instance_profile
- [X] add_user_to_group
@@ -2247,7 +2254,7 @@
- [X] delete_server_certificate
- [ ] delete_service_linked_role
- [ ] delete_service_specific_credential
- [ ] delete_signing_certificate
- [X] delete_signing_certificate
- [ ] delete_ssh_public_key
- [X] delete_user
- [X] delete_user_policy
@@ -2279,7 +2286,7 @@
- [ ] get_ssh_public_key
- [X] get_user
- [X] get_user_policy
- [ ] list_access_keys
- [X] list_access_keys
- [X] list_account_aliases
- [X] list_attached_group_policies
- [X] list_attached_role_policies
@@ -2287,19 +2294,21 @@
- [ ] list_entities_for_policy
- [X] list_group_policies
- [X] list_groups
- [ ] list_groups_for_user
- [ ] list_instance_profiles
- [ ] list_instance_profiles_for_role
- [X] list_groups_for_user
- [X] list_instance_profiles
- [X] list_instance_profiles_for_role
- [X] list_mfa_devices
- [ ] list_open_id_connect_providers
- [X] list_policies
- [X] list_policy_versions
- [X] list_role_policies
- [ ] list_roles
- [X] list_roles
- [X] list_role_tags
- [ ] list_user_tags
- [X] list_saml_providers
- [ ] list_server_certificates
- [X] list_server_certificates
- [ ] list_service_specific_credentials
- [ ] list_signing_certificates
- [X] list_signing_certificates
- [ ] list_ssh_public_keys
- [X] list_user_policies
- [X] list_users
@@ -2315,6 +2324,10 @@
- [ ] set_default_policy_version
- [ ] simulate_custom_policy
- [ ] simulate_principal_policy
- [X] tag_role
- [ ] tag_user
- [X] untag_role
- [ ] untag_user
- [X] update_access_key
- [ ] update_account_password_policy
- [ ] update_assume_role_policy
@@ -2326,11 +2339,11 @@
- [X] update_saml_provider
- [ ] update_server_certificate
- [ ] update_service_specific_credential
- [ ] update_signing_certificate
- [X] update_signing_certificate
- [ ] update_ssh_public_key
- [ ] update_user
- [ ] upload_server_certificate
- [ ] upload_signing_certificate
- [X] update_user
- [X] upload_server_certificate
- [X] upload_signing_certificate
- [ ] upload_ssh_public_key
## importexport - 0% implemented
@@ -2376,7 +2389,7 @@
- [ ] unsubscribe_from_event
- [ ] update_assessment_target
## iot - 32% implemented
## iot - 33% implemented
- [ ] accept_certificate_transfer
- [X] add_thing_to_thing_group
- [ ] associate_targets_with_job
@@ -2474,7 +2487,7 @@
- [ ] list_topic_rules
- [ ] list_v2_logging_levels
- [ ] register_ca_certificate
- [ ] register_certificate
- [X] register_certificate
- [ ] register_thing
- [ ] reject_certificate_transfer
- [X] remove_thing_from_thing_group
@@ -2513,14 +2526,14 @@
- [ ] start_next_pending_job_execution
- [ ] update_job_execution
## kinesis - 56% implemented
## kinesis - 61% implemented
- [X] add_tags_to_stream
- [X] create_stream
- [ ] decrease_stream_retention_period
- [X] delete_stream
- [ ] describe_limits
- [X] describe_stream
- [ ] describe_stream_summary
- [X] describe_stream_summary
- [ ] disable_enhanced_monitoring
- [ ] enable_enhanced_monitoring
- [X] get_records
@@ -3092,14 +3105,14 @@
- [ ] update_server
- [ ] update_server_engine_attributes
## organizations - 30% implemented
## organizations - 47% implemented
- [ ] accept_handshake
- [ ] attach_policy
- [X] attach_policy
- [ ] cancel_handshake
- [X] create_account
- [X] create_organization
- [X] create_organizational_unit
- [ ] create_policy
- [X] create_policy
- [ ] decline_handshake
- [ ] delete_organization
- [ ] delete_organizational_unit
@@ -3109,7 +3122,7 @@
- [ ] describe_handshake
- [X] describe_organization
- [X] describe_organizational_unit
- [ ] describe_policy
- [X] describe_policy
- [ ] detach_policy
- [ ] disable_aws_service_access
- [ ] disable_policy_type
@@ -3127,10 +3140,10 @@
- [ ] list_handshakes_for_organization
- [X] list_organizational_units_for_parent
- [X] list_parents
- [ ] list_policies
- [ ] list_policies_for_target
- [X] list_policies
- [X] list_policies_for_target
- [X] list_roots
- [ ] list_targets_for_policy
- [X] list_targets_for_policy
- [X] move_account
- [ ] remove_account_from_organization
- [ ] update_organizational_unit
@@ -3409,19 +3422,19 @@
- [ ] start_stream_processor
- [ ] stop_stream_processor
## resource-groups - 0% implemented
- [ ] create_group
- [ ] delete_group
- [ ] get_group
- [ ] get_group_query
## resource-groups - 62% implemented
- [X] create_group
- [X] delete_group
- [X] get_group
- [X] get_group_query
- [ ] get_tags
- [ ] list_group_resources
- [ ] list_groups
- [X] list_groups
- [ ] search_resources
- [ ] tag
- [ ] untag
- [ ] update_group
- [ ] update_group_query
- [X] update_group
- [X] update_group_query
## resourcegroupstaggingapi - 60% implemented
- [X] get_resources
@@ -3534,7 +3547,7 @@
- [ ] delete_object
- [ ] delete_object_tagging
- [ ] delete_objects
- [ ] get_bucket_accelerate_configuration
- [X] get_bucket_accelerate_configuration
- [X] get_bucket_acl
- [ ] get_bucket_analytics_configuration
- [ ] get_bucket_cors
@@ -3542,7 +3555,7 @@
- [ ] get_bucket_inventory_configuration
- [ ] get_bucket_lifecycle
- [ ] get_bucket_lifecycle_configuration
- [ ] get_bucket_location
- [X] get_bucket_location
- [ ] get_bucket_logging
- [ ] get_bucket_metrics_configuration
- [ ] get_bucket_notification
@@ -3568,7 +3581,7 @@
- [ ] list_objects
- [ ] list_objects_v2
- [ ] list_parts
- [ ] put_bucket_accelerate_configuration
- [X] put_bucket_accelerate_configuration
- [ ] put_bucket_acl
- [ ] put_bucket_analytics_configuration
- [X] put_bucket_cors
@@ -3648,14 +3661,14 @@
## secretsmanager - 33% implemented
- [ ] cancel_rotate_secret
- [X] create_secret
- [ ] delete_secret
- [X] delete_secret
- [X] describe_secret
- [X] get_random_password
- [X] get_secret_value
- [ ] list_secret_version_ids
- [ ] list_secrets
- [ ] put_secret_value
- [ ] restore_secret
- [X] list_secret_version_ids
- [X] list_secrets
- [X] put_secret_value
- [X] restore_secret
- [X] rotate_secret
- [ ] tag_resource
- [ ] untag_resource
@@ -3913,7 +3926,7 @@
- [ ] delete_message_batch
- [X] delete_queue
- [ ] get_queue_attributes
- [ ] get_queue_url
- [X] get_queue_url
- [X] list_dead_letter_source_queues
- [ ] list_queue_tags
- [X] list_queues

README.md

@@ -2,8 +2,8 @@
[![Join the chat at https://gitter.im/awsmoto/Lobby](https://badges.gitter.im/awsmoto/Lobby.svg)](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/spulec/moto.png?branch=master)](https://travis-ci.org/spulec/moto)
[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.png?branch=master)](https://coveralls.io/r/spulec/moto)
[![Build Status](https://travis-ci.org/spulec/moto.svg?branch=master)](https://travis-ci.org/spulec/moto)
[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.svg?branch=master)](https://coveralls.io/r/spulec/moto)
[![Docs](https://readthedocs.org/projects/pip/badge/?version=stable)](http://docs.getmoto.org)
# In a nutshell
@@ -47,7 +47,7 @@ def test_my_model_save():
body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8")
assert body == b'is awesome'
assert body == 'is awesome'
```
With the decorator wrapping the test, all the calls to S3 are automatically mocked out. The mock keeps the state of the buckets and keys.
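For instance, a complete test using that pattern might look like this (a minimal sketch, assuming `boto3` and `moto` are installed; the bucket and key names are illustrative):

```python
import boto3
from moto import mock_s3


@mock_s3
def test_s3_state_is_mocked():
    # Every boto3 S3 call inside the decorated test hits moto's in-memory
    # backend, so no real AWS credentials or network access are needed.
    conn = boto3.resource('s3', region_name='us-east-1')
    conn.create_bucket(Bucket='mybucket')
    conn.Object('mybucket', 'steve').put(Body=b'is awesome')

    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode('utf-8')
    assert body == 'is awesome'
```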
@@ -55,93 +55,95 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock
It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. Here's the status of the other AWS services implemented:
```gherkin
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Service Name | Decorator | Development Status |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| ACM | @mock_acm | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| API Gateway | @mock_apigateway | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Autoscaling | @mock_autoscaling | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Cloudformation | @mock_cloudformation | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Cloudwatch | @mock_cloudwatch | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| CloudwatchEvents | @mock_events | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Cognito Identity | @mock_cognitoidentity | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Config | @mock_config | basic endpoints done |
|-------------------------------------------------------------------------------------|
| Data Pipeline | @mock_datapipeline | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| DynamoDB | @mock_dynamodb | core endpoints done |
| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| EC2 | @mock_ec2 | core endpoints done |
| - AMI | | core endpoints done |
| - EBS | | core endpoints done |
| - Instances | | all endpoints done |
| - Security Groups | | core endpoints done |
| - Tags | | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| ECR | @mock_ecr | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| ECS | @mock_ecs | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| ELB | @mock_elb | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| ELBv2 | @mock_elbv2 | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| EMR | @mock_emr | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Glacier | @mock_glacier | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| IAM | @mock_iam | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| IoT | @mock_iot | core endpoints done |
| | @mock_iotdata | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Kinesis | @mock_kinesis | core endpoints done |
|-------------------------------------------------------------------------------------|
| KMS | @mock_kms | basic endpoints done |
|-------------------------------------------------------------------------------------|
| Lambda | @mock_lambda | basic endpoints done, requires |
| | | docker |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Logs | @mock_logs | basic endpoints done |
|------------------------------------------------------------------------------|
| Kinesis | @mock_kinesis | core endpoints done |
|------------------------------------------------------------------------------|
| KMS | @mock_kms | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Organizations | @mock_organizations | some core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Polly | @mock_polly | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| RDS | @mock_rds | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| RDS2 | @mock_rds2 | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Redshift | @mock_redshift | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| Route53 | @mock_route53 | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| S3 | @mock_s3 | core endpoints done |
|------------------------------------------------------------------------------|
| SecretsManager | @mock_secretsmanager | basic endpoints done
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| SecretsManager | @mock_secretsmanager | basic endpoints done |
|-------------------------------------------------------------------------------------|
| SES | @mock_ses | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| SNS | @mock_sns | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| SQS | @mock_sqs | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| SSM | @mock_ssm | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| STS | @mock_sts | core endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| SWF | @mock_swf | basic endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
| X-Ray | @mock_xray | all endpoints done |
|------------------------------------------------------------------------------|
|-------------------------------------------------------------------------------------|
```
For a full list of endpoints, see the [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) document.
@@ -316,3 +318,11 @@ boto3.resource(
```console
$ pip install moto
```
## Releases
Releases are done from Travis CI, fairly closely following this guide:
https://docs.travis-ci.com/user/deployment/pypi/
- Commits to the `master` branch do a dev deploy to PyPI.
- Commits to a tag do a real deploy to PyPI.
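For example, cutting a tagged release would look something like this (the version number is illustrative):

```console
$ git tag 1.3.9
$ git push origin 1.3.9
```

Travis CI then runs the tag deploy configured in `.travis.yml`, building the `sdist` and `bdist_wheel` distributions and uploading them to PyPI.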

docs/index.rst

@@ -17,66 +17,95 @@ with ``moto`` and its usage.
Currently implemented Services:
-------------------------------
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Service Name | Decorator | Development Status |
+=======================+=====================+===================================+
+===========================+=======================+====================================+
| ACM | @mock_acm | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| API Gateway | @mock_apigateway | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Autoscaling | @mock_autoscaling | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Cloudformation | @mock_cloudformation | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Cloudwatch | @mock_cloudwatch | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| CloudwatchEvents | @mock_events | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Cognito Identity | @mock_cognitoidentity | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Cognito Identity Provider | @mock_cognitoidp | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Config | @mock_config | basic endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Data Pipeline | @mock_datapipeline | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
| - DynamoDB | - @mock_dynamodb | - core endpoints done |
| - DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes|
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| DynamoDB | - @mock_dynamodb | - core endpoints done |
| DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes |
+---------------------------+-----------------------+------------------------------------+
| EC2 | @mock_ec2 | core endpoints done |
| - AMI | | - core endpoints done |
| - EBS | | - core endpoints done |
| - Instances | | - all endpoints done |
| - Security Groups | | - core endpoints done |
| - Tags | | - all endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| ECR | @mock_ecr | basic endpoints done |
+---------------------------+-----------------------+------------------------------------+
| ECS | @mock_ecs | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| ELB | @mock_elb | core endpoints done |
| | @mock_elbv2 | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| ELBv2 | @mock_elbv2 | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| EMR | @mock_emr | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Glacier | @mock_glacier | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| IAM | @mock_iam | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
| Lambda | @mock_lambda | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| IoT | @mock_iot | core endpoints done |
| | @mock_iotdata | core endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Kinesis | @mock_kinesis | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| KMS | @mock_kms | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Lambda | @mock_lambda | basic endpoints done, |
| | | requires docker |
+---------------------------+-----------------------+------------------------------------+
| Logs | @mock_logs | basic endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Organizations | @mock_organizations | some core endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Polly | @mock_polly | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| RDS | @mock_rds | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| RDS2 | @mock_rds2 | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Redshift | @mock_redshift | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| Route53 | @mock_route53 | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| S3 | @mock_s3 | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
| SES | @mock_ses | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
| SNS | @mock_sns | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| SecretsManager | @mock_secretsmanager | basic endpoints done |
+---------------------------+-----------------------+------------------------------------+
| SES | @mock_ses | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| SNS | @mock_sns | all endpoints done |
+---------------------------+-----------------------+------------------------------------+
| SQS | @mock_sqs | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| SSM | @mock_ssm | core endpoints done |
+---------------------------+-----------------------+------------------------------------+
| STS | @mock_sts | core endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| SWF | @mock_swf | basic endpoints done |
+-----------------------+---------------------+-----------------------------------+
+---------------------------+-----------------------+------------------------------------+
| X-Ray | @mock_xray | all endpoints done |
+---------------------------+-----------------------+------------------------------------+

moto/__init__.py

@@ -3,7 +3,7 @@ import logging
# logging.getLogger('boto').setLevel(logging.CRITICAL)
__title__ = 'moto'
__version__ = '1.3.7'
__version__ = '1.3.9'
from .acm import mock_acm # flake8: noqa
from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa
@@ -13,6 +13,7 @@ from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated
from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa
from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated # flake8: noqa
from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated # flake8: noqa
from .config import mock_config # flake8: noqa
from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa
from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa
from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa
@@ -35,6 +36,7 @@ from .polly import mock_polly # flake8: noqa
from .rds import mock_rds, mock_rds_deprecated # flake8: noqa
from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa
from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa
from .resourcegroups import mock_resourcegroups # flake8: noqa
from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa
from .ses import mock_ses, mock_ses_deprecated # flake8: noqa
from .secretsmanager import mock_secretsmanager # flake8: noqa

moto/autoscaling/models.py

@@ -1,4 +1,7 @@
from __future__ import unicode_literals
import random
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
@@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel):
self.autoscaling_backend = autoscaling_backend
self.name = name
if not availability_zones and not vpc_zone_identifier:
raise AutoscalingClientError(
"ValidationError",
"At least one Availability Zone or VPC Subnet is required."
)
self.availability_zones = availability_zones
self.vpc_zone_identifier = vpc_zone_identifier
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier)
self.max_size = max_size
self.min_size = min_size
@@ -188,6 +185,35 @@
self.tags = tags if tags else []
self.set_desired_capacity(desired_capacity)
def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
# for updates, if only AZs are provided, they must not clash with
# the AZs of existing VPCs
if update and availability_zones and not vpc_zone_identifier:
vpc_zone_identifier = self.vpc_zone_identifier
if vpc_zone_identifier:
# extract azs for vpcs
subnet_ids = vpc_zone_identifier.split(',')
subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids)
vpc_zones = [subnet.availability_zone for subnet in subnets]
if availability_zones and set(availability_zones) != set(vpc_zones):
raise AutoscalingClientError(
"ValidationError",
"The availability zones of the specified subnets and the Auto Scaling group do not match",
)
availability_zones = vpc_zones
elif not availability_zones:
if not update:
raise AutoscalingClientError(
"ValidationError",
"At least one Availability Zone or VPC Subnet is required."
)
return
self.availability_zones = availability_zones
self.vpc_zone_identifier = vpc_zone_identifier
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@@ -246,8 +272,8 @@
health_check_period, health_check_type,
placement_group, termination_policies,
new_instances_protected_from_scale_in=None):
if availability_zones:
self.availability_zones = availability_zones
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True)
if max_size is not None:
self.max_size = max_size
if min_size is not None:
@@ -257,8 +283,6 @@
self.launch_config = self.autoscaling_backend.launch_configurations[
launch_config_name]
self.launch_config_name = launch_config_name
if vpc_zone_identifier is not None:
self.vpc_zone_identifier = vpc_zone_identifier
if health_check_period is not None:
self.health_check_period = health_check_period
if health_check_type is not None:
@@ -319,7 +343,8 @@
self.launch_config.user_data,
self.launch_config.security_groups,
instance_type=self.launch_config.instance_type,
tags={'instance': propagated_tags}
tags={'instance': propagated_tags},
placement=random.choice(self.availability_zones),
)
for instance in reservation.instances:
instance.autoscaling_group = self

moto/autoscaling/responses.py

@@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<AttachLoadBalancerTargetGroups
<AttachLoadBalancerTargetGroupsResult>
</AttachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancerTargetGroupsResponse>"""
@@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscalin
<AttachInstancesResult>
</AttachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachInstancesResponse>"""
@@ -428,7 +428,7 @@ DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """<DescribeLoadBalancerTargetGroupsRespo
</LoadBalancerTargetGroups>
</DescribeLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancerTargetGroupsResponse>"""
@@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscalin
</Activities>
</DetachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachInstancesResponse>"""
@@ -462,7 +462,7 @@ DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<DetachLoadBalancerTargetGroups
<DetachLoadBalancerTargetGroupsResult>
</DetachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancerTargetGroupsResponse>"""
@@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
{% for instance_state in group.instance_states %}
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesRespon
<member>
<HealthStatus>{{ instance_state.health_status }}</HealthStatus>
<AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
<AvailabilityZone>us-east-1e</AvailabilityZone>
<AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
<InstanceId>{{ instance_state.instance.id }}</InstanceId>
<LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
<LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
@@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscali
ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancersResult></AttachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancersResponse>"""
@@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
</LoadBalancers>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>"""
DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancersResult></DetachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancersResponse>"""
@@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """<SuspendProcessesResponse xmlns="http://autoscal
SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceHealthResponse></SetInstanceHealthResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceHealthResponse>"""
SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceProtectionResult></SetInstanceProtectionResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceProtectionResponse>"""

moto/awslambda/models.py

@@ -30,7 +30,7 @@ from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .utils import make_function_arn
from .utils import make_function_arn, make_function_ver_arn
logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ except ImportError:
_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
_orig_adapter_send = requests.adapters.HTTPAdapter.send
docker_3 = docker.__version__.startswith("3")
docker_3 = docker.__version__[0] >= '3'
def zip2tar(zip_bytes):
@@ -215,12 +215,12 @@ class LambdaFunction(BaseModel):
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)
self.tags = dict()
def set_version(self, version):
self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
self.version = version
self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
@@ -504,6 +504,14 @@ class LambdaStorage(object):
except ValueError:
return self._functions[name]['latest']
def list_versions_by_function(self, name):
if name not in self._functions:
return None
latest = copy.copy(self._functions[name]['latest'])
latest.function_arn += ':$LATEST'
return [latest] + self._functions[name]['versions']
def get_arn(self, arn):
return self._arns.get(arn, None)
@@ -534,6 +542,7 @@ class LambdaStorage(object):
fn.set_version(new_version)
self._functions[name]['versions'].append(fn)
self._arns[fn.function_arn] = fn
return fn
def del_function(self, name, qualifier=None):
@@ -603,6 +612,9 @@ class LambdaBackend(BaseBackend):
self._lambdas.put_function(fn)
if spec.get('Publish'):
ver = self.publish_function(function_name)
fn.version = ver.version
return fn
def publish_function(self, function_name):
@@ -611,6 +623,9 @@
def get_function(self, function_name, qualifier=None):
return self._lambdas.get_function(function_name, qualifier)
def list_versions_by_function(self, function_name):
return self._lambdas.list_versions_by_function(function_name)
def get_function_by_arn(self, function_arn):
return self._lambdas.get_arn(function_arn)

moto/awslambda/responses.py

@@ -52,7 +52,11 @@ class LambdaResponse(BaseResponse):
self.setup_class(request, full_url, headers)
if request.method == 'GET':
# This is ListVersionsByFunction
raise ValueError("Cannot handle request")
path = request.path if hasattr(request, 'path') else path_url(request.url)
function_name = path.split('/')[-2]
return self._list_versions_by_function(function_name)
elif request.method == 'POST':
return self._publish_function(request, full_url, headers)
else:
@@ -146,11 +150,24 @@
for fn in self.lambda_backend.list_functions():
json_data = fn.get_configuration()
json_data['Version'] = '$LATEST'
result['Functions'].append(json_data)
return 200, {}, json.dumps(result)
def _list_versions_by_function(self, function_name):
result = {
'Versions': []
}
functions = self.lambda_backend.list_versions_by_function(function_name)
if functions:
for fn in functions:
json_data = fn.get_configuration()
result['Versions'].append(json_data)
return 200, {}, json.dumps(result)
def _create_function(self, request, full_url, headers):
try:
fn = self.lambda_backend.create_function(self.json_body)
@@ -166,7 +183,7 @@
fn = self.lambda_backend.publish_function(function_name)
if fn:
config = fn.get_configuration()
return 200, {}, json.dumps(config)
return 201, {}, json.dumps(config)
else:
return 404, {}, "{}"
@@ -187,7 +204,10 @@
if fn:
code = fn.get_code()
if qualifier is None or qualifier == '$LATEST':
code['Configuration']['Version'] = '$LATEST'
if qualifier == '$LATEST':
code['Configuration']['FunctionArn'] += ':$LATEST'
return 200, {}, json.dumps(code)
else:
return 404, {}, "{}"

moto/awslambda/utils.py

@@ -3,8 +3,13 @@ from collections import namedtuple
ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])
def make_function_arn(region, account, name, version='1'):
return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version)
def make_function_arn(region, account, name):
return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name)
def make_function_ver_arn(region, account, name, version='1'):
arn = make_function_arn(region, account, name)
return '{0}:{1}'.format(arn, version)
def split_function_arn(arn):

moto/backends.py

@@ -32,6 +32,7 @@ from moto.organizations import organizations_backends
from moto.polly import polly_backends
from moto.rds2 import rds2_backends
from moto.redshift import redshift_backends
from moto.resourcegroups import resourcegroups_backends
from moto.route53 import route53_backends
from moto.s3 import s3_backends
from moto.ses import ses_backends
@@ -46,7 +47,7 @@ from moto.iot import iot_backends
from moto.iotdata import iotdata_backends
from moto.batch import batch_backends
from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends
from moto.config import config_backends
BACKENDS = {
'acm': acm_backends,
@@ -57,6 +58,7 @@ BACKENDS = {
'cloudwatch': cloudwatch_backends,
'cognito-identity': cognitoidentity_backends,
'cognito-idp': cognitoidp_backends,
'config': config_backends,
'datapipeline': datapipeline_backends,
'dynamodb': dynamodb_backends,
'dynamodb2': dynamodb_backends2,
@@ -80,6 +82,7 @@ BACKENDS = {
'organizations': organizations_backends,
'polly': polly_backends,
'redshift': redshift_backends,
'resource-groups': resourcegroups_backends,
'rds': rds2_backends,
's3': s3_backends,
's3bucket_path': s3_backends,

moto/cloudformation/models.py

@@ -1,5 +1,5 @@
from __future__ import unicode_literals
from datetime import datetime
from datetime import datetime, timedelta
import json
import yaml
import uuid
@@ -12,12 +12,156 @@ from .parsing import ResourceMap, OutputMap
from .utils import (
generate_changeset_id,
generate_stack_id,
generate_stackset_arn,
generate_stackset_id,
yaml_tag_constructor,
validate_template_cfn_lint,
)
from .exceptions import ValidationError
class FakeStackSet(BaseModel):
def __init__(self, stackset_id, name, template, region='us-east-1',
status='ACTIVE', description=None, parameters=None, tags=None,
admin_role='AWSCloudFormationStackSetAdministrationRole',
execution_role='AWSCloudFormationStackSetExecutionRole'):
self.id = stackset_id
self.arn = generate_stackset_arn(stackset_id, region)
self.name = name
self.template = template
self.description = description
self.parameters = parameters
self.tags = tags
self.admin_role = admin_role
self.execution_role = execution_role
self.status = status
self.instances = FakeStackInstances(parameters, self.id, self.name)
self.stack_instances = self.instances.stack_instances
self.operations = []
def _create_operation(self, operation_id, action, status, accounts=[], regions=[]):
operation = {
'OperationId': str(operation_id),
'Action': action,
'Status': status,
'CreationTimestamp': datetime.now(),
'EndTimestamp': datetime.now() + timedelta(minutes=2),
'Instances': [{account: region} for account in accounts for region in regions],
}
self.operations += [operation]
return operation
def get_operation(self, operation_id):
for operation in self.operations:
if operation_id == operation['OperationId']:
return operation
raise ValidationError(operation_id)
def update_operation(self, operation_id, status):
operation = self.get_operation(operation_id)
operation['Status'] = status
return operation_id
def delete(self):
self.status = 'DELETED'
def update(self, template, description, parameters, tags, admin_role,
execution_role, accounts, regions, operation_id=None):
if not operation_id:
operation_id = uuid.uuid4()
self.template = template if template else self.template
self.description = description if description is not None else self.description
self.parameters = parameters if parameters else self.parameters
self.tags = tags if tags else self.tags
self.admin_role = admin_role if admin_role else self.admin_role
self.execution_role = execution_role if execution_role else self.execution_role
if accounts and regions:
self.update_instances(accounts, regions, self.parameters)
operation = self._create_operation(operation_id=operation_id,
action='UPDATE', status='SUCCEEDED', accounts=accounts,
regions=regions)
return operation
def create_stack_instances(self, accounts, regions, parameters, operation_id=None):
if not operation_id:
operation_id = uuid.uuid4()
if not parameters:
parameters = self.parameters
self.instances.create_instances(accounts, regions, parameters, operation_id)
self._create_operation(operation_id=operation_id, action='CREATE',
status='SUCCEEDED', accounts=accounts, regions=regions)
def delete_stack_instances(self, accounts, regions, operation_id=None):
if not operation_id:
operation_id = uuid.uuid4()
self.instances.delete(accounts, regions)
operation = self._create_operation(operation_id=operation_id, action='DELETE',
status='SUCCEEDED', accounts=accounts, regions=regions)
return operation
def update_instances(self, accounts, regions, parameters, operation_id=None):
if not operation_id:
operation_id = uuid.uuid4()
self.instances.update(accounts, regions, parameters)
operation = self._create_operation(operation_id=operation_id,
action='UPDATE', status='SUCCEEDED', accounts=accounts,
regions=regions)
return operation
class FakeStackInstances(BaseModel):
def __init__(self, parameters, stackset_id, stackset_name):
self.parameters = parameters if parameters else {}
self.stackset_id = stackset_id
self.stack_name = "StackSet-{}".format(stackset_id)
self.stackset_name = stackset_name
self.stack_instances = []
def create_instances(self, accounts, regions, parameters, operation_id):
new_instances = []
for region in regions:
for account in accounts:
instance = {
'StackId': generate_stack_id(self.stack_name, region, account),
'StackSetId': self.stackset_id,
'Region': region,
'Account': account,
'Status': "CURRENT",
'ParameterOverrides': parameters if parameters else [],
}
new_instances.append(instance)
self.stack_instances += new_instances
return new_instances
def update(self, accounts, regions, parameters):
for account in accounts:
for region in regions:
instance = self.get_instance(account, region)
if parameters:
instance['ParameterOverrides'] = parameters
else:
instance['ParameterOverrides'] = []
def delete(self, accounts, regions):
for i, instance in enumerate(self.stack_instances):
if instance['Region'] in regions and instance['Account'] in accounts:
self.stack_instances.pop(i)
def get_instance(self, account, region):
for i, instance in enumerate(self.stack_instances):
if instance['Region'] == region and instance['Account'] == account:
return self.stack_instances[i]
class FakeStack(BaseModel):
def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
@@ -85,9 +229,9 @@
def _parse_template(self):
yaml.add_multi_constructor('', yaml_tag_constructor)
try:
self.template_dict = yaml.load(self.template)
self.template_dict = yaml.load(self.template, Loader=yaml.Loader)
except yaml.parser.ParserError:
self.template_dict = json.loads(self.template)
self.template_dict = json.loads(self.template, Loader=yaml.Loader)
@property
def stack_parameters(self):
@@ -127,6 +271,49 @@
self.status = "DELETE_COMPLETE"
class FakeChange(BaseModel):
def __init__(self, action, logical_resource_id, resource_type):
self.action = action
self.logical_resource_id = logical_resource_id
self.resource_type = resource_type
class FakeChangeSet(FakeStack):
def __init__(self, stack_id, stack_name, stack_template, change_set_id, change_set_name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None):
super(FakeChangeSet, self).__init__(
stack_id,
stack_name,
stack_template,
parameters,
region_name,
notification_arns=notification_arns,
tags=tags,
role_arn=role_arn,
cross_stack_resources=cross_stack_resources,
create_change_set=True,
)
self.stack_name = stack_name
self.change_set_id = change_set_id
self.change_set_name = change_set_name
self.changes = self.diff(template=template, parameters=parameters)
def diff(self, template, parameters=None):
self.template = template
self._parse_template()
changes = []
resources_by_action = self.resource_map.diff(self.template_dict, parameters)
for action, resources in resources_by_action.items():
for resource_name, resource in resources.items():
changes.append(FakeChange(
action=action,
logical_resource_id=resource_name,
resource_type=resource['ResourceType'],
))
return changes
class FakeEvent(BaseModel):
def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):
@@ -146,10 +333,72 @@ class CloudFormationBackend(BaseBackend):
def __init__(self):
self.stacks = OrderedDict()
self.stacksets = OrderedDict()
self.deleted_stacks = {}
self.exports = OrderedDict()
self.change_sets = OrderedDict()
def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None):
stackset_id = generate_stackset_id(name)
new_stackset = FakeStackSet(
stackset_id=stackset_id,
name=name,
template=template,
parameters=parameters,
description=description,
tags=tags,
admin_role=admin_role,
execution_role=execution_role,
)
self.stacksets[stackset_id] = new_stackset
return new_stackset
def get_stack_set(self, name):
stacksets = self.stacksets.keys()
for stackset in stacksets:
if self.stacksets[stackset].name == name:
return self.stacksets[stackset]
raise ValidationError(name)
def delete_stack_set(self, name):
stacksets = self.stacksets.keys()
for stackset in stacksets:
if self.stacksets[stackset].name == name:
self.stacksets[stackset].delete()
def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None):
stackset = self.get_stack_set(stackset_name)
stackset.create_stack_instances(
accounts=accounts,
regions=regions,
parameters=parameters,
operation_id=operation_id,
)
return stackset
def update_stack_set(self, stackset_name, template=None, description=None,
parameters=None, tags=None, admin_role=None, execution_role=None,
accounts=None, regions=None, operation_id=None):
stackset = self.get_stack_set(stackset_name)
update = stackset.update(
template=template,
description=description,
parameters=parameters,
tags=tags,
admin_role=admin_role,
execution_role=execution_role,
accounts=accounts,
regions=regions,
operation_id=operation_id
)
return update
def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None):
stackset = self.get_stack_set(stackset_name)
stackset.delete_stack_instances(accounts, regions, operation_id)
return stackset
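Taken together, the StackSet plumbing above can be driven end to end through boto3 once the mock is active. A minimal sketch, assuming the existing mock_cloudformation decorator; the set name, account, and regions are illustrative:

import boto3
from moto import mock_cloudformation


@mock_cloudformation
def exercise_stack_sets():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack_set(StackSetName="demo-set", TemplateBody='{"Resources": {}}')
    # One account fanned out across two regions yields two stack instances
    cf.create_stack_instances(
        StackSetName="demo-set",
        Accounts=["123456789012"],
        Regions=["us-east-1", "eu-west-1"],
    )
    summaries = cf.list_stack_instances(StackSetName="demo-set")["Summaries"]
    assert len(summaries) == 2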
def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
stack_id = generate_stack_id(name)
new_stack = FakeStack(
@ -171,24 +420,62 @@ class CloudFormationBackend(BaseBackend):
return new_stack
def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
stack_id = None
stack_template = None
if change_set_type == 'UPDATE':
stacks = self.stacks.values()
stack = None
for s in stacks:
if s.name == stack_name:
stack = s
stack_id = stack.stack_id
stack_template = stack.template
if stack is None:
raise ValidationError(stack_name)
else:
stack = self.create_stack(stack_name, template, parameters,
region_name, notification_arns, tags,
role_arn, create_change_set=True)
stack_id = generate_stack_id(stack_name)
stack_template = template
change_set_id = generate_changeset_id(change_set_name, region_name)
self.stacks[change_set_name] = {'Id': change_set_id,
'StackId': stack.stack_id}
self.change_sets[change_set_id] = stack
return change_set_id, stack.stack_id
new_change_set = FakeChangeSet(
stack_id=stack_id,
stack_name=stack_name,
stack_template=stack_template,
change_set_id=change_set_id,
change_set_name=change_set_name,
template=template,
parameters=parameters,
region_name=region_name,
notification_arns=notification_arns,
tags=tags,
role_arn=role_arn,
cross_stack_resources=self.exports
)
self.change_sets[change_set_id] = new_change_set
self.stacks[stack_id] = new_change_set
return change_set_id, stack_id
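A minimal sketch of driving this path through boto3, assuming the existing mock_cloudformation decorator; the stack name, change-set name, and single-queue template are illustrative:

import json

import boto3
from moto import mock_cloudformation

TEMPLATE = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {"Queue": {"Type": "AWS::SQS::Queue",
                            "Properties": {"QueueName": "demo-queue"}}},
}


@mock_cloudformation
def exercise_change_sets():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    created = cf.create_change_set(
        StackName="demo-stack",
        ChangeSetName="demo-change-set",
        ChangeSetType="CREATE",
        TemplateBody=json.dumps(TEMPLATE),
    )
    # describe_change_set accepts the ARN returned by create_change_set,
    # since change_sets is keyed by the generated change-set id
    described = cf.describe_change_set(ChangeSetName=created["Id"])
    assert described["ChangeSetName"] == "demo-change-set"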
def delete_change_set(self, change_set_name, stack_name=None):
if change_set_name in self.change_sets:
# This means the ARN was passed in
del self.change_sets[change_set_name]
else:
# Iterate over a copy, since we delete from the dict while looping
for cs in list(self.change_sets):
if self.change_sets[cs].change_set_name == change_set_name:
del self.change_sets[cs]
def describe_change_set(self, change_set_name, stack_name=None):
change_set = None
if change_set_name in self.change_sets:
# This means the ARN was passed in
change_set = self.change_sets[change_set_name]
else:
for cs in self.change_sets:
if self.change_sets[cs].change_set_name == change_set_name:
change_set = self.change_sets[cs]
if change_set is None:
raise ValidationError(change_set_name)
return change_set
def execute_change_set(self, change_set_name, stack_name=None):
stack = None
@ -197,7 +484,7 @@ class CloudFormationBackend(BaseBackend):
stack = self.change_sets[change_set_name]
else:
for cs in self.change_sets:
if self.change_sets[cs].name == change_set_name:
if self.change_sets[cs].change_set_name == change_set_name:
stack = self.change_sets[cs]
if stack is None:
raise ValidationError(stack_name)
@ -223,6 +510,9 @@ class CloudFormationBackend(BaseBackend):
else:
return list(stacks)
def list_change_sets(self):
return self.change_sets.values()
def list_stacks(self):
return [
v for v in self.stacks.values()

View File

@ -12,7 +12,7 @@ from moto.batch import models as batch_models
from moto.cloudwatch import models as cloudwatch_models
from moto.cognitoidentity import models as cognitoidentity_models
from moto.datapipeline import models as datapipeline_models
from moto.dynamodb import models as dynamodb_models
from moto.dynamodb2 import models as dynamodb2_models
from moto.ec2 import models as ec2_models
from moto.ecs import models as ecs_models
from moto.elb import models as elb_models
@ -37,7 +37,7 @@ MODEL_MAP = {
"AWS::Batch::JobDefinition": batch_models.JobDefinition,
"AWS::Batch::JobQueue": batch_models.JobQueue,
"AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
"AWS::DynamoDB::Table": dynamodb_models.Table,
"AWS::DynamoDB::Table": dynamodb2_models.Table,
"AWS::Kinesis::Stream": kinesis_models.Stream,
"AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,
"AWS::Lambda::Function": lambda_models.LambdaFunction,
@ -246,7 +246,8 @@ def resource_name_property_from_type(resource_type):
def generate_resource_name(resource_type, stack_name, logical_id):
if resource_type == "AWS::ElasticLoadBalancingV2::TargetGroup":
if resource_type in ["AWS::ElasticLoadBalancingV2::TargetGroup",
"AWS::ElasticLoadBalancingV2::LoadBalancer"]:
# Target group and load balancer names need to be less than 32 characters, so when CloudFormation
# creates a name for you it makes sure to stay under that limit
name_prefix = '{0}-{1}'.format(stack_name, logical_id)
@ -425,11 +426,18 @@ class ResourceMap(collections.Mapping):
self.resolved_parameters[parameter_name] = parameter.get('Default')
# Set any input parameters that were passed
self.no_echo_parameter_keys = []
for key, value in self.input_parameters.items():
if key in self.resolved_parameters:
value_type = parameter_slots[key].get('Type', 'String')
parameter_slot = parameter_slots[key]
value_type = parameter_slot.get('Type', 'String')
if value_type == 'CommaDelimitedList' or value_type.startswith("List"):
value = value.split(',')
if parameter_slot.get('NoEcho'):
self.no_echo_parameter_keys.append(key)
self.resolved_parameters[key] = value
# Check if there are any non-default params that were not passed input
@ -465,36 +473,70 @@ class ResourceMap(collections.Mapping):
ec2_models.ec2_backends[self._region_name].create_tags(
[self[resource].physical_resource_id], self.tags)
def update(self, template, parameters=None):
def diff(self, template, parameters=None):
if parameters:
self.input_parameters = parameters
self.load_mapping()
self.load_parameters()
self.load_conditions()
old_template = self._resource_json_map
new_template = template['Resources']
resource_names_by_action = {
'Add': set(new_template) - set(old_template),
'Modify': set(name for name in new_template if name in old_template and new_template[
name] != old_template[name]),
'Remove': set(old_template) - set(new_template)
}
resources_by_action = {
'Add': {},
'Modify': {},
'Remove': {},
}
for resource_name in resource_names_by_action['Add']:
resources_by_action['Add'][resource_name] = {
'LogicalResourceId': resource_name,
'ResourceType': new_template[resource_name]['Type']
}
for resource_name in resource_names_by_action['Modify']:
resources_by_action['Modify'][resource_name] = {
'LogicalResourceId': resource_name,
'ResourceType': new_template[resource_name]['Type']
}
for resource_name in resource_names_by_action['Remove']:
resources_by_action['Remove'][resource_name] = {
'LogicalResourceId': resource_name,
'ResourceType': old_template[resource_name]['Type']
}
return resources_by_action
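The classification above is plain set arithmetic over the two Resources mappings. A standalone sketch with toy template bodies:

old = {"A": {"Type": "AWS::SQS::Queue"},
       "B": {"Type": "AWS::SNS::Topic"}}
new = {"B": {"Type": "AWS::SNS::Topic", "Properties": {"TopicName": "t"}},
       "C": {"Type": "AWS::S3::Bucket"}}

assert set(new) - set(old) == {"C"}                                   # Add
assert set(n for n in new if n in old and new[n] != old[n]) == {"B"}  # Modify
assert set(old) - set(new) == {"A"}                                   # Remove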
def update(self, template, parameters=None):
resources_by_action = self.diff(template, parameters)
old_template = self._resource_json_map
new_template = template['Resources']
self._resource_json_map = new_template
new_resource_names = set(new_template) - set(old_template)
for resource_name in new_resource_names:
for resource_name, resource in resources_by_action['Add'].items():
resource_json = new_template[resource_name]
new_resource = parse_and_create_resource(
resource_name, resource_json, self, self._region_name)
self._parsed_resources[resource_name] = new_resource
removed_resource_names = set(old_template) - set(new_template)
for resource_name in removed_resource_names:
for resource_name, resource in resources_by_action['Remove'].items():
resource_json = old_template[resource_name]
parse_and_delete_resource(
resource_name, resource_json, self, self._region_name)
self._parsed_resources.pop(resource_name)
resources_to_update = set(name for name in new_template if name in old_template and new_template[
name] != old_template[name])
tries = 1
while resources_to_update and tries < 5:
for resource_name in resources_to_update.copy():
while resources_by_action['Modify'] and tries < 5:
for resource_name, resource in resources_by_action['Modify'].copy().items():
resource_json = new_template[resource_name]
try:
changed_resource = parse_and_update_resource(
@ -505,7 +547,7 @@ class ResourceMap(collections.Mapping):
last_exception = e
else:
self._parsed_resources[resource_name] = changed_resource
resources_to_update.remove(resource_name)
del resources_by_action['Modify'][resource_name]
tries += 1
if tries == 5:
raise last_exception

View File

@ -120,6 +120,31 @@ class CloudFormationResponse(BaseResponse):
template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE)
return template.render(stack_id=stack_id, change_set_id=change_set_id)
def delete_change_set(self):
stack_name = self._get_param('StackName')
change_set_name = self._get_param('ChangeSetName')
self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name)
if self.request_json:
return json.dumps({
'DeleteChangeSetResponse': {
'DeleteChangeSetResult': {},
}
})
else:
template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE)
return template.render()
def describe_change_set(self):
stack_name = self._get_param('StackName')
change_set_name = self._get_param('ChangeSetName')
change_set = self.cloudformation_backend.describe_change_set(
change_set_name=change_set_name,
stack_name=stack_name,
)
template = self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE)
return template.render(change_set=change_set)
@amzn_request_id
def execute_change_set(self):
stack_name = self._get_param('StackName')
@ -187,6 +212,11 @@ class CloudFormationResponse(BaseResponse):
template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE)
return template.render(stack=stack)
def list_change_sets(self):
change_sets = self.cloudformation_backend.list_change_sets()
template = self.response_template(LIST_CHANGE_SETS_RESPONSE)
return template.render(change_sets=change_sets)
def list_stacks(self):
stacks = self.cloudformation_backend.list_stacks()
template = self.response_template(LIST_STACKS_RESPONSE)
@ -312,6 +342,175 @@ class CloudFormationResponse(BaseResponse):
template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE)
return template.render(description=description)
def create_stack_set(self):
stackset_name = self._get_param('StackSetName')
stack_body = self._get_param('TemplateBody')
template_url = self._get_param('TemplateURL')
# role_arn = self._get_param('RoleARN')
parameters_list = self._get_list_prefix("Parameters.member")
tags = dict((item['key'], item['value'])
for item in self._get_list_prefix("Tags.member"))
# Collapse the list of parameter dicts into a single key -> value mapping
parameters = dict([
(parameter['parameter_key'], parameter['parameter_value'])
for parameter
in parameters_list
])
if template_url:
stack_body = self._get_stack_from_s3_url(template_url)
stackset = self.cloudformation_backend.create_stack_set(
name=stackset_name,
template=stack_body,
parameters=parameters,
tags=tags,
# role_arn=role_arn,
)
if self.request_json:
return json.dumps({
'CreateStackSetResponse': {
'CreateStackSetResult': {
'StackSetId': stackset.stackset_id,
}
}
})
else:
template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE)
return template.render(stackset=stackset)
def create_stack_instances(self):
stackset_name = self._get_param('StackSetName')
accounts = self._get_multi_param('Accounts.member')
regions = self._get_multi_param('Regions.member')
parameters = self._get_multi_param('ParameterOverrides.member')
self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters)
template = self.response_template(CREATE_STACK_INSTANCES_TEMPLATE)
return template.render()
def delete_stack_set(self):
stackset_name = self._get_param('StackSetName')
self.cloudformation_backend.delete_stack_set(stackset_name)
template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE)
return template.render()
def delete_stack_instances(self):
stackset_name = self._get_param('StackSetName')
accounts = self._get_multi_param('Accounts.member')
regions = self._get_multi_param('Regions.member')
operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions)
template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE)
return template.render(operation=operation)
def describe_stack_set(self):
stackset_name = self._get_param('StackSetName')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
if not stackset.admin_role:
stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole'
if not stackset.execution_role:
stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole'
template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE)
return template.render(stackset=stackset)
def describe_stack_instance(self):
stackset_name = self._get_param('StackSetName')
account = self._get_param('StackInstanceAccount')
region = self._get_param('StackInstanceRegion')
instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region)
template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE)
rendered = template.render(instance=instance)
return rendered
def list_stack_sets(self):
stacksets = self.cloudformation_backend.stacksets
template = self.response_template(LIST_STACK_SETS_TEMPLATE)
return template.render(stacksets=stacksets)
def list_stack_instances(self):
stackset_name = self._get_param('StackSetName')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE)
return template.render(stackset=stackset)
def list_stack_set_operations(self):
stackset_name = self._get_param('StackSetName')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE)
return template.render(stackset=stackset)
def stop_stack_set_operation(self):
stackset_name = self._get_param('StackSetName')
operation_id = self._get_param('OperationId')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
stackset.update_operation(operation_id, 'STOPPED')
template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE)
return template.render()
def describe_stack_set_operation(self):
stackset_name = self._get_param('StackSetName')
operation_id = self._get_param('OperationId')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
operation = stackset.get_operation(operation_id)
template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE)
return template.render(stackset=stackset, operation=operation)
def list_stack_set_operation_results(self):
stackset_name = self._get_param('StackSetName')
operation_id = self._get_param('OperationId')
stackset = self.cloudformation_backend.get_stack_set(stackset_name)
operation = stackset.get_operation(operation_id)
template = self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE)
return template.render(operation=operation)
def update_stack_set(self):
stackset_name = self._get_param('StackSetName')
operation_id = self._get_param('OperationId')
description = self._get_param('Description')
execution_role = self._get_param('ExecutionRoleName')
admin_role = self._get_param('AdministrationRoleARN')
accounts = self._get_multi_param('Accounts.member')
regions = self._get_multi_param('Regions.member')
template_body = self._get_param('TemplateBody')
template_url = self._get_param('TemplateURL')
if template_url:
template_body = self._get_stack_from_s3_url(template_url)
tags = dict((item['key'], item['value'])
for item in self._get_list_prefix("Tags.member"))
parameters_list = self._get_list_prefix("Parameters.member")
parameters = dict([
(parameter['parameter_key'], parameter['parameter_value'])
for parameter
in parameters_list
])
operation = self.cloudformation_backend.update_stack_set(
stackset_name=stackset_name,
template=template_body,
description=description,
parameters=parameters,
tags=tags,
admin_role=admin_role,
execution_role=execution_role,
accounts=accounts,
regions=regions,
operation_id=operation_id
)
template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE)
return template.render(operation=operation)
def update_stack_instances(self):
stackset_name = self._get_param('StackSetName')
accounts = self._get_multi_param('Accounts.member')
regions = self._get_multi_param('Regions.member')
parameters = self._get_multi_param('ParameterOverrides.member')
operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters)
template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE)
return template.render(operation=operation)
VALIDATE_STACK_RESPONSE_TEMPLATE = """<ValidateTemplateResponse>
<ValidateTemplateResult>
@ -354,6 +553,66 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """<CreateStackResponse>
</CreateStackResponse>
"""
DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """<DeleteChangeSetResponse>
<DeleteChangeSetResult>
</DeleteChangeSetResult>
<ResponseMetadata>
<RequestId>3d3200a1-810e-3023-6cc3-example</RequestId>
</ResponseMetadata>
</DeleteChangeSetResponse>
"""
DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """<DescribeChangeSetResponse>
<DescribeChangeSetResult>
<ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
<ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
<StackId>{{ change_set.stack_id }}</StackId>
<StackName>{{ change_set.stack_name }}</StackName>
<Description>{{ change_set.description }}</Description>
<Parameters>
{% for param_name, param_value in change_set.stack_parameters.items() %}
<member>
<ParameterKey>{{ param_name }}</ParameterKey>
<ParameterValue>{{ param_value }}</ParameterValue>
</member>
{% endfor %}
</Parameters>
<CreationTime>2011-05-23T15:47:44Z</CreationTime>
<ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
<Status>{{ change_set.status }}</Status>
<StatusReason>{{ change_set.status_reason }}</StatusReason>
{% if change_set.notification_arns %}
<NotificationARNs>
{% for notification_arn in change_set.notification_arns %}
<member>{{ notification_arn }}</member>
{% endfor %}
</NotificationARNs>
{% else %}
<NotificationARNs/>
{% endif %}
{% if change_set.role_arn %}
<RoleARN>{{ change_set.role_arn }}</RoleARN>
{% endif %}
{% if change_set.changes %}
<Changes>
{% for change in change_set.changes %}
<member>
<Type>Resource</Type>
<ResourceChange>
<Action>{{ change.action }}</Action>
<LogicalResourceId>{{ change.logical_resource_id }}</LogicalResourceId>
<ResourceType>{{ change.resource_type }}</ResourceType>
</ResourceChange>
</member>
{% endfor %}
</Changes>
{% endif %}
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</DescribeChangeSetResult>
</DescribeChangeSetResponse>"""
EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """<ExecuteChangeSetResponse>
<ExecuteChangeSetResult>
<ExecuteChangeSetResult/>
@ -395,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
{% for param_name, param_value in stack.stack_parameters.items() %}
<member>
<ParameterKey>{{ param_name }}</ParameterKey>
{% if param_name in stack.resource_map.no_echo_parameter_keys %}
<ParameterValue>****</ParameterValue>
{% else %}
<ParameterValue>{{ param_value }}</ParameterValue>
{% endif %}
</member>
{% endfor %}
</Parameters>
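A sketch of the behaviour this renders, assuming the existing mock_cloudformation decorator; the parameter and stack names are illustrative. Values for parameters declared with NoEcho come back masked:

import json

import boto3
from moto import mock_cloudformation

TEMPLATE = {
    "Resources": {"Queue": {"Type": "AWS::SQS::Queue",
                            "Properties": {"QueueName": "demo-queue"}}},
    "Parameters": {"Password": {"Type": "String", "NoEcho": True}},
}


@mock_cloudformation
def show_no_echo_masking():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="demo",
        TemplateBody=json.dumps(TEMPLATE),
        Parameters=[{"ParameterKey": "Password", "ParameterValue": "s3cret"}],
    )
    params = cf.describe_stacks(StackName="demo")["Stacks"][0]["Parameters"]
    assert params == [{"ParameterKey": "Password", "ParameterValue": "****"}]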
@ -479,6 +742,27 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """<DescribeStackEventsResponse xmlns="http://c
</DescribeStackEventsResponse>"""
LIST_CHANGE_SETS_RESPONSE = """<ListChangeSetsResponse>
<ListChangeSetsResult>
<Summaries>
{% for change_set in change_sets %}
<member>
<StackId>{{ change_set.stack_id }}</StackId>
<StackName>{{ change_set.stack_name }}</StackName>
<ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
<ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
<ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
<Status>{{ change_set.status }}</Status>
<StatusReason>{{ change_set.status_reason }}</StatusReason>
<CreationTime>2011-05-23T15:47:44Z</CreationTime>
<Description>{{ change_set.description }}</Description>
</member>
{% endfor %}
</Summaries>
</ListChangeSetsResult>
</ListChangeSetsResponse>"""
LIST_STACKS_RESPONSE = """<ListStacksResponse>
<ListStacksResult>
<StackSummaries>
@ -553,3 +837,236 @@ LIST_EXPORTS_RESPONSE = """<ListExportsResponse xmlns="http://cloudformation.ama
<RequestId>5ccc7dcd-744c-11e5-be70-example</RequestId>
</ResponseMetadata>
</ListExportsResponse>"""
CREATE_STACK_SET_RESPONSE_TEMPLATE = """<CreateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<CreateStackSetResult>
<StackSetId>{{ stackset.stackset_id }}</StackSetId>
</CreateStackSetResult>
<ResponseMetadata>
<RequestId>f457258c-391d-41d1-861f-example</RequestId>
</ResponseMetadata>
</CreateStackSetResponse>
"""
DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """<DescribeStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<DescribeStackSetResult>
<StackSet>
<Capabilities/>
<StackSetARN>{{ stackset.arn }}</StackSetARN>
<ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
<AdministrationRoleARN>{{ stackset.admin_role }}</AdministrationRoleARN>
<StackSetId>{{ stackset.id }}</StackSetId>
<TemplateBody>{{ stackset.template }}</TemplateBody>
<StackSetName>{{ stackset.name }}</StackSetName>
<Parameters>
{% for param_name, param_value in stackset.parameters.items() %}
<member>
<ParameterKey>{{ param_name }}</ParameterKey>
<ParameterValue>{{ param_value }}</ParameterValue>
</member>
{% endfor %}
</Parameters>
<Tags>
{% for tag_key, tag_value in stackset.tags.items() %}
<member>
<Key>{{ tag_key }}</Key>
<Value>{{ tag_value }}</Value>
</member>
{% endfor %}
</Tags>
<Status>{{ stackset.status }}</Status>
</StackSet>
</DescribeStackSetResult>
<ResponseMetadata>
<RequestId>d8b64e11-5332-46e1-9603-example</RequestId>
</ResponseMetadata>
</DescribeStackSetResponse>"""
DELETE_STACK_SET_RESPONSE_TEMPLATE = """<DeleteStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<DeleteStackSetResult/>
<ResponseMetadata>
<RequestId>c35ec2d0-d69f-4c4d-9bd7-example</RequestId>
</ResponseMetadata>
</DeleteStackSetResponse>"""
CREATE_STACK_INSTANCES_TEMPLATE = """<CreateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<CreateStackInstancesResult>
<OperationId>1459ad6d-63cc-4c96-a73e-example</OperationId>
</CreateStackInstancesResult>
<ResponseMetadata>
<RequestId>6b29f7e3-69be-4d32-b374-example</RequestId>
</ResponseMetadata>
</CreateStackInstancesResponse>
"""
LIST_STACK_INSTANCES_TEMPLATE = """<ListStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<ListStackInstancesResult>
<Summaries>
{% for instance in stackset.stack_instances %}
<member>
<StackId>{{ instance.StackId }}</StackId>
<StackSetId>{{ instance.StackSetId }}</StackSetId>
<Region>{{ instance.Region }}</Region>
<Account>{{ instance.Account }}</Account>
<Status>{{ instance.Status }}</Status>
</member>
{% endfor %}
</Summaries>
</ListStackInstancesResult>
<ResponseMetadata>
<RequestId>83c27e73-b498-410f-993c-example</RequestId>
</ResponseMetadata>
</ListStackInstancesResponse>
"""
DELETE_STACK_INSTANCES_TEMPLATE = """<DeleteStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<DeleteStackInstancesResult>
<OperationId>{{ operation.OperationId }}</OperationId>
</DeleteStackInstancesResult>
<ResponseMetadata>
<RequestId>e5325090-66f6-4ecd-a531-example</RequestId>
</ResponseMetadata>
</DeleteStackInstancesResponse>
"""
DESCRIBE_STACK_INSTANCE_TEMPLATE = """<DescribeStackInstanceResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<DescribeStackInstanceResult>
<StackInstance>
<StackId>{{ instance.StackId }}</StackId>
<StackSetId>{{ instance.StackSetId }}</StackSetId>
{% if instance.ParameterOverrides %}
<ParameterOverrides>
{% for override in instance.ParameterOverrides %}
{% if override['ParameterKey'] or override['ParameterValue'] %}
<member>
<ParameterKey>{{ override.ParameterKey }}</ParameterKey>
<UsePreviousValue>false</UsePreviousValue>
<ParameterValue>{{ override.ParameterValue }}</ParameterValue>
</member>
{% endif %}
{% endfor %}
</ParameterOverrides>
{% else %}
<ParameterOverrides/>
{% endif %}
<Region>{{ instance.Region }}</Region>
<Account>{{ instance.Account }}</Account>
<Status>{{ instance.Status }}</Status>
</StackInstance>
</DescribeStackInstanceResult>
<ResponseMetadata>
<RequestId>c6c7be10-0343-4319-8a25-example</RequestId>
</ResponseMetadata>
</DescribeStackInstanceResponse>
"""
LIST_STACK_SETS_TEMPLATE = """<ListStackSetsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<ListStackSetsResult>
<Summaries>
{% for key, value in stacksets.items() %}
<member>
<StackSetName>{{ value.name }}</StackSetName>
<StackSetId>{{ value.id }}</StackSetId>
<Status>{{ value.status }}</Status>
</member>
{% endfor %}
</Summaries>
</ListStackSetsResult>
<ResponseMetadata>
<RequestId>4dcacb73-841e-4ed8-b335-example</RequestId>
</ResponseMetadata>
</ListStackSetsResponse>
"""
UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """<UpdateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<UpdateStackInstancesResult>
<OperationId>{{ operation }}</OperationId>
</UpdateStackInstancesResult>
<ResponseMetadata>
<RequestId>bdbf8e94-19b6-4ce4-af85-example</RequestId>
</ResponseMetadata>
</UpdateStackInstancesResponse>
"""
UPDATE_STACK_SET_RESPONSE_TEMPLATE = """<UpdateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<UpdateStackSetResult>
<OperationId>{{ operation.OperationId }}</OperationId>
</UpdateStackSetResult>
<ResponseMetadata>
<RequestId>adac907b-17e3-43e6-a254-example</RequestId>
</ResponseMetadata>
</UpdateStackSetResponse>
"""
LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """<ListStackSetOperationsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<ListStackSetOperationsResult>
<Summaries>
{% for operation in stackset.operations %}
<member>
<CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
<OperationId>{{ operation.OperationId }}</OperationId>
<Action>{{ operation.Action }}</Action>
<EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
<Status>{{ operation.Status }}</Status>
</member>
{% endfor %}
</Summaries>
</ListStackSetOperationsResult>
<ResponseMetadata>
<RequestId>65b9d9be-08bb-4a43-9a21-example</RequestId>
</ResponseMetadata>
</ListStackSetOperationsResponse>
"""
STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """<StopStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<StopStackSetOperationResult/>
<ResponseMetadata>
<RequestId>2188554a-07c6-4396-b2c5-example</RequestId>
</ResponseMetadata>
</StopStackSetOperationResponse>
"""
DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """<DescribeStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<DescribeStackSetOperationResult>
<StackSetOperation>
<ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
<AdministrationRoleARN>arn:aws:iam::123456789012:role/{{ stackset.admin_role }}</AdministrationRoleARN>
<StackSetId>{{ stackset.id }}</StackSetId>
<CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
<OperationId>{{ operation.OperationId }}</OperationId>
<Action>{{ operation.Action }}</Action>
<OperationPreferences>
<RegionOrder/>
</OperationPreferences>
<EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
<Status>{{ operation.Status }}</Status>
</StackSetOperation>
</DescribeStackSetOperationResult>
<ResponseMetadata>
<RequestId>2edc27b6-9ce2-486a-a192-example</RequestId>
</ResponseMetadata>
</DescribeStackSetOperationResponse>
"""
LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """<ListStackSetOperationResultsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
<ListStackSetOperationResultsResult>
<Summaries>
{% for instance in operation.Instances %}
{% for account, region in instance.items() %}
<member>
<AccountGateResult>
<StatusReason>Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate</StatusReason>
<Status>SKIPPED</Status>
</AccountGateResult>
<Region>{{ region }}</Region>
<Account>{{ account }}</Account>
<Status>{{ operation.Status }}</Status>
</member>
{% endfor %}
{% endfor %}
</Summaries>
</ListStackSetOperationResultsResult>
<ResponseMetadata>
<RequestId>ac05a9ce-5f98-4197-a29b-example</RequestId>
</ResponseMetadata>
</ListStackSetOperationResultsResponse>
"""

View File

@ -4,13 +4,14 @@ import six
import random
import yaml
import os
import string
from cfnlint import decode, core
def generate_stack_id(stack_name):
def generate_stack_id(stack_name, region="us-east-1", account="123456789"):
random_id = uuid.uuid4()
return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id)
return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id)
def generate_changeset_id(changeset_name, region_name):
@ -18,9 +19,18 @@ def generate_changeset_id(changeset_name, region_name):
return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id)
def generate_stackset_id(stackset_name):
random_id = uuid.uuid4()
return '{}:{}'.format(stackset_name, random_id)
def generate_stackset_arn(stackset_id, region_name):
return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id)
def random_suffix():
size = 12
chars = list(range(10)) + ['A-Z']
chars = list(range(10)) + list(string.ascii_uppercase)
return ''.join(six.text_type(random.choice(chars)) for x in range(size))
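The change above fixes a subtle bug: ['A-Z'] is a one-element list containing the literal string 'A-Z', not a character range, so the old suffixes could only mix digits with that exact three-character string. A sketch of the difference:

import random
import string

import six

old_chars = list(range(10)) + ['A-Z']
new_chars = list(range(10)) + list(string.ascii_uppercase)

assert 'A-Z' in old_chars      # the literal string, not a range
assert len(new_chars) == 36    # 10 digits plus 26 uppercase letters
suffix = ''.join(six.text_type(random.choice(new_chars)) for _ in range(12))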

View File

@ -275,7 +275,7 @@ GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://m
<Label>{{ label }}</Label>
<Datapoints>
{% for datapoint in datapoints %}
<Datapoint>
<member>
{% if datapoint.sum is not none %}
<Sum>{{ datapoint.sum }}</Sum>
{% endif %}
@ -302,7 +302,7 @@ GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://m
<Timestamp>{{ datapoint.timestamp }}</Timestamp>
<Unit>{{ datapoint.unit }}</Unit>
</Datapoint>
</member>
{% endfor %}
</Datapoints>
</GetMetricStatisticsResult>

View File

@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel):
return user_json
def update_attributes(self, new_attributes):
def flatten_attrs(attrs):
return {attr['Name']: attr['Value'] for attr in attrs}
def expand_attrs(attrs):
return [{'Name': k, 'Value': v} for k, v in attrs.items()]
flat_attributes = flatten_attrs(self.attributes)
flat_attributes.update(flatten_attrs(new_attributes))
self.attributes = expand_attrs(flat_attributes)
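A standalone sketch of the merge semantics above: attributes are flattened to a name/value dict so updated values win, then expanded back into Cognito's list-of-dicts shape. The attribute names are illustrative:

current = [{'Name': 'email', 'Value': 'old@example.com'},
           {'Name': 'locale', 'Value': 'en'}]
updates = [{'Name': 'email', 'Value': 'new@example.com'}]

merged = {attr['Name']: attr['Value'] for attr in current}
merged.update({attr['Name']: attr['Value'] for attr in updates})
result = [{'Name': k, 'Value': v} for k, v in merged.items()]

# locale survives untouched, email is overwritten
assert {'Name': 'email', 'Value': 'new@example.com'} in result
assert {'Name': 'locale', 'Value': 'en'} in result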
class CognitoIdpBackend(BaseBackend):
@ -426,6 +438,19 @@ class CognitoIdpBackend(BaseBackend):
return identity_provider
def update_identity_provider(self, user_pool_id, name, extended_config):
user_pool = self.user_pools.get(user_pool_id)
if not user_pool:
raise ResourceNotFoundError(user_pool_id)
identity_provider = user_pool.identity_providers.get(name)
if not identity_provider:
raise ResourceNotFoundError(name)
identity_provider.extended_config.update(extended_config)
return identity_provider
def delete_identity_provider(self, user_pool_id, name):
user_pool = self.user_pools.get(user_pool_id)
if not user_pool:
@ -660,6 +685,17 @@ class CognitoIdpBackend(BaseBackend):
else:
raise NotAuthorizedError(access_token)
def admin_update_user_attributes(self, user_pool_id, username, attributes):
user_pool = self.user_pools.get(user_pool_id)
if not user_pool:
raise ResourceNotFoundError(user_pool_id)
if username not in user_pool.users:
raise UserNotFoundError(username)
user = user_pool.users[username]
user.update_attributes(attributes)
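A minimal sketch of the new endpoint through boto3, assuming the existing mock_cognitoidp decorator; the pool and user names are illustrative:

import boto3
from moto import mock_cognitoidp


@mock_cognitoidp
def exercise_admin_update():
    client = boto3.client("cognito-idp", region_name="us-east-1")
    pool_id = client.create_user_pool(PoolName="demo-pool")["UserPool"]["Id"]
    client.admin_create_user(UserPoolId=pool_id, Username="alice")
    client.admin_update_user_attributes(
        UserPoolId=pool_id,
        Username="alice",
        UserAttributes=[{"Name": "email", "Value": "alice@example.com"}],
    )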
cognitoidp_backends = {}
for region in boto.cognito.identity.regions():

View File

@ -143,6 +143,14 @@ class CognitoIdpResponse(BaseResponse):
"IdentityProvider": identity_provider.to_json(extended=True)
})
def update_identity_provider(self):
user_pool_id = self._get_param("UserPoolId")
name = self._get_param("ProviderName")
identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters)
return json.dumps({
"IdentityProvider": identity_provider.to_json(extended=True)
})
def delete_identity_provider(self):
user_pool_id = self._get_param("UserPoolId")
name = self._get_param("ProviderName")
@ -344,6 +352,13 @@ class CognitoIdpResponse(BaseResponse):
cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password)
return ""
def admin_update_user_attributes(self):
user_pool_id = self._get_param("UserPoolId")
username = self._get_param("Username")
attributes = self._get_param("UserAttributes")
cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes)
return ""
class CognitoIdpJsonWebKeyResponse(BaseResponse):

4
moto/config/__init__.py Normal file
View File

@ -0,0 +1,4 @@
from .models import config_backends
from ..core.models import base_decorator
mock_config = base_decorator(config_backends)

149
moto/config/exceptions.py Normal file
View File

@ -0,0 +1,149 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class NameTooLongException(JsonRESTError):
code = 400
def __init__(self, name, location):
message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \
' constraint: Member must have length less than or equal to 256'.format(name=name, location=location)
super(NameTooLongException, self).__init__("ValidationException", message)
class InvalidConfigurationRecorderNameException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name)
super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException",
message)
class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \
'configuration recorders: 1 is reached.'.format(name=name)
super(MaxNumberOfConfigurationRecordersExceededException, self).__init__(
"MaxNumberOfConfigurationRecordersExceededException", message)
class InvalidRecordingGroupException(JsonRESTError):
code = 400
def __init__(self):
message = 'The recording group provided is not valid'
super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message)
class InvalidResourceTypeException(JsonRESTError):
code = 400
def __init__(self, bad_list, good_list):
message = '{num} validation error detected: Value \'{bad_list}\' at ' \
'\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \
'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format(
num=len(bad_list), bad_list=bad_list, good_list=good_list)
# For PY2:
message = str(message)
super(InvalidResourceTypeException, self).__init__("ValidationException", message)
class NoSuchConfigurationRecorderException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name)
super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message)
class InvalidDeliveryChannelNameException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name)
super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException",
message)
class NoSuchBucketException(JsonRESTError):
"""We are *only* validating that there is value that is not '' here."""
code = 400
def __init__(self):
message = 'Cannot find a S3 bucket with an empty bucket name.'
super(NoSuchBucketException, self).__init__("NoSuchBucketException", message)
class InvalidS3KeyPrefixException(JsonRESTError):
code = 400
def __init__(self):
message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.'
super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message)
class InvalidSNSTopicARNException(JsonRESTError):
"""We are *only* validating that there is value that is not '' here."""
code = 400
def __init__(self):
message = 'The sns topic arn \'\' is not valid.'
super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message)
class InvalidDeliveryFrequency(JsonRESTError):
code = 400
def __init__(self, value, good_list):
message = '1 validation error detected: Value \'{value}\' at ' \
'\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \
'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list)
super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message)
class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \
'delivery channels: 1 is reached.'.format(name=name)
super(MaxNumberOfDeliveryChannelsExceededException, self).__init__(
"MaxNumberOfDeliveryChannelsExceededException", message)
class NoSuchDeliveryChannelException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name)
super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message)
class NoAvailableConfigurationRecorderException(JsonRESTError):
code = 400
def __init__(self):
message = 'Configuration recorder is not available to put delivery channel.'
super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException",
message)
class NoAvailableDeliveryChannelException(JsonRESTError):
code = 400
def __init__(self):
message = 'Delivery channel is not available to start configuration recorder.'
super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message)
class LastDeliveryChannelDeleteFailedException(JsonRESTError):
code = 400
def __init__(self, name):
message = 'Failed to delete last specified delivery channel with name \'{name}\', because there ' \
'is a running configuration recorder.'.format(name=name)
super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message)

335
moto/config/models.py Normal file
View File

@ -0,0 +1,335 @@
import json
import time
import pkg_resources
from datetime import datetime
from boto3 import Session
from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \
InvalidConfigurationRecorderNameException, NameTooLongException, \
MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \
NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \
InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \
InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException
from moto.core import BaseBackend, BaseModel
DEFAULT_ACCOUNT_ID = 123456789012
def datetime2int(date):
return int(time.mktime(date.timetuple()))
def snake_to_camels(original):
parts = original.split('_')
camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:])
camel_cased = camel_cased.replace('Arn', 'ARN') # Config uses 'ARN' instead of 'Arn'
return camel_cased
class ConfigEmptyDictable(BaseModel):
"""Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON."""
def to_dict(self):
data = {}
for item, value in self.__dict__.items():
if value is not None:
if isinstance(value, ConfigEmptyDictable):
data[snake_to_camels(item)] = value.to_dict()
else:
data[snake_to_camels(item)] = value
return data
class ConfigRecorderStatus(ConfigEmptyDictable):
def __init__(self, name):
self.name = name
self.recording = False
self.last_start_time = None
self.last_stop_time = None
self.last_status = None
self.last_error_code = None
self.last_error_message = None
self.last_status_change_time = None
def start(self):
self.recording = True
self.last_status = 'PENDING'
self.last_start_time = datetime2int(datetime.utcnow())
self.last_status_change_time = datetime2int(datetime.utcnow())
def stop(self):
self.recording = False
self.last_stop_time = datetime2int(datetime.utcnow())
self.last_status_change_time = datetime2int(datetime.utcnow())
class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):
def __init__(self, delivery_frequency):
self.delivery_frequency = delivery_frequency
class ConfigDeliveryChannel(ConfigEmptyDictable):
def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None):
self.name = name
self.s3_bucket_name = s3_bucket_name
self.s3_key_prefix = prefix
self.sns_topic_arn = sns_arn
self.config_snapshot_delivery_properties = snapshot_properties
class RecordingGroup(ConfigEmptyDictable):
def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None):
self.all_supported = all_supported
self.include_global_resource_types = include_global_resource_types
self.resource_types = resource_types
class ConfigRecorder(ConfigEmptyDictable):
def __init__(self, role_arn, recording_group, name='default', status=None):
self.name = name
self.role_arn = role_arn
self.recording_group = recording_group
if not status:
self.status = ConfigRecorderStatus(name)
else:
self.status = status
class ConfigBackend(BaseBackend):
def __init__(self):
self.recorders = {}
self.delivery_channels = {}
@staticmethod
def _validate_resource_types(resource_list):
# Load the service file:
resource_package = 'botocore'
resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))
# Verify that each entry exists in the supported list:
bad_list = []
for resource in resource_list:
# For PY2:
r_str = str(resource)
if r_str not in config_schema['shapes']['ResourceType']['enum']:
bad_list.append(r_str)
if bad_list:
raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum'])
@staticmethod
def _validate_delivery_snapshot_properties(properties):
# Load the service file:
resource_package = 'botocore'
resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))
# Verify that the deliveryFrequency is set to an acceptable value:
if properties.get('deliveryFrequency', None) not in \
config_schema['shapes']['MaximumExecutionFrequency']['enum']:
raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None),
config_schema['shapes']['MaximumExecutionFrequency']['enum'])
def put_configuration_recorder(self, config_recorder):
# Validate the name:
if not config_recorder.get('name'):
raise InvalidConfigurationRecorderNameException(config_recorder.get('name'))
if len(config_recorder.get('name')) > 256:
raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name')
# We're going to assume that the passed in Role ARN is correct.
# Config currently only allows 1 configuration recorder for an account:
if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']):
raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name'])
# Is this updating an existing one?
recorder_status = None
if self.recorders.get(config_recorder['name']):
recorder_status = self.recorders[config_recorder['name']].status
# Validate the Recording Group:
if config_recorder.get('recordingGroup') is None:
recording_group = RecordingGroup()
else:
rg = config_recorder['recordingGroup']
# If an empty dict is passed in, then bad:
if not rg:
raise InvalidRecordingGroupException()
# Can't have both the resource types specified and the other flags as True.
if rg.get('resourceTypes') and (
rg.get('allSupported', False) or
rg.get('includeGlobalResourceTypes', False)):
raise InvalidRecordingGroupException()
# Must supply resourceTypes if 'allSupported' is not supplied:
if not rg.get('allSupported') and not rg.get('resourceTypes'):
raise InvalidRecordingGroupException()
# Validate that the list provided is correct:
self._validate_resource_types(rg.get('resourceTypes', []))
recording_group = RecordingGroup(
all_supported=rg.get('allSupported', True),
include_global_resource_types=rg.get('includeGlobalResourceTypes', False),
resource_types=rg.get('resourceTypes', [])
)
self.recorders[config_recorder['name']] = \
ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'],
status=recorder_status)
def describe_configuration_recorders(self, recorder_names):
recorders = []
if recorder_names:
for rn in recorder_names:
if not self.recorders.get(rn):
raise NoSuchConfigurationRecorderException(rn)
# Format the recorder:
recorders.append(self.recorders[rn].to_dict())
else:
for recorder in self.recorders.values():
recorders.append(recorder.to_dict())
return recorders
def describe_configuration_recorder_status(self, recorder_names):
recorders = []
if recorder_names:
for rn in recorder_names:
if not self.recorders.get(rn):
raise NoSuchConfigurationRecorderException(rn)
# Format the recorder:
recorders.append(self.recorders[rn].status.to_dict())
else:
for recorder in self.recorders.values():
recorders.append(recorder.status.to_dict())
return recorders
def put_delivery_channel(self, delivery_channel):
# Must have a configuration recorder:
if not self.recorders:
raise NoAvailableConfigurationRecorderException()
# Validate the name:
if not delivery_channel.get('name'):
raise InvalidDeliveryChannelNameException(delivery_channel.get('name'))
if len(delivery_channel.get('name')) > 256:
raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name')
# We are going to assume that the bucket exists -- but will verify if the bucket provided is blank:
if not delivery_channel.get('s3BucketName'):
raise NoSuchBucketException()
# We are going to assume that the bucket has the correct policy attached to it. We are only going to verify
# if the prefix provided is not an empty string:
if delivery_channel.get('s3KeyPrefix', None) == '':
raise InvalidS3KeyPrefixException()
# Ditto for SNS -- Only going to assume that the ARN provided is not an empty string:
if delivery_channel.get('snsTopicARN', None) == '':
raise InvalidSNSTopicARNException()
# Config currently only allows 1 delivery channel for an account:
if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']):
raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name'])
if not delivery_channel.get('configSnapshotDeliveryProperties'):
dp = None
else:
# Validate the config snapshot delivery properties:
self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties'])
dp = ConfigDeliverySnapshotProperties(
delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency'])
self.delivery_channels[delivery_channel['name']] = \
ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'],
prefix=delivery_channel.get('s3KeyPrefix', None),
sns_arn=delivery_channel.get('snsTopicARN', None),
snapshot_properties=dp)
def describe_delivery_channels(self, channel_names):
channels = []
if channel_names:
for cn in channel_names:
if not self.delivery_channels.get(cn):
raise NoSuchDeliveryChannelException(cn)
# Format the delivery channel:
channels.append(self.delivery_channels[cn].to_dict())
else:
for channel in self.delivery_channels.values():
channels.append(channel.to_dict())
return channels
def start_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
# Must have a delivery channel available as well:
if not self.delivery_channels:
raise NoAvailableDeliveryChannelException()
# Start recording:
self.recorders[recorder_name].status.start()
def stop_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
# Stop recording:
self.recorders[recorder_name].status.stop()
def delete_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
del self.recorders[recorder_name]
def delete_delivery_channel(self, channel_name):
if not self.delivery_channels.get(channel_name):
raise NoSuchDeliveryChannelException(channel_name)
# Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
for recorder in self.recorders.values():
if recorder.status.recording:
raise LastDeliveryChannelDeleteFailedException(channel_name)
del self.delivery_channels[channel_name]
config_backends = {}
boto3_session = Session()
for region in boto3_session.get_available_regions('config'):
config_backends[region] = ConfigBackend()
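With the new mock_config decorator from moto/config/__init__.py, the recorder lifecycle above can be exercised through boto3. A minimal sketch; the recorder name and role ARN are illustrative placeholders (the backend assumes any non-empty role ARN is valid):

import boto3
from moto.config import mock_config


@mock_config
def exercise_config_recorder():
    client = boto3.client("config", region_name="us-east-1")
    client.put_configuration_recorder(ConfigurationRecorder={
        "name": "default",
        "roleARN": "arn:aws:iam::123456789012:role/config-role",
        "recordingGroup": {"allSupported": True,
                           "includeGlobalResourceTypes": False},
    })
    recorders = client.describe_configuration_recorders()["ConfigurationRecorders"]
    assert recorders[0]["name"] == "default"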

53
moto/config/responses.py Normal file
View File

@ -0,0 +1,53 @@
import json
from moto.core.responses import BaseResponse
from .models import config_backends
class ConfigResponse(BaseResponse):
@property
def config_backend(self):
return config_backends[self.region]
def put_configuration_recorder(self):
self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
return ""
def describe_configuration_recorders(self):
recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
schema = {'ConfigurationRecorders': recorders}
return json.dumps(schema)
def describe_configuration_recorder_status(self):
recorder_statuses = self.config_backend.describe_configuration_recorder_status(
self._get_param('ConfigurationRecorderNames'))
schema = {'ConfigurationRecordersStatus': recorder_statuses}
return json.dumps(schema)
def put_delivery_channel(self):
self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
return ""
def describe_delivery_channels(self):
delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
schema = {'DeliveryChannels': delivery_channels}
return json.dumps(schema)
def describe_delivery_channel_status(self):
raise NotImplementedError()
def delete_delivery_channel(self):
self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
return ""
def delete_configuration_recorder(self):
self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""
def start_configuration_recorder(self):
self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""
def stop_configuration_recorder(self):
self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""

10
moto/config/urls.py Normal file
View File

@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import ConfigResponse
url_bases = [
"https?://config.(.+).amazonaws.com",
]
url_paths = {
'{0}/$': ConfigResponse.dispatch,
}

View File

@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin):
for key, value in flat.items():
querystring[key] = [value]
elif self.body:
try:
querystring.update(parse_qs(raw_body, keep_blank_values=True))
except UnicodeEncodeError:
pass # ignore encoding errors, as the body may not contain a legitimate querystring
if not querystring:
querystring.update(headers)
try:
querystring = _decode_dict(querystring)
except UnicodeDecodeError:
pass # ignore decoding errors, as the body may not contain a legitimate querystring
self.uri = full_url
self.path = urlparse(full_url).path
self.querystring = querystring

View File

@ -280,7 +280,7 @@ def amzn_request_id(f):
# Update request ID in XML
try:
body = body.replace('{{ requestid }}', request_id)
body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
except Exception: # Will just ignore if it can't work on bytes (which are strs on Python 2)
pass
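A standalone sketch of the substitution above: the lookbehind/lookahead pair swaps whatever sits between the RequestId tags for the generated id without touching the tags themselves:

import re
import uuid

request_id = str(uuid.uuid4())
body = "<Response><RequestId>old-id</RequestId></Response>"
body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)

assert "<RequestId>%s</RequestId>" % request_id in body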

View File

@ -0,0 +1,2 @@
class InvalidIndexNameError(ValueError):
pass

View File

@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func, get_filter_expression, Op
from .exceptions import InvalidIndexNameError
class DynamoJsonEncoder(json.JSONEncoder):
@ -66,6 +67,8 @@ class DynamoType(object):
return int(self.value)
except ValueError:
return float(self.value)
elif self.is_set():
return set(self.value)
else:
return self.value
@ -291,6 +294,19 @@ class Item(BaseModel):
# TODO: implement other data types
raise NotImplementedError(
'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
elif action == 'DELETE':
if set(update_action['Value'].keys()) == set(['SS']):
existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
new_set = set(existing.value).difference(set(new_value))
self.attrs[attribute_name] = DynamoType({
"SS": list(new_set)
})
else:
raise NotImplementedError(
'DELETE not supported for %s' % ', '.join(update_action['Value'].keys()))
else:
raise NotImplementedError(
'%s action not supported for update_with_attribute_updates' % action)
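A sketch of the new DELETE branch through the legacy AttributeUpdates API, assuming the existing mock_dynamodb2 decorator; the table and attribute names are illustrative:

import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def exercise_string_set_delete():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="demo",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    client.put_item(TableName="demo",
                    Item={"id": {"S": "1"}, "tags": {"SS": ["a", "b"]}})
    # Remove one member from the string set
    client.update_item(
        TableName="demo",
        Key={"id": {"S": "1"}},
        AttributeUpdates={"tags": {"Action": "DELETE", "Value": {"SS": ["a"]}}},
    )
    item = client.get_item(TableName="demo", Key={"id": {"S": "1"}})["Item"]
    assert set(item["tags"]["SS"]) == {"b"}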
class StreamRecord(BaseModel):
@ -401,6 +417,25 @@ class Table(BaseModel):
}
self.set_stream_specification(streams)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
params = {}
if 'KeySchema' in properties:
params['schema'] = properties['KeySchema']
if 'AttributeDefinitions' in properties:
params['attr'] = properties['AttributeDefinitions']
if 'GlobalSecondaryIndexes' in properties:
params['global_indexes'] = properties['GlobalSecondaryIndexes']
if 'ProvisionedThroughput' in properties:
params['throughput'] = properties['ProvisionedThroughput']
if 'LocalSecondaryIndexes' in properties:
params['indexes'] = properties['LocalSecondaryIndexes']
table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params)
return table
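A sketch of the Properties shape this parser consumes; only TableName is strictly required by the code path above, and the remaining keys map onto the create_table keyword arguments shown:

properties = {
    "TableName": "demo-table",                                   # required
    "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],   # -> schema
    "AttributeDefinitions": [                                    # -> attr
        {"AttributeName": "id", "AttributeType": "S"},
    ],
    "ProvisionedThroughput":                                     # -> throughput
        {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
}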
def _generate_arn(self, name):
return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@ -509,14 +544,11 @@ class Table(BaseModel):
elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
raise ValueError("The conditional request failed")
elif 'ComparisonOperator' in val:
comparison_func = get_comparison_func(
val['ComparisonOperator'])
dynamo_types = [
DynamoType(ele) for ele in
val.get("AttributeValueList", [])
]
for t in dynamo_types:
if not comparison_func(current_attr[key].value, t.value):
if not current_attr[key].compare(val['ComparisonOperator'], dynamo_types):
raise ValueError('The conditional request failed')
if range_value:
self.items[hash_value][range_value] = item
@ -571,8 +603,9 @@ class Table(BaseModel):
exclusive_start_key, scan_index_forward, projection_expression,
index_name=None, filter_expression=None, **filter_kwargs):
results = []
if index_name:
all_indexes = (self.global_indexes or []) + (self.indexes or [])
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
if index_name not in indexes_by_name:
raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
@ -587,24 +620,28 @@ class Table(BaseModel):
raise ValueError('Missing Hash Key. KeySchema: %s' %
index['KeySchema'])
possible_results = []
for item in self.all_items():
if not isinstance(item, Item):
continue
item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
if item_hash_key and item_hash_key == hash_key:
possible_results.append(item)
else:
possible_results = [item for item in list(self.all_items()) if isinstance(
item, Item) and item.hash_key == hash_key]
if index_name:
try:
index_range_key = [key for key in index[
'KeySchema'] if key['KeyType'] == 'RANGE'][0]
except IndexError:
index_range_key = None
possible_results = []
for item in self.all_items():
if not isinstance(item, Item):
continue
item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
if index_range_key is None:
if item_hash_key and item_hash_key == hash_key:
possible_results.append(item)
else:
item_range_key = item.attrs.get(index_range_key['AttributeName'])
if item_hash_key and item_hash_key == hash_key and item_range_key:
possible_results.append(item)
else:
possible_results = [item for item in list(self.all_items()) if isinstance(
item, Item) and item.hash_key == hash_key]
if range_comparison:
if index_name and not index_range_key:
raise ValueError(
@ -668,11 +705,39 @@ class Table(BaseModel):
else:
yield hash_set
def scan(self, filters, limit, exclusive_start_key, filter_expression=None):
def all_indexes(self):
return (self.global_indexes or []) + (self.indexes or [])
def has_idx_items(self, index_name):
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
idx = indexes_by_name[index_name]
idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']])
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
if idx_col_set.issubset(set(item.attrs)):
yield item
else:
if idx_col_set.issubset(set(hash_set.attrs)):
yield hash_set
def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None, projection_expression=None):
results = []
scanned_count = 0
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
for item in self.all_items():
if index_name:
if index_name not in indexes_by_name:
raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name)
items = self.has_idx_items(index_name)
else:
items = self.all_items()
for item in items:
scanned_count += 1
passes_all_conditions = True
for attribute_name, (comparison_operator, comparison_objs) in filters.items():
@ -698,11 +763,19 @@ class Table(BaseModel):
if passes_all_conditions:
results.append(item)
if projection_expression:
expressions = [x.strip() for x in projection_expression.split(',')]
results = copy.deepcopy(results)
for result in results:
for attr in list(result.attrs):
if attr not in expressions:
result.attrs.pop(attr)
results, last_evaluated_key = self._trim_results(results, limit,
exclusive_start_key)
exclusive_start_key, index_name)
return results, scanned_count, last_evaluated_key
def _trim_results(self, results, limit, exclusive_start_key):
def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None):
if exclusive_start_key is not None:
hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
range_key = exclusive_start_key.get(self.range_key_attr)
@ -722,6 +795,14 @@ class Table(BaseModel):
if results[-1].range_key is not None:
last_evaluated_key[self.range_key_attr] = results[-1].range_key
if scanned_index:
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
idx = indexes_by_name[scanned_index]
idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
for col in idx_col_list:
last_evaluated_key[col] = results[-1].attrs[col]
return results, last_evaluated_key
def lookup(self, *args, **kwargs):
@ -889,7 +970,7 @@ class DynamoDBBackend(BaseBackend):
return table.query(hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)
def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values):
def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name, projection_expression):
table = self.tables.get(table_name)
if not table:
return None, None, None
@ -904,7 +985,9 @@ class DynamoDBBackend(BaseBackend):
else:
filter_expression = Op(None, None) # Will always eval to true
return table.scan(scan_filters, limit, exclusive_start_key, filter_expression)
projection_expression = ','.join([expr_names.get(attr, attr) for attr in projection_expression.replace(' ', '').split(',')])
return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name, projection_expression)
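# --- Illustrative sketch (not part of the diff): how the new index_name and
# projection_expression arguments surface through boto3. Assumes
# mock_dynamodb2; the GSI and attribute names are made up.
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def scan_gsi_with_projection():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='example',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[
            {'AttributeName': 'id', 'AttributeType': 'S'},
            {'AttributeName': 'owner', 'AttributeType': 'S'}],
        GlobalSecondaryIndexes=[{
            'IndexName': 'owner-index',
            'KeySchema': [{'AttributeName': 'owner', 'KeyType': 'HASH'}],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {'ReadCapacityUnits': 1,
                                      'WriteCapacityUnits': 1}}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})
    client.put_item(TableName='example', Item={
        'id': {'S': '1'}, 'owner': {'S': 'alice'}, 'rating': {'N': '10'}})
    # Only items carrying all of the index's key attributes are scanned, and
    # the projection expression trims each result to the named attributes.
    resp = client.scan(TableName='example', IndexName='owner-index',
                       ProjectionExpression='#o, rating',
                       ExpressionAttributeNames={'#o': 'owner'})
    assert set(resp['Items'][0]) == {'owner', 'rating'}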
def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names,
expression_attribute_values, expected=None):
@ -946,14 +1029,11 @@ class DynamoDBBackend(BaseBackend):
elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
raise ValueError("The conditional request failed")
elif 'ComparisonOperator' in val:
comparison_func = get_comparison_func(
val['ComparisonOperator'])
dynamo_types = [
DynamoType(ele) for ele in
val.get("AttributeValueList", [])
]
for t in dynamo_types:
if not comparison_func(item_attr[key].value, t.value):
if not item_attr[key].compare(val['ComparisonOperator'], dynamo_types):
raise ValueError('The conditional request failed')
# Update does not fail on new items, so create one

View File

@ -5,6 +5,7 @@ import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .exceptions import InvalidIndexNameError
from .models import dynamodb_backends, dynamo_json_dump
@ -31,6 +32,67 @@ def get_empty_str_error():
))
def condition_expression_to_expected(condition_expression, expression_attribute_names, expression_attribute_values):
"""
Limited parser for ConditionExpression syntax.
Supports global negation, e.g. NOT(inner expressions).
Supports simple AND chains, e.g. cond_a AND cond_b AND cond_c.
Supported atomic expressions: attribute_exists(key), attribute_not_exists(key) and #key = :value.
"""
expected = {}
if condition_expression and 'OR' not in condition_expression:
reverse_re = re.compile(r'^NOT\s*\((.*)\)$')
reverse_m = reverse_re.match(condition_expression.strip())
reverse = False
if reverse_m:
reverse = True
condition_expression = reverse_m.group(1)
cond_items = [c.strip() for c in condition_expression.split('AND')]
if cond_items:
exists_re = re.compile(r'^attribute_exists\s*\((.*)\)$')
not_exists_re = re.compile(
r'^attribute_not_exists\s*\((.*)\)$')
equals_re = re.compile(r'^(#?\w+)\s*=\s*(:?\w+)')
for cond in cond_items:
exists_m = exists_re.match(cond)
not_exists_m = not_exists_re.match(cond)
equals_m = equals_re.match(cond)
if exists_m:
attribute_name = expression_attribute_names_lookup(exists_m.group(1), expression_attribute_names)
expected[attribute_name] = {'Exists': True if not reverse else False}
elif not_exists_m:
attribute_name = expression_attribute_names_lookup(not_exists_m.group(1), expression_attribute_names)
expected[attribute_name] = {'Exists': False if not reverse else True}
elif equals_m:
attribute_name = expression_attribute_names_lookup(equals_m.group(1), expression_attribute_names)
attribute_value = expression_attribute_values_lookup(equals_m.group(2), expression_attribute_values)
expected[attribute_name] = {
'AttributeValueList': [attribute_value],
'ComparisonOperator': 'EQ' if not reverse else 'NEQ'}
return expected
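# --- Illustrative sketch (not part of the diff): what the parser above
# produces for a typical expression; the attribute and value names here are
# hypothetical.
expected = condition_expression_to_expected(
    'attribute_not_exists(id) AND #t = :v',
    expression_attribute_names={'#t': 'type'},
    expression_attribute_values={':v': {'S': 'user'}})
# expected == {'id': {'Exists': False},
#              'type': {'AttributeValueList': [{'S': 'user'}],
#                       'ComparisonOperator': 'EQ'}}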
def expression_attribute_names_lookup(attribute_name, expression_attribute_names):
if attribute_name.startswith('#') and attribute_name in expression_attribute_names:
return expression_attribute_names[attribute_name]
else:
return attribute_name
def expression_attribute_values_lookup(attribute_value, expression_attribute_values):
if isinstance(attribute_value, six.string_types) and \
attribute_value.startswith(':') and\
attribute_value in expression_attribute_values:
return expression_attribute_values[attribute_value]
else:
return attribute_value
class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
@ -95,8 +157,16 @@ class DynamoHandler(BaseResponse):
body = self.body
# get the table name
table_name = body['TableName']
# get the throughput
throughput = body["ProvisionedThroughput"]
# check billing mode and get the throughput
if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST":
if "ProvisionedThroughput" in body.keys():
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er,
'ProvisionedThroughput cannot be specified '
'when BillingMode is PAY_PER_REQUEST')
throughput = None
else: # Provisioned (default billing mode)
throughput = body.get("ProvisionedThroughput")
# getting the schema
key_schema = body['KeySchema']
# getting attribute definition
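# --- Illustrative sketch (not part of the diff): the new billing-mode check
# from a client's perspective. Assumes mock_dynamodb2; names are made up.
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def on_demand_table():
    client = boto3.client('dynamodb', region_name='us-east-1')
    # No ProvisionedThroughput is needed (or allowed) with PAY_PER_REQUEST;
    # supplying both now returns a ValidationException.
    client.create_table(
        TableName='on-demand',
        BillingMode='PAY_PER_REQUEST',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}])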
@ -220,24 +290,13 @@ class DynamoHandler(BaseResponse):
# expression
if not expected:
condition_expression = self.body.get('ConditionExpression')
if condition_expression and 'OR' not in condition_expression:
cond_items = [c.strip()
for c in condition_expression.split('AND')]
if cond_items:
expected = {}
expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
expected = condition_expression_to_expected(condition_expression,
expression_attribute_names,
expression_attribute_values)
if expected:
overwrite = False
exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
not_exists_re = re.compile(
'^attribute_not_exists\s*\((.*)\)$')
for cond in cond_items:
exists_m = exists_re.match(cond)
not_exists_m = not_exists_re.match(cond)
if exists_m:
expected[exists_m.group(1)] = {'Exists': True}
elif not_exists_m:
expected[not_exists_m.group(1)] = {'Exists': False}
try:
result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
@ -499,9 +558,10 @@ class DynamoHandler(BaseResponse):
filter_expression = self.body.get('FilterExpression')
expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
projection_expression = self.body.get('ProjectionExpression', '')
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
index_name = self.body.get('IndexName')
try:
items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
@ -509,7 +569,12 @@ class DynamoHandler(BaseResponse):
exclusive_start_key,
filter_expression,
expression_attribute_names,
expression_attribute_values)
expression_attribute_values,
index_name,
projection_expression)
except InvalidIndexNameError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er, str(err))
except ValueError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
return self.error(er, 'Bad Filter Expression: {0}'.format(err))
@ -590,23 +655,11 @@ class DynamoHandler(BaseResponse):
# expression
if not expected:
condition_expression = self.body.get('ConditionExpression')
if condition_expression and 'OR' not in condition_expression:
cond_items = [c.strip()
for c in condition_expression.split('AND')]
if cond_items:
expected = {}
exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
not_exists_re = re.compile(
'^attribute_not_exists\s*\((.*)\)$')
for cond in cond_items:
exists_m = exists_re.match(cond)
not_exists_m = not_exists_re.match(cond)
if exists_m:
expected[exists_m.group(1)] = {'Exists': True}
elif not_exists_m:
expected[not_exists_m.group(1)] = {'Exists': False}
expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
expected = condition_expression_to_expected(condition_expression,
expression_attribute_names,
expression_attribute_values)
# Support spaces between operators in an update expression
# E.g. `a = b + c` -> `a=b+c`

View File

@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError):
.format(key))
class InvalidKeyPairFormatError(EC2ClientError):
def __init__(self):
super(InvalidKeyPairFormatError, self).__init__(
"InvalidKeyPair.Format",
"Key is not in valid OpenSSH public key format")
class InvalidVPCIdError(EC2ClientError):
def __init__(self, vpc_id):
@ -324,6 +332,15 @@ class InvalidParameterValueErrorTagNull(EC2ClientError):
"Tag value cannot be null. Use empty string instead.")
class InvalidParameterValueErrorUnknownAttribute(EC2ClientError):
def __init__(self, parameter_value):
super(InvalidParameterValueErrorUnknownAttribute, self).__init__(
"InvalidParameterValue",
"Value ({0}) for parameter attribute is invalid. Unknown attribute."
.format(parameter_value))
class InvalidInternetGatewayIdError(EC2ClientError):
def __init__(self, internet_gateway_id):
@ -420,3 +437,89 @@ class OperationNotPermitted(EC2ClientError):
"The vpc CIDR block with association ID {} may not be disassociated. "
"It is the primary IPv4 CIDR block of the VPC".format(association_id)
)
class InvalidAvailabilityZoneError(EC2ClientError):
def __init__(self, availability_zone_value, valid_availability_zones):
super(InvalidAvailabilityZoneError, self).__init__(
"InvalidParameterValue",
"Value ({0}) for parameter availabilityZone is invalid. "
"Subnets can currently only be created in the following availability zones: {1}.".format(availability_zone_value, valid_availability_zones)
)
class NetworkAclEntryAlreadyExistsError(EC2ClientError):
def __init__(self, rule_number):
super(NetworkAclEntryAlreadyExistsError, self).__init__(
"NetworkAclEntryAlreadyExists",
"The network acl entry identified by {} already exists.".format(rule_number)
)
class InvalidSubnetRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetRangeError, self).__init__(
"InvalidSubnet.Range",
"The CIDR '{}' is invalid.".format(cidr_block)
)
class InvalidCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
)
class InvalidDestinationCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidDestinationCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
)
class InvalidSubnetConflictError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetConflictError, self).__init__(
"InvalidSubnet.Conflict",
"The CIDR '{}' conflicts with another subnet".format(cidr_block)
)
class InvalidVPCRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidVPCRangeError, self).__init__(
"InvalidVpc.Range",
"The CIDR '{}' is invalid.".format(cidr_block)
)
# raised when a cross-region peering connection is accepted from the wrong region
class OperationNotPermitted2(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted2, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region)
)
# raised when a cross-region peering connection is rejected from the wrong region
class OperationNotPermitted3(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted3, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region,
pcx_id,
acceptor_region)
)

300
moto/ec2/models.py Executable file → Normal file
View File

@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
from boto.ec2.launchspecification import LaunchSpecification
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.models import Model, BaseModel
@ -35,20 +36,25 @@ from .exceptions import (
InvalidAMIIdError,
InvalidAMIAttributeItemValueError,
InvalidAssociationIdError,
InvalidAvailabilityZoneError,
InvalidCIDRBlockParameterError,
InvalidCIDRSubnetError,
InvalidCustomerGatewayIdError,
InvalidDestinationCIDRBlockParameterError,
InvalidDHCPOptionsIdError,
InvalidDomainError,
InvalidID,
InvalidInstanceIdError,
InvalidInternetGatewayIdError,
InvalidKeyPairDuplicateError,
InvalidKeyPairFormatError,
InvalidKeyPairNameError,
InvalidNetworkAclIdError,
InvalidNetworkAttachmentIdError,
InvalidNetworkInterfaceIdError,
InvalidParameterValueError,
InvalidParameterValueErrorTagNull,
InvalidParameterValueErrorUnknownAttribute,
InvalidPermissionNotFoundError,
InvalidPermissionDuplicateError,
InvalidRouteTableIdError,
@ -56,20 +62,26 @@ from .exceptions import (
InvalidSecurityGroupDuplicateError,
InvalidSecurityGroupNotFoundError,
InvalidSnapshotIdError,
InvalidSubnetConflictError,
InvalidSubnetIdError,
InvalidSubnetRangeError,
InvalidVolumeIdError,
InvalidVolumeAttachmentError,
InvalidVpcCidrBlockAssociationIdError,
InvalidVPCPeeringConnectionIdError,
InvalidVPCPeeringConnectionStateTransitionError,
InvalidVPCIdError,
InvalidVPCRangeError,
InvalidVpnGatewayIdError,
InvalidVpnConnectionIdError,
MalformedAMIIdError,
MalformedDHCPOptionsIdError,
MissingParameterError,
MotoNotImplementedError,
NetworkAclEntryAlreadyExistsError,
OperationNotPermitted,
OperationNotPermitted2,
OperationNotPermitted3,
ResourceAlreadyAssociatedError,
RulesPerSecurityGroupLimitExceededError,
TagLimitExceeded)
@ -118,6 +130,8 @@ from .utils import (
random_customer_gateway_id,
is_tag_filter,
tag_filter_matches,
rsa_public_key_parse,
rsa_public_key_fingerprint
)
INSTANCE_TYPES = json.load(
@ -134,6 +148,8 @@ def utc_date_and_time():
def validate_resource_ids(resource_ids):
if not resource_ids:
raise MissingParameterError(parameter='resourceIdSet')
for resource_id in resource_ids:
if not is_valid_resource_id(resource_id):
raise InvalidID(resource_id=resource_id)
@ -189,7 +205,7 @@ class NetworkInterface(TaggedEC2Resource):
self.ec2_backend = ec2_backend
self.id = random_eni_id()
self.device_index = device_index
self.private_ip_address = private_ip_address
self.private_ip_address = private_ip_address or random_private_ip()
self.subnet = subnet
self.instance = None
self.attachment_id = None
@ -368,6 +384,10 @@ class NetworkInterfaceBackend(object):
class Instance(TaggedEC2Resource, BotoInstance):
VALID_ATTRIBUTES = {'instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination',
'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping',
'productCodes', 'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport'}
def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs):
super(Instance, self).__init__()
self.ec2_backend = ec2_backend
@ -388,7 +408,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
self.ebs_optimized = kwargs.get("ebs_optimized", False)
self.source_dest_check = "true"
self.launch_time = utc_date_and_time()
self.ami_launch_index = kwargs.get("ami_launch_index", 0)
self.disable_api_termination = kwargs.get("disable_api_termination", False)
self.instance_initiated_shutdown_behavior = kwargs.get("instance_initiated_shutdown_behavior", "stop")
self.sriov_net_support = "simple"
self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
associate_public_ip = kwargs.get("associate_public_ip", False)
if in_ec2_classic:
@ -719,6 +742,7 @@ class InstanceBackend(object):
instance_tags = tags.get('instance', {})
for index in range(count):
kwargs["ami_launch_index"] = index
new_instance = Instance(
self,
image_id,
@ -771,14 +795,22 @@ class InstanceBackend(object):
setattr(instance, key, value)
return instance
def modify_instance_security_groups(self, instance_id, new_group_list):
def modify_instance_security_groups(self, instance_id, new_group_id_list):
instance = self.get_instance(instance_id)
new_group_list = []
for new_group_id in new_group_id_list:
new_group_list.append(self.get_security_group_from_id(new_group_id))
setattr(instance, 'security_groups', new_group_list)
return instance
def describe_instance_attribute(self, instance_id, key):
if key == 'group_set':
def describe_instance_attribute(self, instance_id, attribute):
if attribute not in Instance.VALID_ATTRIBUTES:
raise InvalidParameterValueErrorUnknownAttribute(attribute)
if attribute == 'groupSet':
key = 'security_groups'
else:
key = camelcase_to_underscores(attribute)
instance = self.get_instance(instance_id)
value = getattr(instance, key)
return instance, value
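# --- Illustrative sketch (not part of the diff): the attribute-name
# validation above, driven through boto3 under mock_ec2. The AMI id is an
# arbitrary placeholder for illustration.
import boto3
from moto import mock_ec2

@mock_ec2
def read_instance_attribute():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    instance_id = ec2.run_instances(ImageId='ami-12345678', MinCount=1,
                                    MaxCount=1)['Instances'][0]['InstanceId']
    # 'instanceType' is in Instance.VALID_ATTRIBUTES; an unknown name now
    # raises InvalidParameterValue instead of failing with an AttributeError.
    attr = ec2.describe_instance_attribute(InstanceId=instance_id,
                                           Attribute='instanceType')
    print(attr['InstanceType']['Value'])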
@ -904,7 +936,14 @@ class KeyPairBackend(object):
def import_key_pair(self, key_name, public_key_material):
if key_name in self.keypairs:
raise InvalidKeyPairDuplicateError(key_name)
keypair = KeyPair(key_name, **random_key_pair())
try:
rsa_public_key = rsa_public_key_parse(public_key_material)
except ValueError:
raise InvalidKeyPairFormatError()
fingerprint = rsa_public_key_fingerprint(rsa_public_key)
keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint)
self.keypairs[key_name] = keypair
return keypair
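# --- Illustrative sketch (not part of the diff): the import path above,
# assuming moto's mock_ec2 and the cryptography package this diff already
# depends on. The key name is made up.
import boto3
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from moto import mock_ec2

@mock_ec2
def import_generated_key():
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                   backend=default_backend())
    public_openssh = key.public_key().public_bytes(
        encoding=serialization.Encoding.OpenSSH,
        format=serialization.PublicFormat.OpenSSH)
    ec2 = boto3.client('ec2', region_name='us-east-1')
    # A non-RSA or malformed key would now raise InvalidKeyPair.Format.
    resp = ec2.import_key_pair(KeyName='example',
                               PublicKeyMaterial=public_openssh)
    # The fingerprint is the MD5 of the DER-encoded public key (see
    # rsa_public_key_fingerprint later in this diff).
    print(resp['KeyFingerprint'])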
@ -1265,17 +1304,107 @@ class Region(object):
class Zone(object):
def __init__(self, name, region_name):
def __init__(self, name, region_name, zone_id):
self.name = name
self.region_name = region_name
self.zone_id = zone_id
class RegionsAndZonesBackend(object):
regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()]
zones = dict(
(region, [Zone(region + c, region) for c in 'abc'])
for region in [r.name for r in regions])
zones = {
'ap-south-1': [
Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"),
Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3")
],
'eu-west-3': [
Zone(region_name="eu-west-3", name="eu-west-3a", zone_id="euw3-az1"),
Zone(region_name="eu-west-3", name="eu-west-3b", zone_id="euw3-az2"),
Zone(region_name="eu-west-3", name="eu-west-3c", zone_id="euw3-az3")
],
'eu-north-1': [
Zone(region_name="eu-north-1", name="eu-north-1a", zone_id="eun1-az1"),
Zone(region_name="eu-north-1", name="eu-north-1b", zone_id="eun1-az2"),
Zone(region_name="eu-north-1", name="eu-north-1c", zone_id="eun1-az3")
],
'eu-west-2': [
Zone(region_name="eu-west-2", name="eu-west-2a", zone_id="euw2-az2"),
Zone(region_name="eu-west-2", name="eu-west-2b", zone_id="euw2-az3"),
Zone(region_name="eu-west-2", name="eu-west-2c", zone_id="euw2-az1")
],
'eu-west-1': [
Zone(region_name="eu-west-1", name="eu-west-1a", zone_id="euw1-az3"),
Zone(region_name="eu-west-1", name="eu-west-1b", zone_id="euw1-az1"),
Zone(region_name="eu-west-1", name="eu-west-1c", zone_id="euw1-az2")
],
'ap-northeast-3': [
Zone(region_name="ap-northeast-3", name="ap-northeast-2a", zone_id="apne3-az1")
],
'ap-northeast-2': [
Zone(region_name="ap-northeast-2", name="ap-northeast-2a", zone_id="apne2-az1"),
Zone(region_name="ap-northeast-2", name="ap-northeast-2c", zone_id="apne2-az3")
],
'ap-northeast-1': [
Zone(region_name="ap-northeast-1", name="ap-northeast-1a", zone_id="apne1-az4"),
Zone(region_name="ap-northeast-1", name="ap-northeast-1c", zone_id="apne1-az1"),
Zone(region_name="ap-northeast-1", name="ap-northeast-1d", zone_id="apne1-az2")
],
'sa-east-1': [
Zone(region_name="sa-east-1", name="sa-east-1a", zone_id="sae1-az1"),
Zone(region_name="sa-east-1", name="sa-east-1c", zone_id="sae1-az3")
],
'ca-central-1': [
Zone(region_name="ca-central-1", name="ca-central-1a", zone_id="cac1-az1"),
Zone(region_name="ca-central-1", name="ca-central-1b", zone_id="cac1-az2")
],
'ap-southeast-1': [
Zone(region_name="ap-southeast-1", name="ap-southeast-1a", zone_id="apse1-az1"),
Zone(region_name="ap-southeast-1", name="ap-southeast-1b", zone_id="apse1-az2"),
Zone(region_name="ap-southeast-1", name="ap-southeast-1c", zone_id="apse1-az3")
],
'ap-southeast-2': [
Zone(region_name="ap-southeast-2", name="ap-southeast-2a", zone_id="apse2-az1"),
Zone(region_name="ap-southeast-2", name="ap-southeast-2b", zone_id="apse2-az3"),
Zone(region_name="ap-southeast-2", name="ap-southeast-2c", zone_id="apse2-az2")
],
'eu-central-1': [
Zone(region_name="eu-central-1", name="eu-central-1a", zone_id="euc1-az2"),
Zone(region_name="eu-central-1", name="eu-central-1b", zone_id="euc1-az3"),
Zone(region_name="eu-central-1", name="eu-central-1c", zone_id="euc1-az1")
],
'us-east-1': [
Zone(region_name="us-east-1", name="us-east-1a", zone_id="use1-az6"),
Zone(region_name="us-east-1", name="us-east-1b", zone_id="use1-az1"),
Zone(region_name="us-east-1", name="us-east-1c", zone_id="use1-az2"),
Zone(region_name="us-east-1", name="us-east-1d", zone_id="use1-az4"),
Zone(region_name="us-east-1", name="us-east-1e", zone_id="use1-az3"),
Zone(region_name="us-east-1", name="us-east-1f", zone_id="use1-az5")
],
'us-east-2': [
Zone(region_name="us-east-2", name="us-east-2a", zone_id="use2-az1"),
Zone(region_name="us-east-2", name="us-east-2b", zone_id="use2-az2"),
Zone(region_name="us-east-2", name="us-east-2c", zone_id="use2-az3")
],
'us-west-1': [
Zone(region_name="us-west-1", name="us-west-1a", zone_id="usw1-az3"),
Zone(region_name="us-west-1", name="us-west-1b", zone_id="usw1-az1")
],
'us-west-2': [
Zone(region_name="us-west-2", name="us-west-2a", zone_id="usw2-az2"),
Zone(region_name="us-west-2", name="us-west-2b", zone_id="usw2-az1"),
Zone(region_name="us-west-2", name="us-west-2c", zone_id="usw2-az3")
],
'cn-north-1': [
Zone(region_name="cn-north-1", name="cn-north-1a", zone_id="cnn1-az1"),
Zone(region_name="cn-north-1", name="cn-north-1b", zone_id="cnn1-az2")
],
'us-gov-west-1': [
Zone(region_name="us-gov-west-1", name="us-gov-west-1a", zone_id="usgw1-az1"),
Zone(region_name="us-gov-west-1", name="us-gov-west-1b", zone_id="usgw1-az2"),
Zone(region_name="us-gov-west-1", name="us-gov-west-1c", zone_id="usgw1-az3")
]
}
def describe_regions(self, region_names=[]):
if len(region_names) == 0:
@ -1875,6 +2004,8 @@ class Snapshot(TaggedEC2Resource):
return str(self.encrypted).lower()
elif filter_name == 'status':
return self.status
elif filter_name == 'owner-id':
return self.owner_id
else:
return super(Snapshot, self).get_filter_value(
filter_name, 'DescribeSnapshots')
@ -2116,22 +2247,28 @@ class VPC(TaggedEC2Resource):
class VPCBackend(object):
__refs__ = defaultdict(list)
vpc_refs = defaultdict(set)
def __init__(self):
self.vpcs = {}
self.__refs__[self.__class__].append(weakref.ref(self))
self.vpc_refs[self.__class__].add(weakref.ref(self))
super(VPCBackend, self).__init__()
@classmethod
def get_instances(cls):
for inst_ref in cls.__refs__[cls]:
def get_vpc_refs(cls):
for inst_ref in cls.vpc_refs[cls]:
inst = inst_ref()
if inst is not None:
yield inst
def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False):
vpc_id = random_vpc_id()
try:
vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28:
raise InvalidVPCRangeError(cidr_block)
vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block)
self.vpcs[vpc_id] = vpc
@ -2155,7 +2292,7 @@ class VPCBackend(object):
# get vpc by vpc id and aws region
def get_cross_vpc(self, vpc_id, peer_region):
for vpcs in self.get_instances():
for vpcs in self.get_vpc_refs():
if vpcs.region_name == peer_region:
match_vpc = vpcs.get_vpc(vpc_id)
return match_vpc
@ -2276,15 +2413,31 @@ class VPCPeeringConnection(TaggedEC2Resource):
class VPCPeeringConnectionBackend(object):
# for cross region vpc reference
vpc_pcx_refs = defaultdict(set)
def __init__(self):
self.vpc_pcxs = {}
self.vpc_pcx_refs[self.__class__].add(weakref.ref(self))
super(VPCPeeringConnectionBackend, self).__init__()
@classmethod
def get_vpc_pcx_refs(cls):
for inst_ref in cls.vpc_pcx_refs[cls]:
inst = inst_ref()
if inst is not None:
yield inst
def create_vpc_peering_connection(self, vpc, peer_vpc):
vpc_pcx_id = random_vpc_peering_connection_id()
vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
vpc_pcx._status.pending()
self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
# insert cross region peering info
if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name:
for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs():
if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name:
vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx
return vpc_pcx
def get_all_vpc_peering_connections(self):
@ -2302,6 +2455,11 @@ class VPCPeeringConnectionBackend(object):
def accept_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
# cross-region connections must be accepted from the accepter's region
pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.accept()
@ -2309,6 +2467,11 @@ class VPCPeeringConnectionBackend(object):
def reject_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
# cross-region connections must be rejected from the accepter's region
pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.reject()
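# --- Illustrative sketch (not part of the diff): the wrong-region guard
# above, seen from boto3 under mock_ec2. CIDRs and regions are arbitrary.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def accept_from_wrong_region():
    use1 = boto3.client('ec2', region_name='us-east-1')
    usw2 = boto3.client('ec2', region_name='us-west-2')
    vpc = use1.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    peer = usw2.create_vpc(CidrBlock='10.1.0.0/16')['Vpc']['VpcId']
    pcx = use1.create_vpc_peering_connection(
        VpcId=vpc, PeerVpcId=peer, PeerRegion='us-west-2')
    pcx_id = pcx['VpcPeeringConnection']['VpcPeeringConnectionId']
    try:
        # Accepting from the requester's region raises OperationNotPermitted;
        # the accepter region (us-west-2) must accept instead.
        use1.accept_vpc_peering_connection(VpcPeeringConnectionId=pcx_id)
    except ClientError as err:
        assert err.response['Error']['Code'] == 'OperationNotPermitted'
    usw2.accept_vpc_peering_connection(VpcPeeringConnectionId=pcx_id)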
@ -2317,15 +2480,18 @@ class VPCPeeringConnectionBackend(object):
class Subnet(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az,
map_public_ip_on_launch):
map_public_ip_on_launch, owner_id=111122223333, assign_ipv6_address_on_creation=False):
self.ec2_backend = ec2_backend
self.id = subnet_id
self.vpc_id = vpc_id
self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False)
self._availability_zone = availability_zone
self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch
self.owner_id = owner_id
self.assign_ipv6_address_on_creation = assign_ipv6_address_on_creation
self.ipv6_cidr_block_associations = []
# Theory is we assign IPs as we go (a /8 has 16,777,214 usable IPs)
self._subnet_ip_generator = self.cidr.hosts()
@ -2355,7 +2521,7 @@ class Subnet(TaggedEC2Resource):
@property
def availability_zone(self):
return self._availability_zone
return self._availability_zone.name
@property
def physical_resource_id(self):
@ -2452,19 +2618,38 @@ class SubnetBackend(object):
return subnets[subnet_id]
raise InvalidSubnetIdError(subnet_id)
def create_subnet(self, vpc_id, cidr_block, availability_zone):
def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None):
subnet_id = random_subnet_id()
self.get_vpc(vpc_id) # Validate VPC exists
vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False)
try:
subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and
vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address):
raise InvalidSubnetRangeError(cidr_block)
for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}):
if subnet.cidr.overlaps(subnet_cidr_block):
raise InvalidSubnetConflictError(cidr_block)
# if this is the first subnet for an availability zone,
# consider it the default
default_for_az = str(availability_zone not in self.subnets).lower()
map_public_ip_on_launch = default_for_az
subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone,
default_for_az, map_public_ip_on_launch)
if availability_zone is None:
availability_zone = 'us-east-1a'
try:
availability_zone_data = next(zone for zones in RegionsAndZonesBackend.zones.values() for zone in zones if zone.name == availability_zone)
except StopIteration:
raise InvalidAvailabilityZoneError(availability_zone, ", ".join([zone.name for zones in RegionsAndZonesBackend.zones.values() for zone in zones]))
subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone_data,
default_for_az, map_public_ip_on_launch,
owner_id=context.get_current_user() if context else '111122223333', assign_ipv6_address_on_creation=False)
# AWS associates a new subnet with the default Network ACL
self.associate_default_network_acl_with_subnet(subnet_id)
self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
self.subnets[availability_zone][subnet_id] = subnet
return subnet
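# --- Illustrative sketch (not part of the diff): the new AZ and CIDR checks
# in create_subnet(), exercised from a boto3 client under mock_ec2.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def subnet_validation():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.1.0/24',
                               AvailabilityZone='us-east-1b')['Subnet']
    assert subnet['AvailabilityZoneId'] == 'use1-az1'  # per the Zone table above
    try:
        # A block outside the VPC's range is rejected with InvalidSubnet.Range.
        ec2.create_subnet(VpcId=vpc_id, CidrBlock='192.168.0.0/24')
    except ClientError as err:
        assert err.response['Error']['Code'] == 'InvalidSubnet.Range'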
@ -2489,11 +2674,12 @@ class SubnetBackend(object):
return subnets.pop(subnet_id, None)
raise InvalidSubnetIdError(subnet_id)
def modify_subnet_attribute(self, subnet_id, map_public_ip):
def modify_subnet_attribute(self, subnet_id, attr_name, attr_value):
subnet = self.get_subnet(subnet_id)
if map_public_ip not in ('true', 'false'):
raise InvalidParameterValueError(map_public_ip)
subnet.map_public_ip_on_launch = map_public_ip
if attr_name in ('map_public_ip_on_launch', 'assign_ipv6_address_on_creation'):
setattr(subnet, attr_name, attr_value)
else:
raise InvalidParameterValueError(attr_name)
class SubnetRouteTableAssociation(object):
@ -2714,6 +2900,11 @@ class RouteBackend(object):
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
gateway = self.get_internet_gateway(gateway_id)
try:
ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False)
except ValueError:
raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block)
route = Route(route_table, destination_cidr_block, local=local,
gateway=gateway,
instance=self.get_instance(
@ -2879,7 +3070,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
valid_from, valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data, instance_type, placement,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, tags, spot_fleet_id,
**kwargs):
super(SpotInstanceRequest, self).__init__(**kwargs)
ls = LaunchSpecification()
@ -2903,6 +3094,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
ls.monitored = monitoring_enabled
ls.subnet_id = subnet_id
self.spot_fleet_id = spot_fleet_id
self.tags = tags
if security_groups:
for group_name in security_groups:
@ -2936,6 +3128,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
security_group_names=[],
security_group_ids=self.launch_specification.groups,
spot_fleet_id=self.spot_fleet_id,
tags=self.tags,
)
instance = reservation.instances[0]
return instance
@ -2951,15 +3144,16 @@ class SpotRequestBackend(object):
valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data,
instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id=None):
monitoring_enabled, subnet_id, tags=None, spot_fleet_id=None):
requests = []
tags = tags or {}
for _ in range(count):
spot_request_id = random_spot_request_id()
request = SpotInstanceRequest(self,
spot_request_id, price, image_id, type, valid_from, valid_until,
launch_group, availability_zone_group, key_name, security_groups,
user_data, instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id)
monitoring_enabled, subnet_id, tags, spot_fleet_id)
self.spot_instance_requests[spot_request_id] = request
requests.append(request)
return requests
@ -2979,8 +3173,8 @@ class SpotRequestBackend(object):
class SpotFleetLaunchSpec(object):
def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
weighted_capacity):
instance_type, key_name, monitoring, spot_price, subnet_id, tag_specifications,
user_data, weighted_capacity):
self.ebs_optimized = ebs_optimized
self.group_set = group_set
self.iam_instance_profile = iam_instance_profile
@ -2990,6 +3184,7 @@ class SpotFleetLaunchSpec(object):
self.monitoring = monitoring
self.spot_price = spot_price
self.subnet_id = subnet_id
self.tag_specifications = tag_specifications
self.user_data = user_data
self.weighted_capacity = float(weighted_capacity)
@ -3020,6 +3215,7 @@ class SpotFleetRequest(TaggedEC2Resource):
monitoring=spec.get('monitoring._enabled'),
spot_price=spec.get('spot_price', self.spot_price),
subnet_id=spec['subnet_id'],
tag_specifications=self._parse_tag_specifications(spec),
user_data=spec.get('user_data'),
weighted_capacity=spec['weighted_capacity'],
)
@ -3102,6 +3298,7 @@ class SpotFleetRequest(TaggedEC2Resource):
monitoring_enabled=launch_spec.monitoring,
subnet_id=launch_spec.subnet_id,
spot_fleet_id=self.id,
tags=launch_spec.tag_specifications,
)
self.spot_requests.extend(requests)
self.fulfilled_capacity += added_weight
@ -3124,6 +3321,25 @@ class SpotFleetRequest(TaggedEC2Resource):
self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
self.ec2_backend.terminate_instances(instance_ids)
def _parse_tag_specifications(self, spec):
try:
tag_spec_num = max([int(key.split('.')[1]) for key in spec if key.startswith("tag_specification_set")])
except ValueError: # no tag specifications
return {}
tag_specifications = {}
for si in range(1, tag_spec_num + 1):
resource_type = spec["tag_specification_set.{si}._resource_type".format(si=si)]
tags = [key for key in spec if key.startswith("tag_specification_set.{si}._tag".format(si=si))]
tag_num = max([int(key.split('.')[3]) for key in tags])
tag_specifications[resource_type] = dict((
spec["tag_specification_set.{si}._tag.{ti}._key".format(si=si, ti=ti)],
spec["tag_specification_set.{si}._tag.{ti}._value".format(si=si, ti=ti)],
) for ti in range(1, tag_num + 1))
return tag_specifications
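# --- Illustrative sketch (not part of the diff): the flattened querystring
# shape _parse_tag_specifications() above consumes, and its result.
spec = {
    'tag_specification_set.1._resource_type': 'instance',
    'tag_specification_set.1._tag.1._key': 'Name',
    'tag_specification_set.1._tag.1._value': 'spot-worker',
    'tag_specification_set.1._tag.2._key': 'team',
    'tag_specification_set.1._tag.2._value': 'infra',
}
# -> {'instance': {'Name': 'spot-worker', 'team': 'infra'}}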
class SpotFleetBackend(object):
def __init__(self):
@ -3560,8 +3776,22 @@ class NetworkAclBackend(object):
self.get_vpc(vpc_id)
network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
self.network_acls[network_acl_id] = network_acl
if default:
self.add_default_entries(network_acl_id)
return network_acl
def add_default_entries(self, network_acl_id):
default_acl_entries = [
{'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'},
{'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'},
{'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'},
{'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'}
]
for entry in default_acl_entries:
self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
rule_action=entry['rule_action'], egress=entry['egress'], cidr_block='0.0.0.0/0',
icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None)
def get_all_network_acls(self, network_acl_ids=None, filters=None):
network_acls = self.network_acls.values()
@ -3586,12 +3816,14 @@ class NetworkAclBackend(object):
icmp_code, icmp_type, port_range_from,
port_range_to):
network_acl = self.get_network_acl(network_acl_id)
if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries):
raise NetworkAclEntryAlreadyExistsError(rule_number)
network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
network_acl = self.get_network_acl(network_acl_id)
network_acl.network_acl_entries.append(network_acl_entry)
return network_acl_entry
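# --- Illustrative sketch (not part of the diff): the duplicate-rule guard
# above, via boto3 under mock_ec2. An ACL entry is keyed by rule number plus
# direction, so re-adding the same pair now fails.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def duplicate_rule_number():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    acl_id = ec2.create_network_acl(VpcId=vpc_id)['NetworkAcl']['NetworkAclId']
    entry = dict(NetworkAclId=acl_id, RuleNumber=110, Protocol='-1',
                 RuleAction='allow', Egress=False, CidrBlock='0.0.0.0/0')
    ec2.create_network_acl_entry(**entry)
    try:
        ec2.create_network_acl_entry(**entry)  # same rule number + direction
    except ClientError as err:
        assert err.response['Error']['Code'] == 'NetworkAclEntryAlreadyExists'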
@ -3636,9 +3868,9 @@ class NetworkAclBackend(object):
new_acl.associations[new_assoc_id] = association
return association
def associate_default_network_acl_with_subnet(self, subnet_id):
def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id):
association_id = random_network_acl_subnet_association_id()
acl = next(acl for acl in self.network_acls.values() if acl.default)
acl = next(acl for acl in self.network_acls.values() if acl.default and acl.vpc_id == vpc_id)
acl.associations[association_id] = NetworkAclAssociation(self, association_id,
subnet_id, acl.id)

View File

@ -150,6 +150,7 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>creating</status>
<createTime>{{ volume.create_time}}</createTime>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
@ -160,6 +161,7 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</CreateVolumeResponse>"""
@ -191,6 +193,7 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
</item>
{% endif %}
</attachmentSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
@ -201,6 +204,7 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</item>
{% endfor %}

View File

@ -46,6 +46,7 @@ class InstanceResponse(BaseResponse):
associate_public_ip = self._get_param('AssociatePublicIpAddress')
key_name = self._get_param('KeyName')
ebs_optimized = self._get_param('EbsOptimized')
instance_initiated_shutdown_behavior = self._get_param("InstanceInitiatedShutdownBehavior")
tags = self._parse_tag_specification("TagSpecification")
region_name = self.region
@ -55,7 +56,7 @@ class InstanceResponse(BaseResponse):
instance_type=instance_type, placement=placement, region_name=region_name, subnet_id=subnet_id,
owner_id=owner_id, key_name=key_name, security_group_ids=security_group_ids,
nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip,
tags=tags, ebs_optimized=ebs_optimized)
tags=tags, ebs_optimized=ebs_optimized, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior)
template = self.response_template(EC2_RUN_INSTANCES)
return template.render(reservation=new_reservation)
@ -113,12 +114,11 @@ class InstanceResponse(BaseResponse):
# TODO this and modify below should raise IncorrectInstanceState if
# instance not in stopped state
attribute = self._get_param('Attribute')
key = camelcase_to_underscores(attribute)
instance_id = self._get_param('InstanceId')
instance, value = self.ec2_backend.describe_instance_attribute(
instance_id, key)
instance_id, attribute)
if key == "group_set":
if attribute == "groupSet":
template = self.response_template(
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE)
else:
@ -244,7 +244,7 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
<reason/>
<keyName>{{ instance.key_name }}</keyName>
<ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
<amiLaunchIndex>0</amiLaunchIndex>
<amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
@ -384,7 +384,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
<reason>{{ instance._reason }}</reason>
<keyName>{{ instance.key_name }}</keyName>
<ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
<amiLaunchIndex>0</amiLaunchIndex>
<amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
<productCodes/>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
@ -450,6 +450,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
</blockDeviceMapping>
<virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
<clientToken>ABCDE1234567890123</clientToken>
{% if instance.get_tags() %}
<tagSet>
{% for tag in instance.get_tags() %}
<item>
@ -460,6 +461,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
</item>
{% endfor %}
</tagSet>
{% endif %}
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
{% for nic in instance.nics.values() %}
@ -595,7 +597,9 @@ EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="h
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
{% if value is not none %}
<value>{{ value }}</value>
{% endif %}
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
@ -603,9 +607,9 @@ EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
{% for sg_id in value %}
{% for sg in value %}
<item>
<groupId>{{ sg_id }}</groupId>
<groupId>{{ sg.id }}</groupId>
</item>
{% endfor %}
</{{ attribute }}>

View File

@ -107,6 +107,21 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
</item>
{% endfor %}
</groupSet>
<tagSpecificationSet>
{% for resource_type in launch_spec.tag_specifications %}
<item>
<resourceType>{{ resource_type }}</resourceType>
<tag>
{% for key, value in launch_spec.tag_specifications[resource_type].items() %}
<item>
<key>{{ key }}</key>
<value>{{ value }}</value>
</item>
{% endfor %}
</tag>
</item>
{% endfor %}
</tagSpecificationSet>
</item>
{% endfor %}
</launchSpecifications>

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals
import random
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.ec2.utils import filters_from_querystring
@ -16,6 +17,7 @@ class Subnets(BaseResponse):
vpc_id,
cidr_block,
availability_zone,
context=self,
)
template = self.response_template(CREATE_SUBNET_RESPONSE)
return template.render(subnet=subnet)
@ -35,8 +37,13 @@ class Subnets(BaseResponse):
def modify_subnet_attribute(self):
subnet_id = self._get_param('SubnetId')
map_public_ip = self._get_param('MapPublicIpOnLaunch.Value')
self.ec2_backend.modify_subnet_attribute(subnet_id, map_public_ip)
for attribute in ('MapPublicIpOnLaunch', 'AssignIpv6AddressOnCreation'):
if self.querystring.get('%s.Value' % attribute):
attr_name = camelcase_to_underscores(attribute)
attr_value = self.querystring.get('%s.Value' % attribute)[0]
self.ec2_backend.modify_subnet_attribute(
subnet_id, attr_name, attr_value)
return MODIFY_SUBNET_ATTRIBUTE_RESPONSE
@ -49,17 +56,14 @@ CREATE_SUBNET_RESPONSE = """
<vpcId>{{ subnet.vpc_id }}</vpcId>
<cidrBlock>{{ subnet.cidr_block }}</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>{{ subnet.availability_zone }}</availabilityZone>
<tagSet>
{% for tag in subnet.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<availabilityZone>{{ subnet._availability_zone.name }}</availabilityZone>
<availabilityZoneId>{{ subnet._availability_zone.zone_id }}</availabilityZoneId>
<defaultForAz>{{ subnet.default_for_az }}</defaultForAz>
<mapPublicIpOnLaunch>{{ subnet.map_public_ip_on_launch }}</mapPublicIpOnLaunch>
<ownerId>{{ subnet.owner_id }}</ownerId>
<assignIpv6AddressOnCreation>{{ subnet.assign_ipv6_address_on_creation }}</assignIpv6AddressOnCreation>
<ipv6CidrBlockAssociationSet>{{ subnet.ipv6_cidr_block_associations }}</ipv6CidrBlockAssociationSet>
<subnetArn>arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }}</subnetArn>
</subnet>
</CreateSubnetResponse>"""
@ -80,9 +84,15 @@ DESCRIBE_SUBNETS_RESPONSE = """
<vpcId>{{ subnet.vpc_id }}</vpcId>
<cidrBlock>{{ subnet.cidr_block }}</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>{{ subnet.availability_zone }}</availabilityZone>
<availabilityZone>{{ subnet._availability_zone.name }}</availabilityZone>
<availabilityZoneId>{{ subnet._availability_zone.zone_id }}</availabilityZoneId>
<defaultForAz>{{ subnet.default_for_az }}</defaultForAz>
<mapPublicIpOnLaunch>{{ subnet.map_public_ip_on_launch }}</mapPublicIpOnLaunch>
<ownerId>{{ subnet.owner_id }}</ownerId>
<assignIpv6AddressOnCreation>{{ subnet.assign_ipv6_address_on_creation }}</assignIpv6AddressOnCreation>
<ipv6CidrBlockAssociationSet>{{ subnet.ipv6_cidr_block_associations }}</ipv6CidrBlockAssociationSet>
<subnetArn>arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }}</subnetArn>
{% if subnet.get_tags() %}
<tagSet>
{% for tag in subnet.get_tags() %}
<item>
@ -93,6 +103,7 @@ DESCRIBE_SUBNETS_RESPONSE = """
</item>
{% endfor %}
</tagSet>
{% endif %}
</item>
{% endfor %}
</subnetSet>

View File

@ -74,7 +74,7 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """
"""
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnectionSet>
{% for vpc_pcx in vpc_pcxs %}
@ -88,12 +88,17 @@ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
<accepterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>true</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<expirationTime>2014-02-17T16:00:50.000Z</expirationTime>
<tagSet/>
</item>
{% endfor %}
@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
"""
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>123456789012</ownerId>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>777788889999</ownerId>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>

View File

@ -1,10 +1,19 @@
from __future__ import unicode_literals
import base64
import hashlib
import fnmatch
import random
import re
import six
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import sshpubkeys.exceptions
from sshpubkeys.keys import SSHKey
EC2_RESOURCE_TO_PREFIX = {
'customer-gateway': 'cgw',
'dhcp-options': 'dopt',
@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string):
def random_key_pair():
def random_hex():
return chr(random.choice(list(range(48, 58)) + list(range(97, 102))))
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
private_key_material = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key())
def random_fingerprint():
return ':'.join([random_hex() + random_hex() for i in range(20)])
def random_material():
return ''.join([
chr(random.choice(list(range(65, 91)) + list(range(48, 58)) +
list(range(97, 102))))
for i in range(1000)
])
material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \
"-----END RSA PRIVATE KEY-----"
return {
'fingerprint': random_fingerprint(),
'material': material
'fingerprint': public_key_fingerprint,
'material': private_key_material.decode('ascii')
}
@ -535,3 +540,28 @@ def generate_instance_identity_document(instance):
}
return document
def rsa_public_key_parse(key_material):
try:
if not isinstance(key_material, six.binary_type):
key_material = key_material.encode("ascii")
decoded_key = base64.b64decode(key_material).decode("ascii")
public_key = SSHKey(decoded_key)
except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError):
raise ValueError('bad key')
if not public_key.rsa:
raise ValueError('bad key')
return public_key.rsa
def rsa_public_key_fingerprint(rsa_public_key):
key_data = rsa_public_key.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
fingerprint_hex = hashlib.md5(key_data).hexdigest()
fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex)
return fingerprint
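# --- Illustrative sketch (not part of the diff): the colon-insertion step of
# rsa_public_key_fingerprint() in isolation. The lookahead (?!$) skips the
# final byte pair so the fingerprint has no trailing colon.
import re
assert re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', 'deadbeef') == 'de:ad:be:ef'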

View File

@ -1,7 +1,9 @@
from __future__ import unicode_literals
import hashlib
import re
from copy import copy
from datetime import datetime
from random import random
from botocore.exceptions import ParamValidationError
@ -105,7 +107,7 @@ class Image(BaseObject):
self.repository = repository
self.registry_id = registry_id
self.image_digest = digest
self.image_pushed_at = None
self.image_pushed_at = datetime.utcnow().isoformat()
def _create_digest(self):
image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6))
@ -119,6 +121,12 @@ class Image(BaseObject):
def get_image_manifest(self):
return self.image_manifest
def remove_tag(self, tag):
if tag is not None and tag in self.image_tags:
self.image_tags.remove(tag)
if self.image_tags:
self.image_tag = self.image_tags[-1]
def update_tag(self, tag):
self.image_tag = tag
if tag not in self.image_tags and tag is not None:
@ -151,7 +159,7 @@ class Image(BaseObject):
response_object['repositoryName'] = self.repository
response_object['registryId'] = self.registry_id
response_object['imageSizeInBytes'] = self.image_size_in_bytes
response_object['imagePushedAt'] = '2017-05-09'
response_object['imagePushedAt'] = self.image_pushed_at
return {k: v for k, v in response_object.items() if v is not None and v != []}
@property
@ -165,6 +173,13 @@ class Image(BaseObject):
response_object['registryId'] = self.registry_id
return {k: v for k, v in response_object.items() if v is not None and v != [None]}
@property
def response_batch_delete_image(self):
response_object = {}
response_object['imageDigest'] = self.get_image_digest()
response_object['imageTag'] = self.image_tag
return {k: v for k, v in response_object.items() if v is not None and v != [None]}
class ECRBackend(BaseBackend):
@ -310,6 +325,106 @@ class ECRBackend(BaseBackend):
return response
def batch_delete_image(self, repository_name, registry_id=None, image_ids=None):
if repository_name in self.repositories:
repository = self.repositories[repository_name]
else:
raise RepositoryNotFoundException(
repository_name, registry_id or DEFAULT_REGISTRY_ID
)
if not image_ids:
raise ParamValidationError(
msg='Missing required parameter in input: "imageIds"'
)
response = {
"imageIds": [],
"failures": []
}
for image_id in image_ids:
image_found = False
# Is request missing both digest and tag?
if "imageDigest" not in image_id and "imageTag" not in image_id:
response["failures"].append(
{
"imageId": {},
"failureCode": "MissingDigestAndTag",
"failureReason": "Invalid request parameters: both tag and digest cannot be null",
}
)
continue
# If we have a digest, is it valid?
if "imageDigest" in image_id:
pattern = re.compile(r"^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}")
if not pattern.match(image_id.get("imageDigest")):
response["failures"].append(
{
"imageId": {
"imageDigest": image_id.get("imageDigest", "null")
},
"failureCode": "InvalidImageDigest",
"failureReason": "Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'",
}
)
continue
for num, image in enumerate(repository.images):
# Search by matching both digest and tag
if "imageDigest" in image_id and "imageTag" in image_id:
if (
image_id["imageDigest"] == image.get_image_digest() and
image_id["imageTag"] in image.image_tags
):
image_found = True
for image_tag in reversed(image.image_tags):
repository.images[num].image_tag = image_tag
response["imageIds"].append(
image.response_batch_delete_image
)
repository.images[num].remove_tag(image_tag)
del repository.images[num]
# Search by matching digest
elif "imageDigest" in image_id and image.get_image_digest() == image_id["imageDigest"]:
image_found = True
for image_tag in reversed(image.image_tags):
repository.images[num].image_tag = image_tag
response["imageIds"].append(image.response_batch_delete_image)
repository.images[num].remove_tag(image_tag)
del repository.images[num]
# Search by matching tag
elif "imageTag" in image_id and image_id["imageTag"] in image.image_tags:
image_found = True
repository.images[num].image_tag = image_id["imageTag"]
response["imageIds"].append(image.response_batch_delete_image)
if len(image.image_tags) > 1:
repository.images[num].remove_tag(image_id["imageTag"])
else:
repository.images.remove(image)
if not image_found:
failure_response = {
"imageId": {},
"failureCode": "ImageNotFound",
"failureReason": "Requested image not found",
}
if "imageDigest" in image_id:
failure_response["imageId"]["imageDigest"] = image_id.get("imageDigest", "null")
if "imageTag" in image_id:
failure_response["imageId"]["imageTag"] = image_id.get("imageTag", "null")
response["failures"].append(failure_response)
return response
ecr_backends = {}
for region, ec2_backend in ec2_backends.items():
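Together with the response wiring in the next file, the new backend method lets the mocked ECR API delete images by tag or digest. A minimal test-style sketch, assuming moto stores the manifest string verbatim (repository and tag names are made up):

import json

import boto3
from moto import mock_ecr

@mock_ecr
def batch_delete_by_tag_example():
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test-repo')
    # The manifest content is a placeholder.
    client.put_image(
        repositoryName='test-repo',
        imageManifest=json.dumps({'layers': []}),
        imageTag='latest',
    )
    response = client.batch_delete_image(
        repositoryName='test-repo',
        imageIds=[{'imageTag': 'latest'}],
    )
    assert response['imageIds'][0]['imageTag'] == 'latest'
    assert response['failures'] == []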

@ -84,9 +84,12 @@ class ECRResponse(BaseResponse):
'ECR.batch_check_layer_availability is not yet implemented')
def batch_delete_image(self):
if self.is_not_dryrun('BatchDeleteImage'):
raise NotImplementedError(
'ECR.batch_delete_image is not yet implemented')
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
image_ids = self._get_param('imageIds')
response = self.ecr_backend.batch_delete_image(repository_str, registry_id, image_ids)
return json.dumps(response)
def batch_get_image(self):
repository_str = self._get_param('repositoryName')

@ -94,6 +94,12 @@ class Cluster(BaseObject):
# no-op when nothing changed between old and new resources
return original_resource
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
return self.arn
raise UnformattedGetAttTemplateException()
class TaskDefinition(BaseObject):
@ -271,6 +277,12 @@ class Service(BaseObject):
else:
return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Name':
return self.name
raise UnformattedGetAttTemplateException()
class ContainerInstance(BaseObject):
@ -358,6 +370,20 @@ class ContainerInstance(BaseObject):
return formatted_attr
class ClusterFailure(BaseObject):
def __init__(self, reason, cluster_name):
self.reason = reason
self.arn = "arn:aws:ecs:us-east-1:012345678910:cluster/{0}".format(
cluster_name)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['reason'] = self.reason
response_object['arn'] = self.arn
return response_object
class ContainerInstanceFailure(BaseObject):
def __init__(self, reason, container_instance_id):
@ -419,6 +445,7 @@ class EC2ContainerServiceBackend(BaseBackend):
def describe_clusters(self, list_clusters_name=None):
list_clusters = []
failures = []
if list_clusters_name is None:
if 'default' in self.clusters:
list_clusters.append(self.clusters['default'].response_object)
@ -429,9 +456,8 @@ class EC2ContainerServiceBackend(BaseBackend):
list_clusters.append(
self.clusters[cluster_name].response_object)
else:
raise Exception(
"{0} is not a cluster".format(cluster_name))
return list_clusters
failures.append(ClusterFailure('MISSING', cluster_name))
return list_clusters, failures
def delete_cluster(self, cluster_str):
cluster_name = cluster_str.split('/')[-1]
@ -673,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend):
return service
def list_services(self, cluster_str):
def list_services(self, cluster_str, scheduling_strategy=None):
cluster_name = cluster_str.split('/')[-1]
service_arns = []
for key, value in self.services.items():
if cluster_name + ':' in key:
service_arns.append(self.services[key].arn)
service = self.services[key]
if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy:
service_arns.append(service.arn)
return sorted(service_arns)
def describe_services(self, cluster_str, service_names_or_arns):
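With failures threaded through the backend, describing a mix of existing and unknown clusters now reports the unknown ones instead of raising. A minimal sketch (cluster names are made up):

import boto3
from moto import mock_ecs

@mock_ecs
def describe_missing_cluster_example():
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='existing')
    response = client.describe_clusters(clusters=['existing', 'missing'])
    assert len(response['clusters']) == 1
    # The unknown name comes back as a failure with reason MISSING.
    assert response['failures'][0]['reason'] == 'MISSING'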

@ -45,10 +45,10 @@ class EC2ContainerServiceResponse(BaseResponse):
def describe_clusters(self):
list_clusters_name = self._get_param('clusters')
clusters = self.ecs_backend.describe_clusters(list_clusters_name)
clusters, failures = self.ecs_backend.describe_clusters(list_clusters_name)
return json.dumps({
'clusters': clusters,
'failures': []
'failures': [cluster.response_object for cluster in failures]
})
def delete_cluster(self):
@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse):
def list_services(self):
cluster_str = self._get_param('cluster')
service_arns = self.ecs_backend.list_services(cluster_str)
scheduling_strategy = self._get_param('schedulingStrategy')
service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy)
return json.dumps({
'serviceArns': service_arns
# ,
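Exercised through boto3, the filter behaves like the real API. A sketch, assuming the mocked create_service also records schedulingStrategy on the Service model:

import boto3
from moto import mock_ecs

@mock_ecs
def list_replica_services_example():
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='demo')
    client.register_task_definition(
        family='web',
        containerDefinitions=[{'name': 'web', 'image': 'nginx', 'memory': 128}],
    )
    client.create_service(cluster='demo', serviceName='svc',
                          taskDefinition='web', desiredCount=1,
                          schedulingStrategy='REPLICA')
    arns = client.list_services(cluster='demo',
                                schedulingStrategy='REPLICA')['serviceArns']
    assert len(arns) == 1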

@ -97,7 +97,8 @@ class FakeCluster(BaseModel):
visible_to_all_users='false',
release_label=None,
requested_ami_version=None,
running_ami_version=None):
running_ami_version=None,
custom_ami_id=None):
self.id = cluster_id or random_cluster_id()
emr_backend.clusters[self.id] = self
self.emr_backend = emr_backend
@ -162,6 +163,7 @@ class FakeCluster(BaseModel):
self.release_label = release_label
self.requested_ami_version = requested_ami_version
self.running_ami_version = running_ami_version
self.custom_ami_id = custom_ami_id
self.role = job_flow_role or 'EMRJobflowDefault'
self.service_role = service_role

@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse):
else:
kwargs['running_ami_version'] = '1.0.0'
custom_ami_id = self._get_param('CustomAmiId')
if custom_ami_id:
kwargs['custom_ami_id'] = custom_ami_id
if release_label and release_label < 'emr-5.7.0':
message = 'Custom AMI is not allowed'
raise EmrError(error_type='ValidationException',
message=message, template='error_json')
elif ami_version:
message = 'Custom AMI is not supported in this version of EMR'
raise EmrError(error_type='ValidationException',
message=message, template='error_json')
cluster = self.backend.run_job_flow(**kwargs)
applications = self._get_list_prefix('Applications.member')
@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """<DescribeClusterResponse xmlns="http://elasticmap
</member>
{% endfor %}
</Configurations>
{% if cluster.custom_ami_id is not none %}
<CustomAmiId>{{ cluster.custom_ami_id }}</CustomAmiId>
{% endif %}
<Ec2InstanceAttributes>
<AdditionalMasterSecurityGroups>
{% for each in cluster.additional_master_security_groups %}
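On the wire this means RunJobFlow accepts CustomAmiId only with release emr-5.7.0 or later, and DescribeCluster echoes it back. A sketch (the AMI id and instance details are made up):

import boto3
from moto import mock_emr

@mock_emr
def custom_ami_example():
    client = boto3.client('emr', region_name='us-east-1')
    cluster = client.run_job_flow(
        Name='cluster',
        ReleaseLabel='emr-5.7.0',   # anything below emr-5.7.0 is rejected
        Instances={'MasterInstanceType': 'm4.large',
                   'SlaveInstanceType': 'm4.large',
                   'InstanceCount': 2,
                   'KeepJobFlowAliveWhenNoSteps': True},
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole',
        CustomAmiId='ami-1234abcd',
    )
    described = client.describe_cluster(ClusterId=cluster['JobFlowId'])
    assert described['Cluster']['CustomAmiId'] == 'ami-1234abcd'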

@ -56,6 +56,14 @@ class GlueBackend(BaseBackend):
database = self.get_database(database_name)
return [table for table_name, table in database.tables.items()]
def delete_table(self, database_name, table_name):
database = self.get_database(database_name)
try:
del database.tables[table_name]
except KeyError:
raise TableNotFoundException(table_name)
return {}
class FakeDatabase(BaseModel):
@ -130,6 +138,12 @@ class FakeTable(BaseModel):
raise PartitionAlreadyExistsException()
self.partitions[key] = partition
def delete_partition(self, values):
try:
del self.partitions[str(values)]
except KeyError:
raise PartitionNotFoundException()
class FakePartition(BaseModel):
def __init__(self, database_name, table_name, partiton_input):

@ -4,6 +4,11 @@ import json
from moto.core.responses import BaseResponse
from .models import glue_backend
from .exceptions import (
PartitionAlreadyExistsException,
PartitionNotFoundException,
TableNotFoundException
)
class GlueResponse(BaseResponse):
@ -84,6 +89,34 @@ class GlueResponse(BaseResponse):
]
})
def delete_table(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('Name')
resp = self.glue_backend.delete_table(database_name, table_name)
return json.dumps(resp)
def batch_delete_table(self):
database_name = self.parameters.get('DatabaseName')
errors = []
for table_name in self.parameters.get('TablesToDelete'):
try:
self.glue_backend.delete_table(database_name, table_name)
except TableNotFoundException:
errors.append({
"TableName": table_name,
"ErrorDetail": {
"ErrorCode": "EntityNotFoundException",
"ErrorMessage": "Table not found"
}
})
out = {}
if errors:
out["Errors"] = errors
return json.dumps(out)
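batch_delete_table reuses delete_table per name and collects per-table errors instead of failing the whole call. A sketch with one existing and one unknown table (names are made up):

import boto3
from moto import mock_glue

@mock_glue
def batch_delete_table_example():
    client = boto3.client('glue', region_name='us-east-1')
    client.create_database(DatabaseInput={'Name': 'db'})
    client.create_table(DatabaseName='db', TableInput={'Name': 'tbl'})
    result = client.batch_delete_table(DatabaseName='db',
                                       TablesToDelete=['tbl', 'missing'])
    # Only the unknown table shows up as an error; the real one is gone.
    assert result['Errors'][0]['TableName'] == 'missing'
    assert client.get_tables(DatabaseName='db')['TableList'] == []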
def get_partitions(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')
@ -118,6 +151,30 @@ class GlueResponse(BaseResponse):
return ""
def batch_create_partition(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')
table = self.glue_backend.get_table(database_name, table_name)
errors_output = []
for part_input in self.parameters.get('PartitionInputList'):
try:
table.create_partition(part_input)
except PartitionAlreadyExistsException:
errors_output.append({
'PartitionValues': part_input['Values'],
'ErrorDetail': {
'ErrorCode': 'AlreadyExistsException',
'ErrorMessage': 'Partition already exists.'
}
})
out = {}
if errors_output:
out["Errors"] = errors_output
return json.dumps(out)
def update_partition(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')
@ -128,3 +185,38 @@ class GlueResponse(BaseResponse):
table.update_partition(part_to_update, part_input)
return ""
def delete_partition(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')
part_to_delete = self.parameters.get('PartitionValues')
table = self.glue_backend.get_table(database_name, table_name)
table.delete_partition(part_to_delete)
return ""
def batch_delete_partition(self):
database_name = self.parameters.get('DatabaseName')
table_name = self.parameters.get('TableName')
table = self.glue_backend.get_table(database_name, table_name)
errors_output = []
for part_input in self.parameters.get('PartitionsToDelete'):
values = part_input.get('Values')
try:
table.delete_partition(values)
except PartitionNotFoundException:
errors_output.append({
'PartitionValues': values,
'ErrorDetail': {
'ErrorCode': 'EntityNotFoundException',
'ErrorMessage': 'Partition not found',
}
})
out = {}
if errors_output:
out['Errors'] = errors_output
return json.dumps(out)
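The partition batch calls behave the same way, reporting an EntityNotFoundException detail per missing partition. A sketch, assuming a minimal table and partition input are accepted:

import boto3
from moto import mock_glue

@mock_glue
def batch_delete_partition_example():
    client = boto3.client('glue', region_name='us-east-1')
    client.create_database(DatabaseInput={'Name': 'db'})
    client.create_table(DatabaseName='db', TableInput={'Name': 'tbl'})
    client.create_partition(DatabaseName='db', TableName='tbl',
                            PartitionInput={'Values': ['2019-07-07']})
    result = client.batch_delete_partition(
        DatabaseName='db', TableName='tbl',
        PartitionsToDelete=[{'Values': ['2019-07-07']},
                            {'Values': ['not-there']}])
    # The existing partition is deleted silently; the other is reported.
    assert result['Errors'][0]['PartitionValues'] == ['not-there']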

File diff suppressed because it is too large
@ -26,9 +26,70 @@ class IAMReportNotPresentException(RESTError):
"ReportNotPresent", message)
class IAMLimitExceededException(RESTError):
code = 400
def __init__(self, message):
super(IAMLimitExceededException, self).__init__(
"LimitExceeded", message)
class MalformedCertificate(RESTError):
code = 400
def __init__(self, cert):
super(MalformedCertificate, self).__init__(
'MalformedCertificate', 'Certificate {cert} is malformed'.format(cert=cert))
class MalformedPolicyDocument(RESTError):
code = 400
def __init__(self, message=""):
super(MalformedPolicyDocument, self).__init__(
'MalformedPolicyDocument', message)
class DuplicateTags(RESTError):
code = 400
def __init__(self):
super(DuplicateTags, self).__init__(
'InvalidInput', 'Duplicate tag keys found. Please note that Tag keys are case insensitive.')
class TagKeyTooBig(RESTError):
code = 400
def __init__(self, tag, param='tags.X.member.key'):
super(TagKeyTooBig, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
"constraint: Member must have length less than or equal to 128.".format(tag, param))
class TagValueTooBig(RESTError):
code = 400
def __init__(self, tag):
super(TagValueTooBig, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at 'tags.X.member.value' failed to satisfy "
"constraint: Member must have length less than or equal to 256.".format(tag))
class InvalidTagCharacters(RESTError):
code = 400
def __init__(self, tag, param='tags.X.member.key'):
message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(tag, param)
message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
super(InvalidTagCharacters, self).__init__('ValidationError', message)
class TooManyTags(RESTError):
code = 400
def __init__(self, tags, param='tags'):
super(TooManyTags, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
"constraint: Member must have length less than or equal to 50.".format(tags, param))

@ -3,16 +3,19 @@ import base64
import sys
from datetime import datetime
import json
import re
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import pytz
from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds
from moto.core.utils import iso_8601_datetime_without_milliseconds, iso_8601_datetime_with_milliseconds
from moto.iam.policy_validation import IAMPolicyDocumentValidator
from .aws_managed_policies import aws_managed_policies_data
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, MalformedCertificate
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, IAMLimitExceededException, \
MalformedCertificate, DuplicateTags, TagKeyTooBig, InvalidTagCharacters, TooManyTags, TagValueTooBig
from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id
ACCOUNT_ID = 123456789012
@ -25,14 +28,17 @@ class MFADevice(object):
serial_number,
authentication_code_1,
authentication_code_2):
self.enable_date = datetime.now(pytz.utc)
self.enable_date = datetime.utcnow()
self.serial_number = serial_number
self.authentication_code_1 = authentication_code_1
self.authentication_code_2 = authentication_code_2
@property
def enabled_iso_8601(self):
return iso_8601_datetime_without_milliseconds(self.enable_date)
class Policy(BaseModel):
is_attachable = False
def __init__(self,
@ -40,18 +46,41 @@ class Policy(BaseModel):
default_version_id=None,
description=None,
document=None,
path=None):
path=None,
create_date=None,
update_date=None):
self.name = name
self.attachment_count = 0
self.description = description or ''
self.id = random_policy_id()
self.path = path or '/'
self.default_version_id = default_version_id or 'v1'
self.versions = [PolicyVersion(self.arn, document, True)]
self.create_datetime = datetime.now(pytz.utc)
self.update_datetime = datetime.now(pytz.utc)
if default_version_id:
self.default_version_id = default_version_id
self.next_version_num = int(default_version_id.lstrip('v')) + 1
else:
self.default_version_id = 'v1'
self.next_version_num = 2
self.versions = [PolicyVersion(self.arn, document, True, self.default_version_id, update_date)]
self.create_date = create_date if create_date is not None else datetime.utcnow()
self.update_date = update_date if update_date is not None else datetime.utcnow()
def update_default_version(self, new_default_version_id):
for version in self.versions:
if version.version_id == self.default_version_id:
version.is_default = False
break
self.default_version_id = new_default_version_id
@property
def created_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.create_date)
@property
def updated_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.update_date)
class SAMLProvider(BaseModel):
@ -69,13 +98,19 @@ class PolicyVersion(object):
def __init__(self,
policy_arn,
document,
is_default=False):
is_default=False,
version_id='v1',
create_date=None):
self.policy_arn = policy_arn
self.document = document or {}
self.is_default = is_default
self.version_id = 'v1'
self.version_id = version_id
self.create_datetime = datetime.now(pytz.utc)
self.create_date = create_date if create_date is not None else datetime.utcnow()
@property
def created_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.create_date)
class ManagedPolicy(Policy):
@ -104,7 +139,9 @@ class AWSManagedPolicy(ManagedPolicy):
return cls(name,
default_version_id=data.get('DefaultVersionId'),
path=data.get('Path'),
document=data.get('Document'))
document=json.dumps(data.get('Document')),
create_date=datetime.strptime(data.get('CreateDate'), "%Y-%m-%dT%H:%M:%S+00:00"),
update_date=datetime.strptime(data.get('UpdateDate'), "%Y-%m-%dT%H:%M:%S+00:00"))
@property
def arn(self):
@ -124,14 +161,21 @@ class InlinePolicy(Policy):
class Role(BaseModel):
def __init__(self, role_id, name, assume_role_policy_document, path):
def __init__(self, role_id, name, assume_role_policy_document, path, permissions_boundary):
self.id = role_id
self.name = name
self.assume_role_policy_document = assume_role_policy_document
self.path = path or '/'
self.policies = {}
self.managed_policies = {}
self.create_date = datetime.now(pytz.utc)
self.create_date = datetime.utcnow()
self.tags = {}
self.description = ""
self.permissions_boundary = permissions_boundary
@property
def created_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.create_date)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@ -141,6 +185,7 @@ class Role(BaseModel):
role_name=resource_name,
assume_role_policy_document=properties['AssumeRolePolicyDocument'],
path=properties.get('Path', '/'),
permissions_boundary=properties.get('PermissionsBoundary', '')
)
policies = properties.get('Policies', [])
@ -175,6 +220,9 @@ class Role(BaseModel):
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
def get_tags(self):
return [self.tags[tag] for tag in self.tags]
class InstanceProfile(BaseModel):
@ -183,7 +231,11 @@ class InstanceProfile(BaseModel):
self.name = name
self.path = path or '/'
self.roles = roles if roles else []
self.create_date = datetime.now(pytz.utc)
self.create_date = datetime.utcnow()
@property
def created_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.create_date)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@ -207,7 +259,7 @@ class InstanceProfile(BaseModel):
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
return self.arn
raise UnformattedGetAttTemplateException()
@ -235,25 +287,31 @@ class SigningCertificate(BaseModel):
self.id = id
self.user_name = user_name
self.body = body
self.upload_date = datetime.strftime(datetime.utcnow(), "%Y-%m-%d-%H-%M-%S")
self.upload_date = datetime.utcnow()
self.status = 'Active'
@property
def uploaded_iso_8601(self):
return iso_8601_datetime_without_milliseconds(self.upload_date)
class AccessKey(BaseModel):
def __init__(self, user_name):
self.user_name = user_name
self.access_key_id = random_access_key()
self.secret_access_key = random_alphanumeric(32)
self.access_key_id = "AKIA" + random_access_key()
self.secret_access_key = random_alphanumeric(40)
self.status = 'Active'
self.create_date = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%dT%H:%M:%SZ"
)
self.last_used = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%dT%H:%M:%SZ"
)
self.create_date = datetime.utcnow()
self.last_used = datetime.utcnow()
@property
def created_iso_8601(self):
return iso_8601_datetime_without_milliseconds(self.create_date)
@property
def last_used_iso_8601(self):
return iso_8601_datetime_without_milliseconds(self.last_used)
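Generated keys now look like real IAM credentials: an AKIA-prefixed key id and a 40-character secret, with real datetimes behind the ISO 8601 properties. A quick sketch of what the mock returns:

import boto3
from moto import mock_iam

@mock_iam
def access_key_shape_example():
    client = boto3.client('iam', region_name='us-east-1')
    client.create_user(UserName='alice')
    key = client.create_access_key(UserName='alice')['AccessKey']
    assert key['AccessKeyId'].startswith('AKIA')
    assert len(key['SecretAccessKey']) == 40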
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
@ -268,15 +326,16 @@ class Group(BaseModel):
self.name = name
self.id = random_resource_id()
self.path = path
self.created = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
self.create_date = datetime.utcnow()
self.users = []
self.managed_policies = {}
self.policies = {}
@property
def created_iso_8601(self):
return iso_8601_datetime_with_milliseconds(self.create_date)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
@ -291,10 +350,6 @@ class Group(BaseModel):
else:
return "arn:aws:iam::{0}:group/{1}/{2}".format(ACCOUNT_ID, self.path, self.name)
@property
def create_date(self):
return self.created
def get_policy(self, policy_name):
try:
policy_json = self.policies[policy_name]
@ -320,7 +375,7 @@ class User(BaseModel):
self.name = name
self.id = random_resource_id()
self.path = path if path else "/"
self.created = datetime.utcnow()
self.create_date = datetime.utcnow()
self.mfa_devices = {}
self.policies = {}
self.managed_policies = {}
@ -335,7 +390,7 @@ class User(BaseModel):
@property
def created_iso_8601(self):
return iso_8601_datetime_without_milliseconds(self.created)
return iso_8601_datetime_with_milliseconds(self.create_date)
def get_policy(self, policy_name):
policy_json = None
@ -406,7 +461,7 @@ class User(BaseModel):
def to_csv(self):
date_format = '%Y-%m-%dT%H:%M:%S+00:00'
date_created = self.created
date_created = self.create_date
# aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
if not self.password:
password_enabled = 'false'
@ -458,16 +513,28 @@ class IAMBackend(BaseBackend):
self.managed_policies = self._init_managed_policies()
self.account_aliases = []
self.saml_providers = {}
self.policy_arn_regex = re.compile(
r'^arn:aws:iam::[0-9]*:policy/.*$')
super(IAMBackend, self).__init__()
def _init_managed_policies(self):
return dict((p.name, p) for p in aws_managed_policies)
return dict((p.arn, p) for p in aws_managed_policies)
def attach_role_policy(self, policy_arn, role_name):
arns = dict((p.arn, p) for p in self.managed_policies.values())
policy = arns[policy_arn]
policy.attach_to(self.get_role(role_name))
def update_role_description(self, role_name, role_description):
role = self.get_role(role_name)
role.description = role_description
return role
def update_role(self, role_name, role_description):
role = self.get_role(role_name)
role.description = role_description
return role
def detach_role_policy(self, policy_arn, role_name):
arns = dict((p.arn, p) for p in self.managed_policies.values())
try:
@ -509,6 +576,9 @@ class IAMBackend(BaseBackend):
policy.detach_from(self.get_user(user_name))
def create_policy(self, description, path, policy_document, policy_name):
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_document)
iam_policy_document_validator.validate()
policy = ManagedPolicy(
policy_name,
description=description,
@ -565,9 +635,12 @@ class IAMBackend(BaseBackend):
return policies, marker
def create_role(self, role_name, assume_role_policy_document, path):
def create_role(self, role_name, assume_role_policy_document, path, permissions_boundary):
role_id = random_resource_id()
role = Role(role_id, role_name, assume_role_policy_document, path)
if permissions_boundary and not self.policy_arn_regex.match(permissions_boundary):
raise RESTError('InvalidParameterValue', 'Value ({}) for parameter PermissionsBoundary is invalid.'.format(permissions_boundary))
role = Role(role_id, role_name, assume_role_policy_document, path, permissions_boundary)
self.roles[role_id] = role
return role
@ -598,6 +671,9 @@ class IAMBackend(BaseBackend):
def put_role_policy(self, role_name, policy_name, policy_json):
role = self.get_role(role_name)
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json)
iam_policy_document_validator.validate()
role.put_policy(policy_name, policy_json)
def delete_role_policy(self, role_name, policy_name):
@ -609,20 +685,108 @@ class IAMBackend(BaseBackend):
for p, d in role.policies.items():
if p == policy_name:
return p, d
raise IAMNotFoundException("Policy Document {0} not attached to role {1}".format(policy_name, role_name))
def list_role_policies(self, role_name):
role = self.get_role(role_name)
return role.policies.keys()
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
"""Validates the tag key.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
"""
# Validate that the key length is correct:
if len(tag_key) > 128:
raise TagKeyTooBig(tag_key, param=exception_param)
# Validate that the tag key fits the proper Regex:
# [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
# Kudos if you can come up with a better way of doing a global search :)
if not len(match) or len(match[0]) < len(tag_key):
raise InvalidTagCharacters(tag_key, param=exception_param)
def _check_tag_duplicate(self, all_tags, tag_key):
"""Validates that a tag key is not a duplicate
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:return:
"""
if tag_key in all_tags:
raise DuplicateTags()
def list_role_tags(self, role_name, marker, max_items=100):
role = self.get_role(role_name)
max_items = int(max_items)
tag_index = sorted(role.tags)
start_idx = int(marker) if marker else 0
tag_index = tag_index[start_idx:start_idx + max_items]
if len(role.tags) <= (start_idx + max_items):
marker = None
else:
marker = str(start_idx + max_items)
# Make the tag list of dict's:
tags = [role.tags[tag] for tag in tag_index]
return tags, marker
def tag_role(self, role_name, tags):
if len(tags) > 50:
raise TooManyTags(tags)
role = self.get_role(role_name)
tag_keys = {}
for tag in tags:
# Need to index by the lowercase tag key since the keys are case insensitive, but their case is retained.
ref_key = tag['Key'].lower()
self._check_tag_duplicate(tag_keys, ref_key)
self._validate_tag_key(tag['Key'])
if len(tag['Value']) > 256:
raise TagValueTooBig(tag['Value'])
tag_keys[ref_key] = tag
role.tags.update(tag_keys)
def untag_role(self, role_name, tag_keys):
if len(tag_keys) > 50:
raise TooManyTags(tag_keys, param='tagKeys')
role = self.get_role(role_name)
for key in tag_keys:
ref_key = key.lower()
self._validate_tag_key(key, exception_param='tagKeys')
role.tags.pop(ref_key, None)
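End to end, the three operations round-trip through boto3. A minimal sketch (the assume-role document is a placeholder the mock accepts):

import boto3
from moto import mock_iam

@mock_iam
def role_tag_roundtrip_example():
    client = boto3.client('iam', region_name='us-east-1')
    client.create_role(RoleName='my-role', AssumeRolePolicyDocument='{}')
    client.tag_role(RoleName='my-role',
                    Tags=[{'Key': 'Team', 'Value': 'platform'}])
    tags = client.list_role_tags(RoleName='my-role')['Tags']
    assert tags == [{'Key': 'Team', 'Value': 'platform'}]
    # Keys are matched case-insensitively, so 'team' removes 'Team'.
    client.untag_role(RoleName='my-role', TagKeys=['team'])
    assert client.list_role_tags(RoleName='my-role')['Tags'] == []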
def create_policy_version(self, policy_arn, policy_document, set_as_default):
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_document)
iam_policy_document_validator.validate()
policy = self.get_policy(policy_arn)
if not policy:
raise IAMNotFoundException("Policy not found")
if len(policy.versions) >= 5:
raise IAMLimitExceededException("A managed policy can have up to 5 versions. Before you create a new version, you must delete an existing version.")
set_as_default = (set_as_default == "true") # convert it to python bool
version = PolicyVersion(policy_arn, policy_document, set_as_default)
policy.versions.append(version)
version.version_id = 'v{0}'.format(len(policy.versions))
version.version_id = 'v{0}'.format(policy.next_version_num)
policy.next_version_num += 1
if set_as_default:
policy.default_version_id = version.version_id
policy.update_default_version(version.version_id)
return version
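Because next_version_num only moves forward, deleting an old version never recycles its id. A sketch:

import json

import boto3
from moto import mock_iam

DOC = json.dumps({'Version': '2012-10-17',
                  'Statement': [{'Effect': 'Allow',
                                 'Action': 's3:ListBucket',
                                 'Resource': '*'}]})

@mock_iam
def version_id_example():
    client = boto3.client('iam', region_name='us-east-1')
    arn = client.create_policy(PolicyName='p',
                               PolicyDocument=DOC)['Policy']['Arn']
    v2 = client.create_policy_version(PolicyArn=arn, PolicyDocument=DOC,
                                      SetAsDefault=True)['PolicyVersion']
    client.delete_policy_version(PolicyArn=arn, VersionId='v1')
    v3 = client.create_policy_version(PolicyArn=arn,
                                      PolicyDocument=DOC)['PolicyVersion']
    # v1 was deleted, but the next version is still v3, not a reused v1.
    assert (v2['VersionId'], v3['VersionId']) == ('v2', 'v3')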
def get_policy_version(self, policy_arn, version_id):
@ -645,8 +809,8 @@ class IAMBackend(BaseBackend):
if not policy:
raise IAMNotFoundException("Policy not found")
if version_id == policy.default_version_id:
raise IAMConflictException(
"Cannot delete the default version of a policy")
raise IAMConflictException(code="DeleteConflict",
message="Cannot delete the default version of a policy.")
for i, v in enumerate(policy.versions):
if v.version_id == version_id:
del policy.versions[i]
@ -758,6 +922,9 @@ class IAMBackend(BaseBackend):
def put_group_policy(self, group_name, policy_name, policy_json):
group = self.get_group(group_name)
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json)
iam_policy_document_validator.validate()
group.put_policy(policy_name, policy_json)
def list_group_policies(self, group_name, marker=None, max_items=None):
@ -796,6 +963,28 @@ class IAMBackend(BaseBackend):
return users
def update_user(self, user_name, new_path=None, new_user_name=None):
try:
user = self.users[user_name]
except KeyError:
raise IAMNotFoundException("User {0} not found".format(user_name))
if new_path:
user.path = new_path
if new_user_name:
user.name = new_user_name
self.users[new_user_name] = self.users.pop(user_name)
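Renaming re-keys the user in the backend, so subsequent lookups use the new name. A sketch:

import boto3
from moto import mock_iam

@mock_iam
def rename_user_example():
    client = boto3.client('iam', region_name='us-east-1')
    client.create_user(UserName='old-name')
    client.update_user(UserName='old-name', NewUserName='new-name')
    assert client.get_user(UserName='new-name')['User']['UserName'] == 'new-name'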
def list_roles(self, path_prefix, marker, max_items):
roles = None
try:
roles = self.roles.values()
except KeyError:
raise IAMNotFoundException(
"Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items))
return roles
def upload_signing_certificate(self, user_name, body):
user = self.get_user(user_name)
cert_id = random_resource_id(size=32)
@ -896,6 +1085,9 @@ class IAMBackend(BaseBackend):
def put_user_policy(self, user_name, policy_name, policy_json):
user = self.get_user(user_name)
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json)
iam_policy_document_validator.validate()
user.put_policy(policy_name, policy_json)
def delete_user_policy(self, user_name, policy_name):
@ -917,7 +1109,7 @@ class IAMBackend(BaseBackend):
if key.access_key_id == access_key_id:
return {
'user_name': key.user_name,
'last_used': key.last_used
'last_used': key.last_used_iso_8601,
}
else:
raise IAMNotFoundException(

@ -0,0 +1,450 @@
import json
import re
from six import string_types
from moto.iam.exceptions import MalformedPolicyDocument
VALID_TOP_ELEMENTS = [
"Version",
"Id",
"Statement",
"Conditions"
]
VALID_VERSIONS = [
"2008-10-17",
"2012-10-17"
]
VALID_STATEMENT_ELEMENTS = [
"Sid",
"Action",
"NotAction",
"Resource",
"NotResource",
"Effect",
"Condition"
]
VALID_EFFECTS = [
"Allow",
"Deny"
]
VALID_CONDITIONS = [
"StringEquals",
"StringNotEquals",
"StringEqualsIgnoreCase",
"StringNotEqualsIgnoreCase",
"StringLike",
"StringNotLike",
"NumericEquals",
"NumericNotEquals",
"NumericLessThan",
"NumericLessThanEquals",
"NumericGreaterThan",
"NumericGreaterThanEquals",
"DateEquals",
"DateNotEquals",
"DateLessThan",
"DateLessThanEquals",
"DateGreaterThan",
"DateGreaterThanEquals",
"Bool",
"BinaryEquals",
"IpAddress",
"NotIpAddress",
"ArnEquals",
"ArnLike",
"ArnNotEquals",
"ArnNotLike",
"Null"
]
VALID_CONDITION_PREFIXES = [
"ForAnyValue:",
"ForAllValues:"
]
VALID_CONDITION_POSTFIXES = [
"IfExists"
]
SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS = {
"iam": 'IAM resource {resource} cannot contain region information.',
"s3": 'Resource {resource} can not contain region information.'
}
VALID_RESOURCE_PATH_STARTING_VALUES = {
"iam": {
"values": ["user/", "federated-user/", "role/", "group/", "instance-profile/", "mfa/", "server-certificate/",
"policy/", "sms-mfa/", "saml-provider/", "oidc-provider/", "report/", "access-report/"],
"error_message": 'IAM resource path must either be "*" or start with {values}.'
}
}
class IAMPolicyDocumentValidator:
def __init__(self, policy_document):
self._policy_document = policy_document
self._policy_json = {}
self._statements = []
self._resource_error = "" # the first resource error found that does not generate a legacy parsing error
def validate(self):
try:
self._validate_syntax()
except Exception:
raise MalformedPolicyDocument("Syntax errors in policy.")
try:
self._validate_version()
except Exception:
raise MalformedPolicyDocument("Policy document must be version 2012-10-17 or greater.")
try:
self._perform_first_legacy_parsing()
self._validate_resources_for_formats()
self._validate_not_resources_for_formats()
except Exception:
raise MalformedPolicyDocument("The policy failed legacy parsing")
try:
self._validate_sid_uniqueness()
except Exception:
raise MalformedPolicyDocument("Statement IDs (SID) in a single policy must be unique.")
try:
self._validate_action_like_exist()
except Exception:
raise MalformedPolicyDocument("Policy statement must contain actions.")
try:
self._validate_resource_exist()
except Exception:
raise MalformedPolicyDocument("Policy statement must contain resources.")
if self._resource_error != "":
raise MalformedPolicyDocument(self._resource_error)
self._validate_actions_for_prefixes()
self._validate_not_actions_for_prefixes()
def _validate_syntax(self):
self._policy_json = json.loads(self._policy_document)
assert isinstance(self._policy_json, dict)
self._validate_top_elements()
self._validate_version_syntax()
self._validate_id_syntax()
self._validate_statements_syntax()
def _validate_top_elements(self):
top_elements = self._policy_json.keys()
for element in top_elements:
assert element in VALID_TOP_ELEMENTS
def _validate_version_syntax(self):
if "Version" in self._policy_json:
assert self._policy_json["Version"] in VALID_VERSIONS
def _validate_version(self):
assert self._policy_json["Version"] == "2012-10-17"
def _validate_sid_uniqueness(self):
sids = []
for statement in self._statements:
if "Sid" in statement:
assert statement["Sid"] not in sids
sids.append(statement["Sid"])
def _validate_statements_syntax(self):
assert "Statement" in self._policy_json
assert isinstance(self._policy_json["Statement"], (dict, list))
if isinstance(self._policy_json["Statement"], dict):
self._statements.append(self._policy_json["Statement"])
else:
self._statements += self._policy_json["Statement"]
assert self._statements
for statement in self._statements:
self._validate_statement_syntax(statement)
@staticmethod
def _validate_statement_syntax(statement):
assert isinstance(statement, dict)
for statement_element in statement.keys():
assert statement_element in VALID_STATEMENT_ELEMENTS
assert ("Resource" not in statement or "NotResource" not in statement)
assert ("Action" not in statement or "NotAction" not in statement)
IAMPolicyDocumentValidator._validate_effect_syntax(statement)
IAMPolicyDocumentValidator._validate_action_syntax(statement)
IAMPolicyDocumentValidator._validate_not_action_syntax(statement)
IAMPolicyDocumentValidator._validate_resource_syntax(statement)
IAMPolicyDocumentValidator._validate_not_resource_syntax(statement)
IAMPolicyDocumentValidator._validate_condition_syntax(statement)
IAMPolicyDocumentValidator._validate_sid_syntax(statement)
@staticmethod
def _validate_effect_syntax(statement):
assert "Effect" in statement
assert isinstance(statement["Effect"], string_types)
assert statement["Effect"].lower() in [allowed_effect.lower() for allowed_effect in VALID_EFFECTS]
@staticmethod
def _validate_action_syntax(statement):
IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Action")
@staticmethod
def _validate_not_action_syntax(statement):
IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotAction")
@staticmethod
def _validate_resource_syntax(statement):
IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Resource")
@staticmethod
def _validate_not_resource_syntax(statement):
IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotResource")
@staticmethod
def _validate_string_or_list_of_strings_syntax(statement, key):
if key in statement:
assert isinstance(statement[key], (string_types, list))
if isinstance(statement[key], list):
for resource in statement[key]:
assert isinstance(resource, string_types)
@staticmethod
def _validate_condition_syntax(statement):
if "Condition" in statement:
assert isinstance(statement["Condition"], dict)
for condition_key, condition_value in statement["Condition"].items():
assert isinstance(condition_value, dict)
for condition_element_key, condition_element_value in condition_value.items():
assert isinstance(condition_element_value, (list, string_types))
if IAMPolicyDocumentValidator._strip_condition_key(condition_key) not in VALID_CONDITIONS:
assert not condition_value # empty dict
@staticmethod
def _strip_condition_key(condition_key):
for valid_prefix in VALID_CONDITION_PREFIXES:
if condition_key.startswith(valid_prefix):
condition_key = condition_key[len(valid_prefix):]
break # strip only the first match
for valid_postfix in VALID_CONDITION_POSTFIXES:
if condition_key.endswith(valid_postfix):
condition_key = condition_key[:-len(valid_postfix)]
break # strip only the first match
return condition_key
@staticmethod
def _validate_sid_syntax(statement):
if "Sid" in statement:
assert isinstance(statement["Sid"], string_types)
def _validate_id_syntax(self):
if "Id" in self._policy_json:
assert isinstance(self._policy_json["Id"], string_types)
def _validate_resource_exist(self):
for statement in self._statements:
assert ("Resource" in statement or "NotResource" in statement)
if "Resource" in statement and isinstance(statement["Resource"], list):
assert statement["Resource"]
elif "NotResource" in statement and isinstance(statement["NotResource"], list):
assert statement["NotResource"]
def _validate_action_like_exist(self):
for statement in self._statements:
assert ("Action" in statement or "NotAction" in statement)
if "Action" in statement and isinstance(statement["Action"], list):
assert statement["Action"]
elif "NotAction" in statement and isinstance(statement["NotAction"], list):
assert statement["NotAction"]
def _validate_actions_for_prefixes(self):
self._validate_action_like_for_prefixes("Action")
def _validate_not_actions_for_prefixes(self):
self._validate_action_like_for_prefixes("NotAction")
def _validate_action_like_for_prefixes(self, key):
for statement in self._statements:
if key in statement:
if isinstance(statement[key], string_types):
self._validate_action_prefix(statement[key])
else:
for action in statement[key]:
self._validate_action_prefix(action)
@staticmethod
def _validate_action_prefix(action):
action_parts = action.split(":")
if len(action_parts) == 1 and action_parts[0] != "*":
raise MalformedPolicyDocument("Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.")
elif len(action_parts) > 2:
raise MalformedPolicyDocument("Actions/Condition can contain only one colon.")
vendor_pattern = re.compile(r'[^a-zA-Z0-9\-.]')
if action_parts[0] != "*" and vendor_pattern.search(action_parts[0]):
raise MalformedPolicyDocument("Vendor {vendor} is not valid".format(vendor=action_parts[0]))
def _validate_resources_for_formats(self):
self._validate_resource_like_for_formats("Resource")
def _validate_not_resources_for_formats(self):
self._validate_resource_like_for_formats("NotResource")
def _validate_resource_like_for_formats(self, key):
for statement in self._statements:
if key in statement:
if isinstance(statement[key], string_types):
self._validate_resource_format(statement[key])
else:
for resource in sorted(statement[key], reverse=True):
self._validate_resource_format(resource)
if self._resource_error == "":
IAMPolicyDocumentValidator._legacy_parse_resource_like(statement, key)
def _validate_resource_format(self, resource):
if resource != "*":
resource_partitions = resource.partition(":")
if resource_partitions[1] == "":
self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(resource=resource)
return
resource_partitions = resource_partitions[2].partition(":")
if resource_partitions[0] != "aws":
remaining_resource_parts = resource_partitions[2].split(":")
arn1 = remaining_resource_parts[0] if remaining_resource_parts[0] != "" or len(remaining_resource_parts) > 1 else "*"
arn2 = remaining_resource_parts[1] if len(remaining_resource_parts) > 1 else "*"
arn3 = remaining_resource_parts[2] if len(remaining_resource_parts) > 2 else "*"
arn4 = ":".join(remaining_resource_parts[3:]) if len(remaining_resource_parts) > 3 else "*"
self._resource_error = 'Partition "{partition}" is not valid for resource "arn:{partition}:{arn1}:{arn2}:{arn3}:{arn4}".'.format(
partition=resource_partitions[0],
arn1=arn1,
arn2=arn2,
arn3=arn3,
arn4=arn4
)
return
if resource_partitions[1] != ":":
self._resource_error = "Resource vendor must be fully qualified and cannot contain regexes."
return
resource_partitions = resource_partitions[2].partition(":")
service = resource_partitions[0]
if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[2].startswith(":"):
self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(resource=resource)
return
resource_partitions = resource_partitions[2].partition(":")
resource_partitions = resource_partitions[2].partition(":")
if service in VALID_RESOURCE_PATH_STARTING_VALUES.keys():
valid_start = False
for valid_starting_value in VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]:
if resource_partitions[2].startswith(valid_starting_value):
valid_start = True
break
if not valid_start:
self._resource_error = VALID_RESOURCE_PATH_STARTING_VALUES[service]["error_message"].format(
values=", ".join(VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"])
)
def _perform_first_legacy_parsing(self):
"""This method excludes legacy parsing resources, since that have to be done later."""
for statement in self._statements:
self._legacy_parse_statement(statement)
@staticmethod
def _legacy_parse_statement(statement):
assert statement["Effect"] in VALID_EFFECTS # case-sensitive matching
if "Condition" in statement:
for condition_key, condition_value in statement["Condition"].items():
IAMPolicyDocumentValidator._legacy_parse_condition(condition_key, condition_value)
@staticmethod
def _legacy_parse_resource_like(statement, key):
if isinstance(statement[key], string_types):
if statement[key] != "*":
assert statement[key].count(":") >= 5 or "::" not in statement[key]
assert statement[key].split(":")[2] != ""
else: # list
for resource in statement[key]:
if resource != "*":
assert resource.count(":") >= 5 or "::" not in resource
assert resource.split(":")[2] != ""
@staticmethod
def _legacy_parse_condition(condition_key, condition_value):
stripped_condition_key = IAMPolicyDocumentValidator._strip_condition_key(condition_key)
if stripped_condition_key.startswith("Date"):
for condition_element_key, condition_element_value in condition_value.items():
if isinstance(condition_element_value, string_types):
IAMPolicyDocumentValidator._legacy_parse_date_condition_value(condition_element_value)
else: # it has to be a list
for date_condition_value in condition_element_value:
IAMPolicyDocumentValidator._legacy_parse_date_condition_value(date_condition_value)
@staticmethod
def _legacy_parse_date_condition_value(date_condition_value):
if "t" in date_condition_value.lower() or "-" in date_condition_value:
IAMPolicyDocumentValidator._validate_iso_8601_datetime(date_condition_value.lower())
else: # timestamp
assert 0 <= int(date_condition_value) <= 9223372036854775807
@staticmethod
def _validate_iso_8601_datetime(datetime):
datetime_parts = datetime.partition("t")
negative_year = datetime_parts[0].startswith("-")
date_parts = datetime_parts[0][1:].split("-") if negative_year else datetime_parts[0].split("-")
year = "-" + date_parts[0] if negative_year else date_parts[0]
assert -292275054 <= int(year) <= 292278993
if len(date_parts) > 1:
month = date_parts[1]
assert 1 <= int(month) <= 12
if len(date_parts) > 2:
day = date_parts[2]
assert 1 <= int(day) <= 31
assert len(date_parts) < 4
time_parts = datetime_parts[2].split(":")
if time_parts[0] != "":
hours = time_parts[0]
assert 0 <= int(hours) <= 23
if len(time_parts) > 1:
minutes = time_parts[1]
assert 0 <= int(minutes) <= 59
if len(time_parts) > 2:
if "z" in time_parts[2]:
seconds_with_decimal_fraction = time_parts[2].partition("z")[0]
assert time_parts[2].partition("z")[2] == ""
elif "+" in time_parts[2]:
seconds_with_decimal_fraction = time_parts[2].partition("+")[0]
time_zone_data = time_parts[2].partition("+")[2].partition(":")
time_zone_hours = time_zone_data[0]
assert len(time_zone_hours) == 2
assert 0 <= int(time_zone_hours) <= 23
if time_zone_data[1] == ":":
time_zone_minutes = time_zone_data[2]
assert len(time_zone_minutes) == 2
assert 0 <= int(time_zone_minutes) <= 59
else:
seconds_with_decimal_fraction = time_parts[2]
seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(".")
seconds = seconds_with_decimal_fraction_partition[0]
assert 0 <= int(seconds) <= 59
if seconds_with_decimal_fraction_partition[1] == ".":
decimal_seconds = seconds_with_decimal_fraction_partition[2]
assert 0 <= int(decimal_seconds) <= 999999999
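The validator is also usable directly. A sketch of a document rejected for missing resources:

from moto.iam.exceptions import MalformedPolicyDocument
from moto.iam.policy_validation import IAMPolicyDocumentValidator

document = ('{"Version": "2012-10-17", '
            '"Statement": {"Effect": "Allow", "Action": "s3:GetObject"}}')
try:
    IAMPolicyDocumentValidator(document).validate()
except MalformedPolicyDocument:
    # Raised with "Policy statement must contain resources." because the
    # statement has neither Resource nor NotResource.
    pass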

@ -107,14 +107,79 @@ class IamResponse(BaseResponse):
template = self.response_template(LIST_POLICIES_TEMPLATE)
return template.render(policies=policies, marker=marker)
def list_entities_for_policy(self):
policy_arn = self._get_param('PolicyArn')
# EntityFilter options: 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy'
entity = self._get_param('EntityFilter')
path_prefix = self._get_param('PathPrefix')
# policy_usage_filter = self._get_param('PolicyUsageFilter')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems')
entity_roles = []
entity_groups = []
entity_users = []
if entity == 'User':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)
elif entity == 'Role':
roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)
elif entity == 'Group':
groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)
elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)
roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)
groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)
template = self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE)
return template.render(roles=entity_roles, users=entity_users, groups=entity_groups)
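From the client side, a sketch of filtering attachments by entity type (policy, role, and document contents are made up):

import json

import boto3
from moto import mock_iam

@mock_iam
def list_entities_example():
    client = boto3.client('iam', region_name='us-east-1')
    doc = json.dumps({'Version': '2012-10-17',
                      'Statement': [{'Effect': 'Allow', 'Action': 's3:*',
                                     'Resource': '*'}]})
    arn = client.create_policy(PolicyName='p',
                               PolicyDocument=doc)['Policy']['Arn']
    client.create_role(RoleName='r', AssumeRolePolicyDocument='{}')
    client.attach_role_policy(RoleName='r', PolicyArn=arn)
    roles = client.list_entities_for_policy(PolicyArn=arn,
                                            EntityFilter='Role')['PolicyRoles']
    assert roles == [{'RoleName': 'r'}]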
def create_role(self):
role_name = self._get_param('RoleName')
path = self._get_param('Path')
assume_role_policy_document = self._get_param(
'AssumeRolePolicyDocument')
permissions_boundary = self._get_param(
'PermissionsBoundary')
role = iam_backend.create_role(
role_name, assume_role_policy_document, path)
role_name, assume_role_policy_document, path, permissions_boundary)
template = self.response_template(CREATE_ROLE_TEMPLATE)
return template.render(role=role)
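A sketch of the new PermissionsBoundary handling, including the ARN validation added in the backend (the boundary ARN is made up):

import boto3
from botocore.exceptions import ClientError
from moto import mock_iam

@mock_iam
def permissions_boundary_example():
    client = boto3.client('iam', region_name='us-east-1')
    boundary = 'arn:aws:iam::123456789012:policy/boundary'
    role = client.create_role(RoleName='bounded',
                              AssumeRolePolicyDocument='{}',
                              PermissionsBoundary=boundary)['Role']
    assert role['PermissionsBoundary']['PermissionsBoundaryArn'] == boundary
    try:
        client.create_role(RoleName='broken', AssumeRolePolicyDocument='{}',
                           PermissionsBoundary='not-an-arn')
    except ClientError as err:
        # Rejected by the policy_arn_regex check in create_role.
        assert err.response['Error']['Code'] == 'InvalidParameterValue'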
@ -169,6 +234,20 @@ class IamResponse(BaseResponse):
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="UpdateAssumeRolePolicyResponse")
def update_role_description(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role_description(role_name, description)
template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE)
return template.render(role=role)
def update_role(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role(role_name, description)
template = self.response_template(UPDATE_ROLE_TEMPLATE)
return template.render(role=role)
def create_policy_version(self):
policy_arn = self._get_param('PolicyArn')
policy_document = self._get_param('PolicyDocument')
@ -363,6 +442,18 @@ class IamResponse(BaseResponse):
template = self.response_template(LIST_USERS_TEMPLATE)
return template.render(action='List', users=users)
def update_user(self):
user_name = self._get_param('UserName')
new_path = self._get_param('NewPath')
new_user_name = self._get_param('NewUserName')
iam_backend.update_user(user_name, new_path, new_user_name)
if new_user_name:
user = iam_backend.get_user(new_user_name)
else:
user = iam_backend.get_user(user_name)
template = self.response_template(USER_TEMPLATE)
return template.render(action='Update', user=user)
def create_login_profile(self):
user_name = self._get_param('UserName')
password = self._get_param('Password')
@ -554,7 +645,8 @@ class IamResponse(BaseResponse):
policies=account_details['managed_policies'],
users=account_details['users'],
groups=account_details['groups'],
roles=account_details['roles']
roles=account_details['roles'],
get_groups_for_user=iam_backend.get_groups_for_user
)
def create_saml_provider(self):
@ -625,6 +717,65 @@ class IamResponse(BaseResponse):
template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE)
return template.render(user_name=user_name, certificates=certs)
def list_role_tags(self):
role_name = self._get_param('RoleName')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems', 100)
tags, marker = iam_backend.list_role_tags(role_name, marker, max_items)
template = self.response_template(LIST_ROLE_TAG_TEMPLATE)
return template.render(tags=tags, marker=marker)
def tag_role(self):
role_name = self._get_param('RoleName')
tags = self._get_multi_param('Tags.member')
iam_backend.tag_role(role_name, tags)
template = self.response_template(TAG_ROLE_TEMPLATE)
return template.render()
def untag_role(self):
role_name = self._get_param('RoleName')
tag_keys = self._get_multi_param('TagKeys.member')
iam_backend.untag_role(role_name, tag_keys)
template = self.response_template(UNTAG_ROLE_TEMPLATE)
return template.render()
LIST_ENTITIES_FOR_POLICY_TEMPLATE = """<ListEntitiesForPolicyResponse>
<ListEntitiesForPolicyResult>
<PolicyRoles>
{% for role in roles %}
<member>
<RoleName>{{ role }}</RoleName>
</member>
{% endfor %}
</PolicyRoles>
<PolicyGroups>
{% for group in groups %}
<member>
<GroupName>{{ group }}</GroupName>
</member>
{% endfor %}
</PolicyGroups>
<IsTruncated>false</IsTruncated>
<PolicyUsers>
{% for user in users %}
<member>
<UserName>{{ user }}</UserName>
</member>
{% endfor %}
</PolicyUsers>
</ListEntitiesForPolicyResult>
<ResponseMetadata>
<RequestId>eb358e22-9d1f-11e4-93eb-190ecEXAMPLE</RequestId>
</ResponseMetadata>
</ListEntitiesForPolicyResponse>"""
ATTACH_ROLE_POLICY_TEMPLATE = """<AttachRolePolicyResponse>
<ResponseMetadata>
@ -667,12 +818,12 @@ CREATE_POLICY_TEMPLATE = """<CreatePolicyResponse>
<Policy>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.create_datetime.isoformat() }}</CreateDate>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<Path>{{ policy.path }}</Path>
<PolicyId>{{ policy.id }}</PolicyId>
<PolicyName>{{ policy.name }}</PolicyName>
<UpdateDate>{{ policy.update_datetime.isoformat() }}</UpdateDate>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</Policy>
</CreatePolicyResult>
<ResponseMetadata>
@ -690,8 +841,8 @@ GET_POLICY_TEMPLATE = """<GetPolicyResponse>
<Path>{{ policy.path }}</Path>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.create_datetime.isoformat() }}</CreateDate>
<UpdateDate>{{ policy.update_datetime.isoformat() }}</UpdateDate>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</Policy>
</GetPolicyResult>
<ResponseMetadata>
@ -778,12 +929,12 @@ LIST_POLICIES_TEMPLATE = """<ListPoliciesResponse>
<member>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>{{ policy.attachment_count }}</AttachmentCount>
<CreateDate>{{ policy.create_datetime.isoformat() }}</CreateDate>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<DefaultVersionId>{{ policy.default_version_id }}</DefaultVersionId>
<Path>{{ policy.path }}</Path>
<PolicyId>{{ policy.id }}</PolicyId>
<PolicyName>{{ policy.name }}</PolicyName>
<UpdateDate>{{ policy.update_datetime.isoformat() }}</UpdateDate>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</member>
{% endfor %}
</Policies>
@ -807,7 +958,7 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """<CreateInstanceProfileResponse xmlns="http
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.create_date }}</CreateDate>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</InstanceProfile>
</CreateInstanceProfileResult>
<ResponseMetadata>
@ -826,7 +977,7 @@ GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://ia
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
@ -834,7 +985,7 @@ GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://ia
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.create_date }}</CreateDate>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</InstanceProfile>
</GetInstanceProfileResult>
<ResponseMetadata>
@ -849,8 +1000,14 @@ CREATE_ROLE_TEMPLATE = """<CreateRoleResponse xmlns="https://iam.amazonaws.com/d
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</Role>
</CreateRoleResult>
<ResponseMetadata>
@ -869,6 +1026,40 @@ GET_ROLE_POLICY_TEMPLATE = """<GetRolePolicyResponse xmlns="https://iam.amazonaw
</ResponseMetadata>
</GetRolePolicyResponse>"""
UPDATE_ROLE_TEMPLATE = """<UpdateRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleResult>
</UpdateRoleResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleResponse>"""
UPDATE_ROLE_DESCRIPTION_TEMPLATE = """<UpdateRoleDescriptionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleDescriptionResult>
<Role>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</UpdateRoleDescriptionResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleDescriptionResponse>"""
GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetRoleResult>
<Role>
@ -876,8 +1067,18 @@ GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/201
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</GetRoleResult>
<ResponseMetadata>
@ -907,8 +1108,14 @@ LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.permissions_boundary %}
<PermissionsBoundary>
<PermissionsBoundaryType>PermissionsBoundaryPolicy</PermissionsBoundaryType>
<PermissionsBoundaryArn>{{ role.permissions_boundary }}</PermissionsBoundaryArn>
</PermissionsBoundary>
{% endif %}
</member>
{% endfor %}
</Roles>
@ -937,8 +1144,8 @@ CREATE_POLICY_VERSION_TEMPLATE = """<CreatePolicyVersionResponse xmlns="https://
<PolicyVersion>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
<CreateDate>{{ policy_version.create_datetime }}</CreateDate>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</PolicyVersion>
</CreatePolicyVersionResult>
<ResponseMetadata>
@ -951,8 +1158,8 @@ GET_POLICY_VERSION_TEMPLATE = """<GetPolicyVersionResponse xmlns="https://iam.am
<PolicyVersion>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
<CreateDate>{{ policy_version.create_datetime }}</CreateDate>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</PolicyVersion>
</GetPolicyVersionResult>
<ResponseMetadata>
@ -968,8 +1175,8 @@ LIST_POLICY_VERSIONS_TEMPLATE = """<ListPolicyVersionsResponse xmlns="https://ia
<member>
<Document>{{ policy_version.document }}</Document>
<VersionId>{{ policy_version.version_id }}</VersionId>
<IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
<CreateDate>{{ policy_version.create_datetime }}</CreateDate>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</Versions>
@ -993,7 +1200,7 @@ LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https:
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
@ -1001,7 +1208,7 @@ LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https:
<InstanceProfileName>{{ instance.name }}</InstanceProfileName>
<Path>{{ instance.path }}</Path>
<Arn>{{ instance.arn }}</Arn>
<CreateDate>{{ instance.create_date }}</CreateDate>
<CreateDate>{{ instance.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfiles>
@ -1080,7 +1287,7 @@ CREATE_GROUP_TEMPLATE = """<CreateGroupResponse>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.create_date }}</CreateDate>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</Group>
</CreateGroupResult>
<ResponseMetadata>
@ -1095,7 +1302,7 @@ GET_GROUP_TEMPLATE = """<GetGroupResponse>
<GroupName>{{ group.name }}</GroupName>
<GroupId>{{ group.id }}</GroupId>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.create_date }}</CreateDate>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
</Group>
<Users>
{% for user in group.users %}
@ -1286,6 +1493,7 @@ CREATE_ACCESS_KEY_TEMPLATE = """<CreateAccessKeyResponse>
<AccessKeyId>{{ key.access_key_id }}</AccessKeyId>
<Status>{{ key.status }}</Status>
<SecretAccessKey>{{ key.secret_access_key }}</SecretAccessKey>
<CreateDate>{{ key.created_iso_8601 }}</CreateDate>
</AccessKey>
</CreateAccessKeyResult>
<ResponseMetadata>
@ -1302,7 +1510,7 @@ LIST_ACCESS_KEYS_TEMPLATE = """<ListAccessKeysResponse>
<UserName>{{ user_name }}</UserName>
<AccessKeyId>{{ key.access_key_id }}</AccessKeyId>
<Status>{{ key.status }}</Status>
<CreateDate>{{ key.create_date }}</CreateDate>
<CreateDate>{{ key.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</AccessKeyMetadata>
@ -1370,7 +1578,7 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """<ListInstanceProfilesForRoleRespon
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
@ -1378,7 +1586,7 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """<ListInstanceProfilesForRoleRespon
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.create_date }}</CreateDate>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfiles>
@ -1461,8 +1669,19 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<UserDetailList>
{% for user in users %}
<member>
<GroupList />
<AttachedManagedPolicies/>
<GroupList>
{% for group in get_groups_for_user(user.name) %}
<member>{{ group.name }}</member>
{% endfor %}
</GroupList>
<AttachedManagedPolicies>
{% for policy in user.managed_policies %}
<member>
<PolicyName>{{ user.managed_policies[policy].name }}</PolicyName>
<PolicyArn>{{ policy }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<UserId>{{ user.id }}</UserId>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
@ -1476,33 +1695,55 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<member>
<GroupId>{{ group.id }}</GroupId>
<AttachedManagedPolicies>
{% for policy in group.managed_policies %}
{% for policy_arn in group.managed_policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
<PolicyName>{{ group.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<GroupName>{{ group.name }}</GroupName>
<Path>{{ group.path }}</Path>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.create_date }}</CreateDate>
<GroupPolicyList/>
<CreateDate>{{ group.created_iso_8601 }}</CreateDate>
<GroupPolicyList>
{% for policy in group.policies %}
<member>
<PolicyName>{{ policy }}</PolicyName>
<PolicyDocument>{{ group.get_policy(policy) }}</PolicyDocument>
</member>
{% endfor %}
</GroupPolicyList>
</member>
{% endfor %}
</GroupDetailList>
<RoleDetailList>
{% for role in roles %}
<member>
<RolePolicyList/>
<AttachedManagedPolicies>
{% for policy in role.managed_policies %}
<RolePolicyList>
{% for inline_policy in role.policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
<PolicyName>{{ inline_policy }}</PolicyName>
<PolicyDocument>{{ role.policies[inline_policy] }}</PolicyDocument>
</member>
{% endfor %}
</RolePolicyList>
<AttachedManagedPolicies>
{% for policy_arn in role.managed_policies %}
<member>
<PolicyName>{{ role.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
<InstanceProfileList>
{% for profile in instance_profiles %}
<member>
@ -1514,7 +1755,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
@ -1522,7 +1763,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<InstanceProfileName>{{ profile.name }}</InstanceProfileName>
<Path>{{ profile.path }}</Path>
<Arn>{{ profile.arn }}</Arn>
<CreateDate>{{ profile.create_date }}</CreateDate>
<CreateDate>{{ profile.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</InstanceProfileList>
@ -1530,7 +1771,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<CreateDate>{{ role.created_iso_8601 }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
</member>
{% endfor %}
@ -1543,25 +1784,20 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<PolicyId>{{ policy.id }}</PolicyId>
<Path>{{ policy.path }}</Path>
<PolicyVersionList>
{% for policy_version in policy.versions %}
<member>
<Document>
{"Version":"2012-10-17","Statement":{"Effect":"Allow",
"Action":["iam:CreatePolicy","iam:CreatePolicyVersion",
"iam:DeletePolicy","iam:DeletePolicyVersion","iam:GetPolicy",
"iam:GetPolicyVersion","iam:ListPolicies",
"iam:ListPolicyVersions","iam:SetDefaultPolicyVersion"],
"Resource":"*"}}
</Document>
<IsDefaultVersion>true</IsDefaultVersion>
<VersionId>v1</VersionId>
<CreateDate>2012-05-09T16:27:11Z</CreateDate>
<Document>{{ policy_version.document }}</Document>
<IsDefaultVersion>{{ policy_version.is_default | lower }}</IsDefaultVersion>
<VersionId>{{ policy_version.version_id }}</VersionId>
<CreateDate>{{ policy_version.created_iso_8601 }}</CreateDate>
</member>
{% endfor %}
</PolicyVersionList>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>1</AttachmentCount>
<CreateDate>{{ policy.create_datetime }}</CreateDate>
<CreateDate>{{ policy.created_iso_8601 }}</CreateDate>
<IsAttachable>true</IsAttachable>
<UpdateDate>{{ policy.update_datetime }}</UpdateDate>
<UpdateDate>{{ policy.updated_iso_8601 }}</UpdateDate>
</member>
{% endfor %}
</Policies>
@ -1671,3 +1907,38 @@ LIST_SIGNING_CERTIFICATES_TEMPLATE = """<ListSigningCertificatesResponse>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListSigningCertificatesResponse>"""
TAG_ROLE_TEMPLATE = """<TagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</TagRoleResponse>"""
LIST_ROLE_TAG_TEMPLATE = """<ListRoleTagsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRoleTagsResult>
<IsTruncated>{{ 'true' if marker else 'false' }}</IsTruncated>
{% if marker %}
<Marker>{{ marker }}</Marker>
{% endif %}
<Tags>
{% for tag in tags %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
</ListRoleTagsResult>
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</ListRoleTagsResponse>"""
UNTAG_ROLE_TEMPLATE = """<UntagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</UntagRoleResponse>"""

View File

@ -7,7 +7,7 @@ import six
def random_alphanumeric(length):
return ''.join(six.text_type(
random.choice(
string.ascii_letters + string.digits
string.ascii_letters + string.digits + "+" + "/"
)) for _ in range(length)
)

View File

@ -96,7 +96,7 @@ class FakeThingGroup(BaseModel):
class FakeCertificate(BaseModel):
def __init__(self, certificate_pem, status, region_name):
def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
m = hashlib.sha256()
m.update(str(uuid.uuid4()).encode('utf-8'))
self.certificate_id = m.hexdigest()
@ -109,12 +109,18 @@ class FakeCertificate(BaseModel):
self.transfer_data = {}
self.creation_date = time.time()
self.last_modified_date = self.creation_date
self.ca_certificate_id = None
self.ca_certificate_pem = ca_certificate_pem
if ca_certificate_pem:
m.update(str(uuid.uuid4()).encode('utf-8'))
self.ca_certificate_id = m.hexdigest()
def to_dict(self):
return {
'certificateArn': self.arn,
'certificateId': self.certificate_id,
'caCertificateId': self.ca_certificate_id,
'status': self.status,
'creationDate': self.creation_date
}
@ -410,6 +416,12 @@ class IoTBackend(BaseBackend):
def list_certificates(self):
return self.certificates.values()
def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status):
certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status,
self.region_name, ca_certificate_pem)
self.certificates[certificate.certificate_id] = certificate
return certificate
def update_certificate(self, certificate_id, new_status):
cert = self.describe_certificate(certificate_id)
# TODO: validate new_status

View File

@ -183,6 +183,20 @@ class IoTResponse(BaseResponse):
# TODO: implement pagination in the future
return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
def register_certificate(self):
certificate_pem = self._get_param("certificatePem")
ca_certificate_pem = self._get_param("caCertificatePem")
set_as_active = self._get_bool_param("setAsActive")
status = self._get_param("status")
cert = self.iot_backend.register_certificate(
certificate_pem=certificate_pem,
ca_certificate_pem=ca_certificate_pem,
set_as_active=set_as_active,
status=status
)
return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn))
def update_certificate(self):
certificate_id = self._get_param("certificateId")
new_status = self._get_param("newStatus")
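A minimal sketch of the new register_certificate flow (assuming moto's mock_iot decorator and boto3; the PEM bodies are placeholders, since the backend above derives IDs from hashed random UUIDs rather than parsing the PEM):

import boto3
from moto import mock_iot

@mock_iot
def register_cert():
    client = boto3.client("iot", region_name="us-east-1")
    resp = client.register_certificate(
        certificatePem="-----BEGIN CERTIFICATE-----\n...placeholder...",
        caCertificatePem="-----BEGIN CERTIFICATE-----\n...placeholder...",
        setAsActive=True,  # forces status ACTIVE, as in the backend above
    )
    # Supplying a CA PEM also populates caCertificateId on the stored cert.
    return resp["certificateId"], resp["certificateArn"]

print(register_cert())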

View File

@ -116,22 +116,19 @@ class Stream(BaseModel):
def __init__(self, stream_name, shard_count, region):
self.stream_name = stream_name
self.shard_count = shard_count
self.creation_datetime = datetime.datetime.now()
self.region = region
self.account_number = "123456789012"
self.shards = {}
self.tags = {}
self.status = "ACTIVE"
if six.PY3:
izip_longest = itertools.zip_longest
else:
izip_longest = itertools.izip_longest
step = 2**128 // shard_count
hash_ranges = itertools.chain(map(lambda i: (i, i * step, (i + 1) * step),
range(shard_count - 1)),
[(shard_count - 1, (shard_count - 1) * step, 2**128)])
for index, start, end in hash_ranges:
for index, start, end in izip_longest(range(shard_count),
range(0, 2**128, 2 **
128 // shard_count),
range(2**128 // shard_count, 2 **
128, 2**128 // shard_count),
fillvalue=2**128):
shard = Shard(index, start, end)
self.shards[shard.shard_id] = shard
@ -183,12 +180,23 @@ class Stream(BaseModel):
"StreamDescription": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": "ACTIVE",
"StreamStatus": self.status,
"HasMoreShards": False,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
def to_json_summary(self):
return {
"StreamDescriptionSummary": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": self.status,
"StreamCreationTimestamp": six.text_type(self.creation_datetime),
"OpenShardCount": self.shard_count,
}
}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@ -309,6 +317,9 @@ class KinesisBackend(BaseBackend):
else:
raise StreamNotFoundError(stream_name)
def describe_stream_summary(self, stream_name):
return self.describe_stream(stream_name)
def list_streams(self):
return self.streams.values()

View File

@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse):
stream = self.kinesis_backend.describe_stream(stream_name)
return json.dumps(stream.to_json())
def describe_stream_summary(self):
stream_name = self.parameters.get('StreamName')
stream = self.kinesis_backend.describe_stream_summary(stream_name)
return json.dumps(stream.to_json_summary())
def list_streams(self):
streams = self.kinesis_backend.list_streams()
stream_names = [stream.stream_name for stream in streams]
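A minimal sketch of the new DescribeStreamSummary support (assuming moto's mock_kinesis decorator and boto3; the stream name and shard count are illustrative):

import boto3
from moto import mock_kinesis

@mock_kinesis
def stream_summary():
    client = boto3.client("kinesis", region_name="us-east-1")
    client.create_stream(StreamName="my-stream", ShardCount=2)
    # to_json_summary() above supplies status, creation timestamp and open shard count.
    summary = client.describe_stream_summary(StreamName="my-stream")["StreamDescriptionSummary"]
    return summary["StreamStatus"], summary["OpenShardCount"]

print(stream_summary())  # ('ACTIVE', 2)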

View File

@ -1,8 +1,19 @@
import sys
import base64
from .exceptions import InvalidArgumentError
if sys.version_info[0] == 2:
encode_method = base64.encodestring
decode_method = base64.decodestring
elif sys.version_info[0] == 3:
encode_method = base64.encodebytes
decode_method = base64.decodebytes
else:
raise Exception("Python version is not supported")
def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number,
at_timestamp):
if shard_iterator_type == "AT_SEQUENCE_NUMBER":
@ -22,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting
def compose_shard_iterator(stream_name, shard, last_sequence_id):
return base64.encodestring(
return encode_method(
"{0}:{1}:{2}".format(
stream_name,
shard.shard_id,
@ -32,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id):
def decompose_shard_iterator(shard_iterator):
return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
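A minimal sketch of the round trip the version-dependent aliases enable, mirroring compose_shard_iterator/decompose_shard_iterator above (plain Python; the stream and shard names are illustrative):

import base64
import sys

# Same selection as above: encodestring/decodestring are the Python 2 spellings,
# renamed to encodebytes/decodebytes in Python 3.
encode_method = base64.encodebytes if sys.version_info[0] == 3 else base64.encodestring
decode_method = base64.decodebytes if sys.version_info[0] == 3 else base64.decodestring

token = encode_method("my-stream:shardId-000000000000:42".encode("utf-8"))
parts = decode_method(token).decode("utf-8").split(":")
print(parts)  # ['my-stream', 'shardId-000000000000', '42']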

36
moto/kms/exceptions.py Normal file
View File

@ -0,0 +1,36 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class NotFoundException(JsonRESTError):
code = 400
def __init__(self, message):
super(NotFoundException, self).__init__(
"NotFoundException", message)
class ValidationException(JsonRESTError):
code = 400
def __init__(self, message):
super(ValidationException, self).__init__(
"ValidationException", message)
class AlreadyExistsException(JsonRESTError):
code = 400
def __init__(self, message):
super(AlreadyExistsException, self).__init__(
"AlreadyExistsException", message)
class NotAuthorizedException(JsonRESTError):
code = 400
def __init__(self):
super(NotAuthorizedException, self).__init__(
"NotAuthorizedException", None)
self.description = '{"__type":"NotAuthorizedException"}'

View File

@ -1,8 +1,9 @@
from __future__ import unicode_literals
import os
import boto.kms
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds
from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time
from .utils import generate_key_id
from collections import defaultdict
from datetime import datetime, timedelta
@ -21,6 +22,7 @@ class Key(BaseModel):
self.account_id = "0123456789012"
self.key_rotation_status = False
self.deletion_date = None
self.tags = {}
@property
def physical_resource_id(self):
@ -35,7 +37,7 @@ class Key(BaseModel):
"KeyMetadata": {
"AWSAccountId": self.account_id,
"Arn": self.arn,
"CreationDate": "2015-01-01 00:00:00",
"CreationDate": "%d" % unix_time(),
"Description": self.description,
"Enabled": self.enabled,
"KeyId": self.id,
@ -63,7 +65,6 @@ class Key(BaseModel):
)
key.key_rotation_status = properties['EnableKeyRotation']
key.enabled = properties['Enabled']
return key
def get_cfn_attribute(self, attribute_name):
@ -84,6 +85,18 @@ class KmsBackend(BaseBackend):
self.keys[key.id] = key
return key
def update_key_description(self, key_id, description):
key = self.keys[self.get_key_id(key_id)]
key.description = description
def tag_resource(self, key_id, tags):
key = self.keys[self.get_key_id(key_id)]
key.tags = tags
def list_resource_tags(self, key_id):
key = self.keys[self.get_key_id(key_id)]
return key.tags
def delete_key(self, key_id):
if key_id in self.keys:
if key_id in self.key_to_aliases:
@ -147,28 +160,39 @@ class KmsBackend(BaseBackend):
return self.keys[self.get_key_id(key_id)].policy
def disable_key(self, key_id):
if key_id in self.keys:
self.keys[key_id].enabled = False
self.keys[key_id].key_state = 'Disabled'
def enable_key(self, key_id):
if key_id in self.keys:
self.keys[key_id].enabled = True
self.keys[key_id].key_state = 'Enabled'
def cancel_key_deletion(self, key_id):
if key_id in self.keys:
self.keys[key_id].key_state = 'Disabled'
self.keys[key_id].deletion_date = None
def schedule_key_deletion(self, key_id, pending_window_in_days):
if key_id in self.keys:
if 7 <= pending_window_in_days <= 30:
self.keys[key_id].enabled = False
self.keys[key_id].key_state = 'PendingDeletion'
self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)
def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens):
key = self.keys[self.get_key_id(key_id)]
if key_spec:
if key_spec == 'AES_128':
bytes = 16
else:
bytes = 32
else:
bytes = number_of_bytes
plaintext = os.urandom(bytes)
return plaintext, key.arn
kms_backends = {}
for region in boto.kms.regions():

View File

@ -5,11 +5,9 @@ import json
import re
import six
from boto.exception import JSONResponseError
from boto.kms.exceptions import AlreadyExistsException, NotFoundException
from moto.core.responses import BaseResponse
from .models import kms_backends
from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException
reserved_aliases = [
'alias/aws/ebs',
@ -38,6 +36,28 @@ class KmsResponse(BaseResponse):
policy, key_usage, description, self.region)
return json.dumps(key.to_dict())
def update_key_description(self):
key_id = self.parameters.get('KeyId')
description = self.parameters.get('Description')
self.kms_backend.update_key_description(key_id, description)
return json.dumps(None)
def tag_resource(self):
key_id = self.parameters.get('KeyId')
tags = self.parameters.get('Tags')
self.kms_backend.tag_resource(key_id, tags)
return json.dumps({})
def list_resource_tags(self):
key_id = self.parameters.get('KeyId')
tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({
"Tags": tags,
"NextMarker": None,
"Truncated": False,
})
def describe_key(self):
key_id = self.parameters.get('KeyId')
try:
@ -66,36 +86,28 @@ class KmsResponse(BaseResponse):
def create_alias(self):
alias_name = self.parameters['AliasName']
target_key_id = self.parameters['TargetKeyId']
region = self.region
if not alias_name.startswith('alias/'):
raise JSONResponseError(400, 'Bad Request',
body={'message': 'Invalid identifier', '__type': 'ValidationException'})
raise ValidationException('Invalid identifier')
if alias_name in reserved_aliases:
raise JSONResponseError(400, 'Bad Request', body={
'__type': 'NotAuthorizedException'})
raise NotAuthorizedException()
if ':' in alias_name:
raise JSONResponseError(400, 'Bad Request', body={
'message': '{alias_name} contains invalid characters for an alias'.format(**locals()),
'__type': 'ValidationException'})
raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name))
if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name):
raise JSONResponseError(400, 'Bad Request', body={
'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$"
.format(**locals()),
'__type': 'ValidationException'})
raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' "
"failed to satisfy constraint: Member must satisfy regular "
"expression pattern: ^[a-zA-Z0-9:/_-]+$"
.format(alias_name=alias_name))
if self.kms_backend.alias_exists(target_key_id):
raise JSONResponseError(400, 'Bad Request', body={
'message': 'Aliases must refer to keys. Not aliases',
'__type': 'ValidationException'})
raise ValidationException('Aliases must refer to keys. Not aliases')
if self.kms_backend.alias_exists(alias_name):
raise AlreadyExistsException(400, 'Bad Request', body={
'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists'
.format(**locals()), '__type': 'AlreadyExistsException'})
raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} '
'already exists'.format(region=self.region, alias_name=alias_name))
self.kms_backend.add_alias(target_key_id, alias_name)
@ -103,16 +115,13 @@ class KmsResponse(BaseResponse):
def delete_alias(self):
alias_name = self.parameters['AliasName']
region = self.region
if not alias_name.startswith('alias/'):
raise JSONResponseError(400, 'Bad Request',
body={'message': 'Invalid identifier', '__type': 'ValidationException'})
raise ValidationException('Invalid identifier')
if not self.kms_backend.alias_exists(alias_name):
raise NotFoundException(400, 'Bad Request', body={
'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()),
'__type': 'NotFoundException'})
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:'
'{alias_name} is not found.'.format(region=self.region, alias_name=alias_name))
self.kms_backend.delete_alias(alias_name)
@ -150,9 +159,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.enable_key_rotation(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
@ -162,9 +170,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.disable_key_rotation(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_rotation_status(self):
@ -173,9 +180,8 @@ class KmsResponse(BaseResponse):
try:
rotation_enabled = self.kms_backend.get_key_rotation_status(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyRotationEnabled': rotation_enabled})
def put_key_policy(self):
@ -188,9 +194,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.put_key_policy(key_id, policy)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
@ -203,9 +208,8 @@ class KmsResponse(BaseResponse):
try:
return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)})
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def list_key_policies(self):
key_id = self.parameters.get('KeyId')
@ -213,9 +217,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.describe_key(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'Truncated': False, 'PolicyNames': ['default']})
@ -227,11 +230,17 @@ class KmsResponse(BaseResponse):
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")})
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})
def decrypt(self):
# TODO: refuse to decrypt if the EncryptionContext is not the same as when it was encrypted / generated
value = self.parameters.get("CiphertextBlob")
try:
return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
except UnicodeDecodeError:
# generate_data_key produces random bytes that are not valid UTF-8, so they are returned still base64-encoded
return json.dumps({"Plaintext": value})
def disable_key(self):
key_id = self.parameters.get('KeyId')
@ -239,9 +248,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.disable_key(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def enable_key(self):
@ -250,9 +258,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.enable_key(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def cancel_key_deletion(self):
@ -261,9 +268,8 @@ class KmsResponse(BaseResponse):
try:
self.kms_backend.cancel_key_deletion(key_id)
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyId': key_id})
def schedule_key_deletion(self):
@ -279,19 +285,62 @@ class KmsResponse(BaseResponse):
'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days)
})
except KeyError:
raise JSONResponseError(404, 'Not Found', body={
'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
'__type': 'NotFoundException'})
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def generate_data_key(self):
key_id = self.parameters.get('KeyId')
encryption_context = self.parameters.get('EncryptionContext')
number_of_bytes = self.parameters.get('NumberOfBytes')
key_spec = self.parameters.get('KeySpec')
grant_tokens = self.parameters.get('GrantTokens')
# Param validation
if key_id.startswith('alias'):
if self.kms_backend.get_key_id_from_alias(key_id) is None:
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(
region=self.region, alias_name=key_id))
else:
if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys:
raise NotFoundException('Invalid keyId')
if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0):
raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed "
"to satisfy constraint: Member must have value less than or "
"equal to 1024")
if key_spec and key_spec not in ('AES_256', 'AES_128'):
raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[AES_256, AES_128]")
if not key_spec and not number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
if key_spec and number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context,
number_of_bytes, key_spec, grant_tokens)
plaintext = base64.b64encode(plaintext).decode()
return json.dumps({
'CiphertextBlob': plaintext,
'Plaintext': plaintext,
'KeyId': key_arn # not alias
})
def generate_data_key_without_plaintext(self):
result = json.loads(self.generate_data_key())
del result['Plaintext']
return json.dumps(result)
def _assert_valid_key_id(key_id):
if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE):
raise JSONResponseError(404, 'Not Found', body={
'message': ' Invalid keyId', '__type': 'NotFoundException'})
raise NotFoundException('Invalid keyId')
def _assert_default_policy(policy_name):
if policy_name != 'default':
raise JSONResponseError(404, 'Not Found', body={
'message': "No such policy exists",
'__type': 'NotFoundException'})
raise NotFoundException("No such policy exists")

View File

@ -137,6 +137,7 @@ class LogGroup:
self.creationTime = unix_time_millis()
self.tags = tags
self.streams = dict() # {name: LogStream}
self.retentionInDays = None # AWS defaults to Never Expire for log group retention
def create_log_stream(self, log_stream_name):
if log_stream_name in self.streams:
@ -201,14 +202,20 @@ class LogGroup:
return events_page, next_token, searched_streams
def to_describe_dict(self):
return {
log_group = {
"arn": self.arn,
"creationTime": self.creationTime,
"logGroupName": self.name,
"metricFilterCount": 0,
"retentionInDays": 30,
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
# AWS only returns retentionInDays if a value is set for the log group (i.e. not Never Expire)
if self.retentionInDays:
log_group["retentionInDays"] = self.retentionInDays
return log_group
def set_retention_policy(self, retention_in_days):
self.retentionInDays = retention_in_days
class LogsBackend(BaseBackend):
@ -242,7 +249,8 @@ class LogsBackend(BaseBackend):
if next_token is None:
next_token = 0
groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
groups = [group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)]
groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True)
groups_page = groups[next_token:next_token + limit]
next_token += limit
@ -288,5 +296,17 @@ class LogsBackend(BaseBackend):
log_group = self.groups[log_group_name]
return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
def put_retention_policy(self, log_group_name, retention_in_days):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(retention_in_days)
def delete_retention_policy(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.set_retention_policy(None)
logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}

View File

@ -123,3 +123,14 @@ class LogsResponse(BaseResponse):
"nextToken": next_token,
"searchedLogStreams": searched_streams
})
def put_retention_policy(self):
log_group_name = self._get_param('logGroupName')
retention_in_days = self._get_param('retentionInDays')
self.logs_backend.put_retention_policy(log_group_name, retention_in_days)
return ''
def delete_retention_policy(self):
log_group_name = self._get_param('logGroupName')
self.logs_backend.delete_retention_policy(log_group_name)
return ''
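A minimal sketch of the new retention-policy handling (assuming moto's mock_logs decorator and boto3; the group name and retention period are illustrative):

import boto3
from moto import mock_logs

@mock_logs
def retention_demo():
    client = boto3.client("logs", region_name="us-east-1")
    client.create_log_group(logGroupName="my-group")
    client.put_retention_policy(logGroupName="my-group", retentionInDays=7)
    group = client.describe_log_groups(logGroupNamePrefix="my-group")["logGroups"][0]
    assert group["retentionInDays"] == 7
    # After deletion the key is omitted entirely, matching AWS's "Never Expire".
    client.delete_retention_policy(logGroupName="my-group")
    group = client.describe_log_groups(logGroupNamePrefix="my-group")["logGroups"][0]
    return "retentionInDays" in group

print(retention_demo())  # False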

View File

@ -47,6 +47,7 @@ class FakeOrganization(BaseModel):
class FakeAccount(BaseModel):
def __init__(self, organization, **kwargs):
self.type = 'ACCOUNT'
self.organization_id = organization.id
self.master_account_id = organization.master_account_id
self.create_account_status_id = utils.make_random_create_account_status_id()
@ -57,6 +58,7 @@ class FakeAccount(BaseModel):
self.status = 'ACTIVE'
self.joined_method = 'CREATED'
self.parent_id = organization.root_id
self.attached_policies = []
@property
def arn(self):
@ -103,6 +105,7 @@ class FakeOrganizationalUnit(BaseModel):
self.name = kwargs.get('Name')
self.parent_id = kwargs.get('ParentId')
self._arn_format = utils.OU_ARN_FORMAT
self.attached_policies = []
@property
def arn(self):
@ -134,6 +137,7 @@ class FakeRoot(FakeOrganizationalUnit):
'Status': 'ENABLED'
}]
self._arn_format = utils.ROOT_ARN_FORMAT
self.attached_policies = []
def describe(self):
return {
@ -144,12 +148,52 @@ class FakeRoot(FakeOrganizationalUnit):
}
class FakeServiceControlPolicy(BaseModel):
def __init__(self, organization, **kwargs):
self.type = 'POLICY'
self.content = kwargs.get('Content')
self.description = kwargs.get('Description')
self.name = kwargs.get('Name')
self.type = kwargs.get('Type')
self.id = utils.make_random_service_control_policy_id()
self.aws_managed = False
self.organization_id = organization.id
self.master_account_id = organization.master_account_id
self._arn_format = utils.SCP_ARN_FORMAT
self.attachments = []
@property
def arn(self):
return self._arn_format.format(
self.master_account_id,
self.organization_id,
self.id
)
def describe(self):
return {
'Policy': {
'PolicySummary': {
'Id': self.id,
'Arn': self.arn,
'Name': self.name,
'Description': self.description,
'Type': self.type,
'AwsManaged': self.aws_managed,
},
'Content': self.content
}
}
class OrganizationsBackend(BaseBackend):
def __init__(self):
self.org = None
self.accounts = []
self.ou = []
self.policies = []
def create_organization(self, **kwargs):
self.org = FakeOrganization(kwargs['FeatureSet'])
@ -292,5 +336,108 @@ class OrganizationsBackend(BaseBackend):
]
)
def create_policy(self, **kwargs):
new_policy = FakeServiceControlPolicy(self.org, **kwargs)
self.policies.append(new_policy)
return new_policy.describe()
def describe_policy(self, **kwargs):
if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']):
policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
if policy is None:
raise RESTError(
'PolicyNotFoundException',
"You specified a policy that doesn't exist."
)
else:
raise RESTError(
'InvalidInputException',
'You specified an invalid value.'
)
return policy.describe()
def attach_policy(self, **kwargs):
policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
if (re.compile(utils.ROOT_ID_REGEX).match(kwargs['TargetId']) or
re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId'])):
ou = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None)
if ou is not None:
if ou not in ou.attached_policies:
ou.attached_policies.append(policy)
policy.attachments.append(ou)
else:
raise RESTError(
'OrganizationalUnitNotFoundException',
"You specified an organizational unit that doesn't exist."
)
elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']):
account = next((a for a in self.accounts if a.id == kwargs['TargetId']), None)
if account is not None:
if account not in account.attached_policies:
account.attached_policies.append(policy)
policy.attachments.append(account)
else:
raise RESTError(
'AccountNotFoundException',
"You specified an account that doesn't exist."
)
else:
raise RESTError(
'InvalidInputException',
'You specified an invalid value.'
)
def list_policies(self, **kwargs):
return dict(Policies=[
p.describe()['Policy']['PolicySummary'] for p in self.policies
])
def list_policies_for_target(self, **kwargs):
if re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId']):
obj = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None)
if obj is None:
raise RESTError(
'OrganizationalUnitNotFoundException',
"You specified an organizational unit that doesn't exist."
)
elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']):
obj = next((a for a in self.accounts if a.id == kwargs['TargetId']), None)
if obj is None:
raise RESTError(
'AccountNotFoundException',
"You specified an account that doesn't exist."
)
else:
raise RESTError(
'InvalidInputException',
'You specified an invalid value.'
)
return dict(Policies=[
p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies
])
def list_targets_for_policy(self, **kwargs):
if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']):
policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None)
if policy is None:
raise RESTError(
'PolicyNotFoundException',
"You specified a policy that doesn't exist."
)
else:
raise RESTError(
'InvalidInputException',
'You specified an invalid value.'
)
objects = [
{
'TargetId': obj.id,
'Arn': obj.arn,
'Name': obj.name,
'Type': obj.type,
} for obj in policy.attachments
]
return dict(Targets=objects)
organizations_backend = OrganizationsBackend()

View File

@ -85,3 +85,33 @@ class OrganizationsResponse(BaseResponse):
return json.dumps(
self.organizations_backend.list_children(**self.request_params)
)
def create_policy(self):
return json.dumps(
self.organizations_backend.create_policy(**self.request_params)
)
def describe_policy(self):
return json.dumps(
self.organizations_backend.describe_policy(**self.request_params)
)
def attach_policy(self):
return json.dumps(
self.organizations_backend.attach_policy(**self.request_params)
)
def list_policies(self):
return json.dumps(
self.organizations_backend.list_policies(**self.request_params)
)
def list_policies_for_target(self):
return json.dumps(
self.organizations_backend.list_policies_for_target(**self.request_params)
)
def list_targets_for_policy(self):
return json.dumps(
self.organizations_backend.list_targets_for_policy(**self.request_params)
)
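A minimal sketch of the new service control policy operations (assuming moto's mock_organizations decorator and boto3; the policy content and account details are illustrative):

import boto3
from moto import mock_organizations

@mock_organizations
def scp_demo():
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    account_id = client.create_account(
        AccountName="demo", Email="demo@example.com"
    )["CreateAccountStatus"]["AccountId"]
    policy_id = client.create_policy(
        Name="demo-scp",
        Description="demo",
        Type="SERVICE_CONTROL_POLICY",
        Content='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}',
    )["Policy"]["PolicySummary"]["Id"]
    # attach_policy records the attachment on both the policy and the target.
    client.attach_policy(PolicyId=policy_id, TargetId=account_id)
    return client.list_targets_for_policy(PolicyId=policy_id)["Targets"]

print(scp_demo())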

View File

@ -10,6 +10,7 @@ MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}'
ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}'
ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}'
OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}'
SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}'
CHARSET = string.ascii_lowercase + string.digits
ORG_ID_SIZE = 10
@ -17,6 +18,15 @@ ROOT_ID_SIZE = 4
ACCOUNT_ID_SIZE = 12
OU_ID_SUFFIX_SIZE = 8
CREATE_ACCOUNT_STATUS_ID_SIZE = 8
SCP_ID_SIZE = 8
EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE
ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE
OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE)
ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE
CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE
SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE
def make_random_org_id():
@ -57,3 +67,10 @@ def make_random_create_account_status_id():
# "car-" followed by from 8 to 32 lower-case letters or digits.
# e.g. 'car-35gxzwrp'
return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE))
def make_random_service_control_policy_id():
# The regex pattern for a policy ID string requires "p-" followed by
# from 8 to 128 lower-case letters or digits.
# e.g. 'p-k2av4a8a'
return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE))

View File

@ -268,10 +268,26 @@ class fakesock(object):
_sent_data = []
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
protocol=0):
self.truesock = (old_socket(family, type, protocol)
if httpretty.allow_net_connect
else None)
proto=0, fileno=None, _sock=None):
"""
Matches both the Python 2 API:
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
https://github.com/python/cpython/blob/2.7/Lib/socket.py
and the Python 3 API:
def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
https://github.com/python/cpython/blob/3.5/Lib/socket.py
"""
if httpretty.allow_net_connect:
if PY3:
self.truesock = old_socket(family, type, proto, fileno)
else:
# On Python 2, if the parameters are passed positionally instead of as
# kwargs, the 4th positional argument lands in `fileno` even though the
# Python 2 signature names it `_sock`, so fall back to `_sock` only when
# `fileno` is None.
self.truesock = old_socket(family, type, proto, fileno or _sock)
else:
self.truesock = None
self._closed = True
self.fd = FakeSockFile()
self.fd.socket = self

View File

@ -29,7 +29,6 @@ import re
from .compat import BaseClass
from .utils import decode_utf8
STATUSES = {
100: "Continue",
101: "Switching Protocols",

View File

@ -531,14 +531,37 @@ class RedshiftBackend(BaseBackend):
setattr(cluster, key, value)
if new_cluster_identifier:
self.delete_cluster(cluster_identifier)
dic = {
"cluster_identifier": cluster_identifier,
"skip_final_snapshot": True,
"final_cluster_snapshot_identifier": None
}
self.delete_cluster(**dic)
cluster.cluster_identifier = new_cluster_identifier
self.clusters[new_cluster_identifier] = cluster
return cluster
def delete_cluster(self, cluster_identifier):
def delete_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs.pop("cluster_identifier")
cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot")
cluster_snapshot_identifier = cluster_kwargs.pop("final_cluster_snapshot_identifier")
if cluster_identifier in self.clusters:
if cluster_skip_final_snapshot is False and cluster_snapshot_identifier is None:
raise ClientError(
"InvalidParameterValue",
'FinalSnapshotIdentifier is required for Snapshot copy '
'when SkipFinalSnapshot is False'
)
elif cluster_skip_final_snapshot is False and cluster_snapshot_identifier is not None: # create snapshot
cluster = self.describe_clusters(cluster_identifier)[0]
self.create_cluster_snapshot(
cluster_identifier,
cluster_snapshot_identifier,
cluster.region,
cluster.tags)
return self.clusters.pop(cluster_identifier)
raise ClusterNotFoundError(cluster_identifier)
@ -617,9 +640,12 @@ class RedshiftBackend(BaseBackend):
def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None):
if cluster_identifier:
cluster_snapshots = []
for snapshot in self.snapshots.values():
if snapshot.cluster.cluster_identifier == cluster_identifier:
return [snapshot]
cluster_snapshots.append(snapshot)
if cluster_snapshots:
return cluster_snapshots
raise ClusterNotFoundError(cluster_identifier)
if snapshot_identifier:

View File

@ -240,8 +240,13 @@ class RedshiftResponse(BaseResponse):
})
def delete_cluster(self):
cluster_identifier = self._get_param("ClusterIdentifier")
cluster = self.redshift_backend.delete_cluster(cluster_identifier)
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot")
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response({
"DeleteClusterResponse": {

View File

@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import resourcegroups_backends
from ..core.models import base_decorator
resourcegroups_backend = resourcegroups_backends['us-east-1']
mock_resourcegroups = base_decorator(resourcegroups_backends)

View File

@ -0,0 +1,13 @@
from __future__ import unicode_literals
import json
from werkzeug.exceptions import HTTPException
class BadRequestException(HTTPException):
code = 400
def __init__(self, message, **kwargs):
super(BadRequestException, self).__init__(
description=json.dumps({"Message": message, "Code": "BadRequestException"}), **kwargs
)

View File

@ -0,0 +1,338 @@
from __future__ import unicode_literals
from builtins import str
import boto3
import json
import re
from moto.core import BaseBackend, BaseModel
from .exceptions import BadRequestException
class FakeResourceGroup(BaseModel):
def __init__(self, name, resource_query, description=None, tags=None):
self.errors = []
description = description or ""
tags = tags or {}
if self._validate_description(value=description):
self._description = description
if self._validate_name(value=name):
self._name = name
if self._validate_resource_query(value=resource_query):
self._resource_query = resource_query
if self._validate_tags(value=tags):
self._tags = tags
self._raise_errors()
self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name)
@staticmethod
def _format_error(key, value, constraint):
return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format(
constraint=constraint,
key=key,
value=value,
)
def _raise_errors(self):
if self.errors:
errors_len = len(self.errors)
plural = "s" if len(self.errors) > 1 else ""
errors = "; ".join(self.errors)
raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format(
errors_len=errors_len, plural=plural, errors=errors,
))
def _validate_description(self, value):
errors = []
if len(value) > 511:
errors.append(self._format_error(
key="description",
value=value,
constraint="Member must have length less than or equal to 512",
))
if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value):
errors.append(self._format_error(
key="name",
value=value,
constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*",
))
if errors:
self.errors += errors
return False
return True
def _validate_name(self, value):
errors = []
if len(value) > 128:
errors.append(self._format_error(
key="name",
value=value,
constraint="Member must have length less than or equal to 128",
))
# Note: the backslash here is a literal character to match, not an escape.
if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value):
errors.append(self._format_error(
key="name",
value=value,
constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+",
))
if errors:
self.errors += errors
return False
return True
def _validate_resource_query(self, value):
errors = []
if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}:
errors.append(self._format_error(
key="resourceQuery.type",
value=value,
constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]",
))
if len(value["Query"]) > 2048:
errors.append(self._format_error(
key="resourceQuery.query",
value=value,
constraint="Member must have length less than or equal to 2048",
))
if errors:
self.errors += errors
return False
return True
def _validate_tags(self, value):
errors = []
# AWS only outputs one error for all keys and one for all values.
error_keys = None
error_values = None
regex = re.compile(r"^([\\p{L}\\p{Z}\\p{N}_.:/=+\-@]*)$")
for tag_key, tag_value in value.items():
# Validation for len(tag_key) >= 1 is done by botocore.
if len(tag_key) > 128 or re.match(regex, tag_key):
error_keys = self._format_error(
key="tags",
value=value,
constraint=(
"Map value must satisfy constraint: ["
"Member must have length less than or equal to 128, "
"Member must have length greater than or equal to 1, "
r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
"]"
),
)
# Validation for len(tag_value) >= 0 is nonsensical.
if len(tag_value) > 256 or re.match(regex, tag_key):
error_values = self._format_error(
key="tags",
value=value,
constraint=(
"Map value must satisfy constraint: ["
"Member must have length less than or equal to 256, "
"Member must have length greater than or equal to 0, "
r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$"
"]"
),
)
if error_keys:
errors.append(error_keys)
if error_values:
errors.append(error_values)
if errors:
self.errors += errors
return False
return True
@property
def description(self):
return self._description
@description.setter
def description(self, value):
if not self._validate_description(value=value):
self._raise_errors()
self._description = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not self._validate_name(value=value):
self._raise_errors()
self._name = value
@property
def resource_query(self):
return self._resource_query
@resource_query.setter
def resource_query(self, value):
if not self._validate_resource_query(value=value):
self._raise_errors()
self._resource_query = value
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
if not self._validate_tags(value=value):
self._raise_errors()
self._tags = value
class ResourceGroups():
def __init__(self):
self.by_name = {}
self.by_arn = {}
def __contains__(self, item):
return item in self.by_name
def append(self, resource_group):
self.by_name[resource_group.name] = resource_group
self.by_arn[resource_group.arn] = resource_group
def delete(self, name):
group = self.by_name[name]
del self.by_name[name]
del self.by_arn[group.arn]
return group
class ResourceGroupsBackend(BaseBackend):
def __init__(self, region_name=None):
super(ResourceGroupsBackend, self).__init__()
self.region_name = region_name
self.groups = ResourceGroups()
@staticmethod
def _validate_resource_query(resource_query):
type = resource_query["Type"]
query = json.loads(resource_query["Query"])
query_keys = set(query.keys())
invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax")
if not isinstance(query["ResourceTypeFilters"], list):
raise invalid_json_exception
if type == "CLOUDFORMATION_STACK_1_0":
if query_keys != {"ResourceTypeFilters", "StackIdentifier"}:
raise invalid_json_exception
stack_identifier = query["StackIdentifier"]
if not isinstance(stack_identifier, str):
raise invalid_json_exception
if not re.match(
r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$",
stack_identifier,
):
raise BadRequestException(
"Invalid query: Verify that the specified ARN is formatted correctly."
)
# To be enabled once checking other resources is implemented:
# if stack_identifier not in self.cloudformation_backend.stacks:
# raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.")
if type == "TAG_FILTERS_1_0":
if query_keys != {"ResourceTypeFilters", "TagFilters"}:
raise invalid_json_exception
tag_filters = query["TagFilters"]
if not isinstance(tag_filters, list):
raise invalid_json_exception
if not tag_filters or len(tag_filters) > 50:
raise BadRequestException(
"Invalid query: The TagFilters list must contain between 1 and 50 elements"
)
for tag_filter in tag_filters:
if not isinstance(tag_filter, dict):
raise invalid_json_exception
if set(tag_filter.keys()) != {"Key", "Values"}:
raise invalid_json_exception
key = tag_filter["Key"]
if not isinstance(key, str):
raise invalid_json_exception
if not key:
raise BadRequestException(
"Invalid query: The TagFilter element cannot have empty or null Key field"
)
if len(key) > 128:
raise BadRequestException("Invalid query: The maximum length for a tag Key is 128")
values = tag_filter["Values"]
if not isinstance(values, list):
raise invalid_json_exception
if len(values) > 20:
raise BadRequestException(
"Invalid query: The TagFilter Values list must contain between 0 and 20 elements"
)
for value in values:
if not isinstance(value, str):
raise invalid_json_exception
if len(value) > 256:
raise BadRequestException(
"Invalid query: The maximum length for a tag Value is 256"
)
@staticmethod
def _validate_tags(tags):
for tag in tags:
if tag.lower().startswith('aws:'):
raise BadRequestException("Tag keys must not start with 'aws:'")
def create_group(self, name, resource_query, description=None, tags=None):
tags = tags or {}
group = FakeResourceGroup(
name=name,
resource_query=resource_query,
description=description,
tags=tags,
)
if name in self.groups:
raise BadRequestException("Cannot create group: group already exists")
if name.upper().startswith("AWS"):
raise BadRequestException("Group name must not start with 'AWS'")
self._validate_tags(tags)
self._validate_resource_query(resource_query)
self.groups.append(group)
return group
def delete_group(self, group_name):
return self.groups.delete(name=group_name)
def get_group(self, group_name):
return self.groups.by_name[group_name]
def get_tags(self, arn):
return self.groups.by_arn[arn].tags
# def list_group_resources(self):
# ...
def list_groups(self, filters=None, max_results=None, next_token=None):
return self.groups.by_name
# def search_resources(self):
# ...
def tag(self, arn, tags):
all_tags = self.groups.by_arn[arn].tags
all_tags.update(tags)
self._validate_tags(all_tags)
self.groups.by_arn[arn].tags = all_tags
def untag(self, arn, keys):
group = self.groups.by_arn[arn]
for key in keys:
del group.tags[key]
def update_group(self, group_name, description=None):
if description:
self.groups.by_name[group_name].description = description
return self.groups.by_name[group_name]
def update_group_query(self, group_name, resource_query):
self._validate_resource_query(resource_query)
self.groups.by_name[group_name].resource_query = resource_query
return self.groups.by_name[group_name]
available_regions = boto3.session.Session().get_available_regions("resource-groups")
resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions}
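A minimal sketch of the validation behaviour wired through FakeResourceGroup (assuming these new classes live under moto.resourcegroups; the over-long name is illustrative):

from moto.resourcegroups.models import FakeResourceGroup
from moto.resourcegroups.exceptions import BadRequestException

try:
    # A 129-character name fails _validate_name, and _raise_errors collects
    # every failed validator into a single BadRequestException.
    FakeResourceGroup(
        name="x" * 129,
        resource_query={"Type": "TAG_FILTERS_1_0", "Query": "{}"},
    )
except BadRequestException as error:
    print(error.description)  # 1 validation error detected: ...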

View File

@ -0,0 +1,162 @@
from __future__ import unicode_literals
import json
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from moto.core.responses import BaseResponse
from .models import resourcegroups_backends
class ResourceGroupsResponse(BaseResponse):
SERVICE_NAME = 'resource-groups'
@property
def resourcegroups_backend(self):
return resourcegroups_backends[self.region]
def create_group(self):
name = self._get_param("Name")
description = self._get_param("Description")
resource_query = self._get_param("ResourceQuery")
tags = self._get_param("Tags")
group = self.resourcegroups_backend.create_group(
name=name,
description=description,
resource_query=resource_query,
tags=tags,
)
return json.dumps({
"Group": {
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description
},
"ResourceQuery": group.resource_query,
"Tags": group.tags
})
def delete_group(self):
group_name = self._get_param("GroupName")
group = self.resourcegroups_backend.delete_group(group_name=group_name)
return json.dumps({
"Group": {
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description
},
})
def get_group(self):
group_name = self._get_param("GroupName")
group = self.resourcegroups_backend.get_group(group_name=group_name)
return json.dumps({
"Group": {
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description,
}
})
def get_group_query(self):
group_name = self._get_param("GroupName")
group = self.resourcegroups_backend.get_group(group_name=group_name)
return json.dumps({
"GroupQuery": {
"GroupName": group.name,
"ResourceQuery": group.resource_query,
}
})
def get_tags(self):
arn = unquote(self._get_param("Arn"))
return json.dumps({
"Arn": arn,
"Tags": self.resourcegroups_backend.get_tags(arn=arn)
})
def list_group_resources(self):
raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented')
def list_groups(self):
filters = self._get_param("Filters")
if filters:
raise NotImplementedError(
'ResourceGroups.list_groups with filter parameter is not yet implemented'
)
max_results = self._get_int_param("MaxResults", 50)
next_token = self._get_param("NextToken")
groups = self.resourcegroups_backend.list_groups(
filters=filters,
max_results=max_results,
next_token=next_token
)
return json.dumps({
"GroupIdentifiers": [{
"GroupName": group.name,
"GroupArn": group.arn,
} for group in groups.values()],
"Groups": [{
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description,
} for group in groups.values()],
"NextToken": next_token,
})
def search_resources(self):
raise NotImplementedError('ResourceGroups.search_resources is not yet implemented')
def tag(self):
arn = unquote(self._get_param("Arn"))
tags = self._get_param("Tags")
if arn not in self.resourcegroups_backend.groups.by_arn:
raise NotImplementedError(
'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented'
)
self.resourcegroups_backend.tag(arn=arn, tags=tags)
return json.dumps({
"Arn": arn,
"Tags": tags
})
def untag(self):
arn = unquote(self._get_param("Arn"))
keys = self._get_param("Keys")
if arn not in self.resourcegroups_backend.groups.by_arn:
raise NotImplementedError(
'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented'
)
self.resourcegroups_backend.untag(arn=arn, keys=keys)
return json.dumps({
"Arn": arn,
"Keys": keys
})
def update_group(self):
group_name = self._get_param("GroupName")
description = self._get_param("Description", "")
group = self.resourcegroups_backend.update_group(group_name=group_name, description=description)
return json.dumps({
"Group": {
"GroupArn": group.arn,
"Name": group.name,
"Description": group.description
},
})
def update_group_query(self):
group_name = self._get_param("GroupName")
resource_query = self._get_param("ResourceQuery")
group = self.resourcegroups_backend.update_group_query(
group_name=group_name,
resource_query=resource_query
)
return json.dumps({
"GroupQuery": {
"GroupName": group.name,
"ResourceQuery": resource_query
}
})

View File

@ -0,0 +1,14 @@
from __future__ import unicode_literals
from .responses import ResourceGroupsResponse
url_bases = [
"https?://resource-groups(-fips)?.(.+).amazonaws.com",
]
url_paths = {
'{0}/groups$': ResourceGroupsResponse.dispatch,
'{0}/groups/(?P<resource_group_name>[^/]+)$': ResourceGroupsResponse.dispatch,
'{0}/groups/(?P<resource_group_name>[^/]+)/query$': ResourceGroupsResponse.dispatch,
'{0}/groups-list$': ResourceGroupsResponse.dispatch,
'{0}/resources/(?P<resource_arn>[^/]+)/tags$': ResourceGroupsResponse.dispatch,
}
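Because the tagging route carries the group ARN as a single path segment, callers must percent-encode it and the handlers unquote it before lookup. A quick illustration of that round trip (values hypothetical):
try:
    from urllib import quote, unquote        # Python 2
except ImportError:
    from urllib.parse import quote, unquote  # Python 3

arn = "arn:aws:resource-groups:us-east-1:123456789012:group/demo-group"
path = "/resources/{0}/tags".format(quote(arn, safe=""))
# (?P<resource_arn>[^/]+) matches the encoded ARN; the handler unquotes it back
assert unquote(path.split("/")[2]) == arn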

View File

@ -24,7 +24,7 @@ class HealthCheck(BaseModel):
self.id = health_check_id
self.ip_address = health_check_args.get("ip_address")
self.port = health_check_args.get("port", 80)
self._type = health_check_args.get("type")
self.type_ = health_check_args.get("type")
self.resource_path = health_check_args.get("resource_path")
self.fqdn = health_check_args.get("fqdn")
self.search_string = health_check_args.get("search_string")
@ -58,7 +58,7 @@ class HealthCheck(BaseModel):
<HealthCheckConfig>
<IPAddress>{{ health_check.ip_address }}</IPAddress>
<Port>{{ health_check.port }}</Port>
<Type>{{ health_check._type }}</Type>
<Type>{{ health_check.type_ }}</Type>
<ResourcePath>{{ health_check.resource_path }}</ResourcePath>
<FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>
<RequestInterval>{{ health_check.request_interval }}</RequestInterval>
@ -76,7 +76,7 @@ class RecordSet(BaseModel):
def __init__(self, kwargs):
self.name = kwargs.get('Name')
self._type = kwargs.get('Type')
self.type_ = kwargs.get('Type')
self.ttl = kwargs.get('TTL')
self.records = kwargs.get('ResourceRecords', [])
self.set_identifier = kwargs.get('SetIdentifier')
@ -119,7 +119,7 @@ class RecordSet(BaseModel):
properties["HostedZoneId"])
try:
hosted_zone.delete_rrset_by_name(resource_name)
hosted_zone.delete_rrset({'Name': resource_name})
except KeyError:
pass
@ -130,7 +130,7 @@ class RecordSet(BaseModel):
def to_xml(self):
template = Template("""<ResourceRecordSet>
<Name>{{ record_set.name }}</Name>
<Type>{{ record_set._type }}</Type>
<Type>{{ record_set.type_ }}</Type>
{% if record_set.set_identifier %}
<SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>
{% endif %}
@ -162,7 +162,13 @@ class RecordSet(BaseModel):
self.hosted_zone_name)
if not hosted_zone:
hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id)
hosted_zone.delete_rrset_by_name(self.name)
hosted_zone.delete_rrset({'Name': self.name, 'Type': self.type_})
def reverse_domain_name(domain_name):
if domain_name.endswith('.'): # normalize without trailing dot
domain_name = domain_name[:-1]
return '.'.join(reversed(domain_name.split('.')))
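Reversing the labels makes plain lexicographic comparison follow the DNS hierarchy, which is what the StartRecordName filtering in get_record_sets below relies on. For example:
# using reverse_domain_name as defined above
assert reverse_domain_name("db.example.com.") == "com.example.db"
# all of example.com sorts together, ahead of example.org
names = ["api.example.com.", "example.com.", "zeta.example.org."]
assert sorted(names, key=reverse_domain_name)[0] == "example.com."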
class FakeZone(BaseModel):
@ -183,16 +189,20 @@ class FakeZone(BaseModel):
def upsert_rrset(self, record_set):
new_rrset = RecordSet(record_set)
for i, rrset in enumerate(self.rrsets):
if rrset.name == new_rrset.name:
if rrset.name == new_rrset.name and rrset.type_ == new_rrset.type_:
self.rrsets[i] = new_rrset
break
else:
self.rrsets.append(new_rrset)
return new_rrset
def delete_rrset_by_name(self, name):
def delete_rrset(self, rrset):
self.rrsets = [
record_set for record_set in self.rrsets if record_set.name != name]
record_set
for record_set in self.rrsets
if record_set.name != rrset['Name'] or
(rrset.get('Type') is not None and record_set.type_ != rrset['Type'])
]
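delete_rrset now matches on both Name and, when supplied, Type, so an A record and a TXT record sharing a name no longer delete each other. A self-contained sketch of the filter (RR is a hypothetical stand-in for RecordSet):
class RR(object):
    # hypothetical stand-in for RecordSet
    def __init__(self, name, type_):
        self.name, self.type_ = name, type_

rrsets = [RR("example.com.", "A"), RR("example.com.", "TXT")]
rrset = {"Name": "example.com.", "Type": "A"}
rrsets = [
    rs for rs in rrsets
    if rs.name != rrset["Name"] or
    (rrset.get("Type") is not None and rs.type_ != rrset["Type"])
]
assert [rs.type_ for rs in rrsets] == ["TXT"]  # only the A record was removed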
def delete_rrset_by_id(self, set_identifier):
self.rrsets = [
@ -200,12 +210,15 @@ class FakeZone(BaseModel):
def get_record_sets(self, start_type, start_name):
record_sets = list(self.rrsets) # Copy the list
if start_type:
record_sets = [
record_set for record_set in record_sets if record_set._type >= start_type]
if start_name:
record_sets = [
record_set for record_set in record_sets if record_set.name >= start_name]
record_set
for record_set in record_sets
if reverse_domain_name(record_set.name) >= reverse_domain_name(start_name)
]
if start_type:
record_sets = [
record_set for record_set in record_sets if record_set.type_ >= start_type]
return record_sets

View File

@ -147,7 +147,7 @@ class Route53(BaseResponse):
the_zone.delete_rrset_by_id(
record_set["SetIdentifier"])
else:
the_zone.delete_rrset_by_name(record_set["Name"])
the_zone.delete_rrset(record_set)
return 200, headers, CHANGE_RRSET_RESPONSE

View File

@ -10,14 +10,18 @@ import random
import string
import tempfile
import sys
import uuid
import six
from bisect import insort
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \
EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys
from .exceptions import (
BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest,
EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass,
InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted
)
from .utils import clean_key_name, _VersionedKeyStore
MAX_BUCKET_NAME_LENGTH = 63
@ -35,7 +39,7 @@ class FakeDeleteMarker(BaseModel):
self.key = key
self.name = key.name
self.last_modified = datetime.datetime.utcnow()
self._version_id = key.version_id + 1
self._version_id = str(uuid.uuid4())
@property
def last_modified_ISO8601(self):
@ -86,10 +90,13 @@ class FakeKey(BaseModel):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
def copy(self, new_name=None):
def copy(self, new_name=None, new_is_versioned=None):
r = copy.deepcopy(self)
if new_name is not None:
r.name = new_name
if new_is_versioned is not None:
r._is_versioned = new_is_versioned
r.refresh_version()
return r
def set_metadata(self, metadata, replace=False):
@ -115,15 +122,16 @@ class FakeKey(BaseModel):
self.last_modified = datetime.datetime.utcnow()
self._etag = None # must recalculate etag
if self._is_versioned:
self._version_id += 1
self._version_id = str(uuid.uuid4())
else:
self._is_versioned = 0
self._version_id = None
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
def increment_version(self):
self._version_id += 1
def refresh_version(self):
self._version_id = str(uuid.uuid4())
self.last_modified = datetime.datetime.utcnow()
@property
def etag(self):
@ -458,6 +466,7 @@ class FakeBucket(BaseModel):
self.cors = []
self.logging = {}
self.notification_configuration = None
self.accelerate_configuration = None
@property
def location(self):
@ -552,7 +561,6 @@ class FakeBucket(BaseModel):
self.rules = []
def set_cors(self, rules):
from moto.s3.exceptions import InvalidRequest, MalformedXML
self.cors = []
if len(rules) > 100:
@ -602,7 +610,6 @@ class FakeBucket(BaseModel):
self.logging = {}
return
from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.")
@ -650,6 +657,13 @@ class FakeBucket(BaseModel):
if region != self.region_name:
raise InvalidNotificationDestination()
def set_accelerate_configuration(self, accelerate_config):
if self.accelerate_configuration is None and accelerate_config == 'Suspended':
# Cannot "suspend" a not active acceleration. Leaves it undefined
return
self.accelerate_configuration = accelerate_config
def set_website_configuration(self, website_configuration):
self.website_configuration = website_configuration
@ -716,17 +730,18 @@ class S3Backend(BaseBackend):
def get_bucket_latest_versions(self, bucket_name):
versions = self.get_bucket_versions(bucket_name)
maximum_version_per_key = {}
latest_modified_per_key = {}
latest_versions = {}
for version in versions:
name = version.name
last_modified = version.last_modified
version_id = version.version_id
maximum_version_per_key[name] = max(
version_id,
maximum_version_per_key.get(name, -1)
latest_modified_per_key[name] = max(
last_modified,
latest_modified_per_key.get(name, datetime.datetime.min)
)
if version_id == maximum_version_per_key[name]:
if last_modified == latest_modified_per_key[name]:
latest_versions[name] = version_id
return latest_versions
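With UUID version IDs there is no numeric ordering to lean on, so the latest version per key is chosen by last_modified instead. A stripped-down sketch of the same selection, assuming version objects with those attributes:
def latest_by_timestamp(versions):
    # versions: iterable of objects exposing .name, .last_modified, .version_id
    newest = {}  # name -> (last_modified, version_id)
    for v in versions:
        if v.name not in newest or v.last_modified >= newest[v.name][0]:
            newest[v.name] = (v.last_modified, v.version_id)
    return {name: pair[1] for name, pair in newest.items()}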
@ -774,20 +789,19 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
old_key = bucket.keys.get(key_name, None)
if old_key is not None and bucket.is_versioned:
new_version_id = old_key._version_id + 1
else:
new_version_id = 0
new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=new_version_id)
bucket.keys[key_name] = new_key
version_id=str(uuid.uuid4()) if bucket.is_versioned else None)
keys = [
key for key in bucket.keys.getlist(key_name, [])
if key.version_id != new_key.version_id
] + [new_key]
bucket.keys.setlist(key_name, keys)
return new_key
@ -852,6 +866,15 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration):
if accelerate_configuration not in ['Enabled', 'Suspended']:
raise MalformedXML()
bucket = self.get_bucket(bucket_name)
if bucket.name.find('.') != -1:
raise InvalidRequest('PutBucketAccelerateConfiguration')
bucket.set_accelerate_configuration(accelerate_configuration)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
@ -889,12 +912,11 @@ class S3Backend(BaseBackend):
return multipart.set_part(part_id, value)
def copy_part(self, dest_bucket_name, multipart_id, part_id,
src_bucket_name, src_key_name, start_byte, end_byte):
src_key_name = clean_key_name(src_key_name)
src_bucket = self.get_bucket(src_bucket_name)
src_bucket_name, src_key_name, src_version_id, start_byte, end_byte):
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = src_bucket.keys[src_key_name].value
src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value
if start_byte is not None:
src_value = src_value[start_byte:end_byte + 1]
return multipart.set_part(part_id, src_value)
@ -971,17 +993,15 @@ class S3Backend(BaseBackend):
dest_bucket = self.get_bucket(dest_bucket_name)
key = self.get_key(src_bucket_name, src_key_name,
version_id=src_version_id)
if dest_key_name != src_key_name:
key = key.copy(dest_key_name)
dest_bucket.keys[dest_key_name] = key
# By this point, the destination key must exist, or KeyError
if dest_bucket.is_versioned:
dest_bucket.keys[dest_key_name].increment_version()
new_key = key.copy(dest_key_name, dest_bucket.is_versioned)
if storage is not None:
key.set_storage_class(storage)
new_key.set_storage_class(storage)
if acl is not None:
key.set_acl(acl)
new_key.set_acl(acl)
dest_bucket.keys[dest_key_name] = new_key
def set_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)

50
moto/s3/responses.py Executable file → Normal file
View File

@ -19,7 +19,7 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi
MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \
FakeTag
from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
from .utils import bucket_name_from_url, clean_key_name, metadata_from_headers, parse_region_from_url
from xml.dom import minidom
@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(bucket=bucket)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0]
@ -442,6 +449,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
if body:
@ -691,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
if 'x-amz-copy-source' in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None)
src_range = request.headers.get(
'x-amz-copy-source-range', '').split("bytes=")[-1]
@ -700,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key, start_byte, end_byte)
src_key, src_version_id, start_byte, end_byte)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
@ -733,7 +755,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
# Copy key
# the copy source may carry a quoted ?versionId=abc query string,
# so we need to parse the unquoted string first
src_key = request.headers.get("x-amz-copy-source")
src_key = clean_key_name(request.headers.get("x-amz-copy-source"))
if isinstance(src_key, six.binary_type):
src_key = src_key.decode('utf-8')
src_key_parsed = urlparse(src_key)
@ -741,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
lstrip("/").split("/", 1)
src_version_id = parse_qs(src_key_parsed.query).get(
'versionId', [None])[0]
if self.backend.get_key(src_bucket, src_key, version_id=src_version_id):
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class, acl=acl, src_version_id=src_version_id)
else:
return 404, response_headers, ""
new_key = self.backend.get_key(bucket_name, key_name)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
@ -1034,6 +1061,11 @@ class ResponseObject(_TemplateEnvironmentMixin):
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml['AccelerateConfiguration']
return config['Status']
def _key_response_delete(self, bucket_name, query, key_name, headers):
if query.get('uploadId'):
upload_id = query['uploadId'][0]
@ -1303,7 +1335,7 @@ S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key.version_id }}</VersionId>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
@ -1686,3 +1718,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""

View File

@ -27,3 +27,18 @@ class InvalidParameterException(SecretsManagerClientError):
super(InvalidParameterException, self).__init__(
'InvalidParameterException',
message)
class ResourceExistsException(SecretsManagerClientError):
def __init__(self, message):
super(ResourceExistsException, self).__init__(
'ResourceExistsException',
message
)
class InvalidRequestException(SecretsManagerClientError):
def __init__(self, message):
super(InvalidRequestException, self).__init__(
'InvalidRequestException',
message)

View File

@ -3,6 +3,7 @@ from __future__ import unicode_literals
import time
import json
import uuid
import datetime
import boto3
@ -10,6 +11,8 @@ from moto.core import BaseBackend, BaseModel
from .exceptions import (
ResourceNotFoundException,
InvalidParameterException,
ResourceExistsException,
InvalidRequestException,
ClientError
)
from .utils import random_password, secret_arn
@ -36,48 +39,130 @@ class SecretsManagerBackend(BaseBackend):
def _is_valid_identifier(self, identifier):
return identifier in self.secrets
def _unix_time_secs(self, dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds()
def get_secret_value(self, secret_id, version_id, version_stage):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException()
secret = self.secrets[secret_id]
if not version_id and version_stage:
# set version_id to match version_stage
versions_dict = self.secrets[secret_id]['versions']
for ver_id, ver_val in versions_dict.items():
if version_stage in ver_val['version_stages']:
version_id = ver_id
break
if not version_id:
raise ResourceNotFoundException()
response = json.dumps({
# TODO check this part
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
secret = self.secrets[secret_id]
version_id = version_id or secret['default_version_id']
secret_version = secret['versions'][version_id]
response_data = {
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": secret['version_id'],
"SecretString": secret['secret_string'],
"VersionStages": [
"AWSCURRENT",
],
"CreatedDate": secret['createdate']
})
"VersionId": secret_version['version_id'],
"VersionStages": secret_version['version_stages'],
"CreatedDate": secret_version['createdate'],
}
if 'secret_string' in secret_version:
response_data["SecretString"] = secret_version['secret_string']
if 'secret_binary' in secret_version:
response_data["SecretBinary"] = secret_version['secret_binary']
response = json.dumps(response_data)
return response
def create_secret(self, name, secret_string, tags, **kwargs):
def create_secret(self, name, secret_string=None, secret_binary=None, tags=[], **kwargs):
generated_version_id = str(uuid.uuid4())
# error if secret exists
if name in self.secrets.keys():
raise ResourceExistsException('A resource with the ID you requested already exists.')
secret = {
'secret_string': secret_string,
'secret_id': name,
'name': name,
'createdate': int(time.time()),
'rotation_enabled': False,
'rotation_lambda_arn': '',
'auto_rotate_after_days': 0,
'version_id': generated_version_id,
'tags': tags
}
self.secrets[name] = secret
version_id = self._add_secret(name, secret_string=secret_string, secret_binary=secret_binary, tags=tags)
response = json.dumps({
"ARN": secret_arn(self.region, name),
"Name": name,
"VersionId": generated_version_id,
"VersionId": version_id,
})
return response
def _add_secret(self, secret_id, secret_string=None, secret_binary=None, tags=[], version_id=None, version_stages=None):
if version_stages is None:
version_stages = ['AWSCURRENT']
if not version_id:
version_id = str(uuid.uuid4())
secret_version = {
'createdate': int(time.time()),
'version_id': version_id,
'version_stages': version_stages,
}
if secret_string is not None:
secret_version['secret_string'] = secret_string
if secret_binary is not None:
secret_version['secret_binary'] = secret_binary
if secret_id in self.secrets:
# remove all old AWSPREVIOUS stages
for secret_version_to_look_at in self.secrets[secret_id]['versions'].values():
if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']:
secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS')
# demote the old AWSCURRENT version to AWSPREVIOUS
previous_current_version_id = self.secrets[secret_id]['default_version_id']
self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']
self.secrets[secret_id]['versions'][version_id] = secret_version
self.secrets[secret_id]['default_version_id'] = version_id
else:
self.secrets[secret_id] = {
'versions': {
version_id: secret_version
},
'default_version_id': version_id,
}
secret = self.secrets[secret_id]
secret['secret_id'] = secret_id
secret['name'] = secret_id
secret['rotation_enabled'] = False
secret['rotation_lambda_arn'] = ''
secret['auto_rotate_after_days'] = 0
secret['tags'] = tags
return version_id
def put_secret_value(self, secret_id, secret_string, version_stages):
version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages)
response = json.dumps({
'ARN': secret_arn(self.region, secret_id),
'Name': secret_id,
'VersionId': version_id,
'VersionStages': version_stages
})
return response
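put_secret_value funnels through _add_secret, which promotes the new version to AWSCURRENT and demotes the previous one to AWSPREVIOUS. A sketch of the resulting staging behaviour via boto3 (mock decorator and values assumed):
import boto3
from moto import mock_secretsmanager

@mock_secretsmanager
def demo_staging():
    sm = boto3.client("secretsmanager", region_name="us-east-1")
    sm.create_secret(Name="demo-secret", SecretString="v1")
    sm.put_secret_value(SecretId="demo-secret", SecretString="v2",
                        VersionStages=["AWSCURRENT"])
    # the new version holds AWSCURRENT; the old one was demoted to AWSPREVIOUS
    assert sm.get_secret_value(SecretId="demo-secret")["SecretString"] == "v2"
    assert sm.get_secret_value(SecretId="demo-secret",
                               VersionStage="AWSPREVIOUS")["SecretString"] == "v1"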
@ -101,7 +186,7 @@ class SecretsManagerBackend(BaseBackend):
"LastRotatedDate": None,
"LastChangedDate": None,
"LastAccessedDate": None,
"DeletedDate": None,
"DeletedDate": secret.get('deleted_date', None),
"Tags": secret['tags']
})
@ -115,6 +200,12 @@ class SecretsManagerBackend(BaseBackend):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if client_request_token:
token_length = len(client_request_token)
if token_length < 32 or token_length > 64:
@ -144,17 +235,24 @@ class SecretsManagerBackend(BaseBackend):
secret = self.secrets[secret_id]
secret['version_id'] = client_request_token or ''
old_secret_version = secret['versions'][secret['default_version_id']]
new_version_id = client_request_token or str(uuid.uuid4())
self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT'])
secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
if rotation_rules:
secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
if secret['auto_rotate_after_days'] > 0:
secret['rotation_enabled'] = True
if 'AWSCURRENT' in old_secret_version['version_stages']:
old_secret_version['version_stages'].remove('AWSCURRENT')
response = json.dumps({
"ARN": secret_arn(self.region, secret['secret_id']),
"Name": secret['name'],
"VersionId": secret['version_id']
"VersionId": new_version_id
})
return response
@ -188,6 +286,111 @@ class SecretsManagerBackend(BaseBackend):
return response
def list_secret_version_ids(self, secret_id):
secret = self.secrets[secret_id]
version_list = []
for version_id, version in secret['versions'].items():
version_list.append({
'CreatedDate': int(time.time()),
'LastAccessedDate': int(time.time()),
'VersionId': version_id,
'VersionStages': version['version_stages'],
})
response = json.dumps({
'ARN': secret['secret_id'],
'Name': secret['name'],
'NextToken': '',
'Versions': version_list,
})
return response
def list_secrets(self, max_results, next_token):
# TODO implement pagination and limits
secret_list = []
for secret in self.secrets.values():
versions_to_stages = {}
for version_id, version in secret['versions'].items():
versions_to_stages[version_id] = version['version_stages']
secret_list.append({
"ARN": secret_arn(self.region, secret['secret_id']),
"DeletedDate": secret.get('deleted_date', None),
"Description": "",
"KmsKeyId": "",
"LastAccessedDate": None,
"LastChangedDate": None,
"LastRotatedDate": None,
"Name": secret['name'],
"RotationEnabled": secret['rotation_enabled'],
"RotationLambdaARN": secret['rotation_lambda_arn'],
"RotationRules": {
"AutomaticallyAfterDays": secret['auto_rotate_after_days']
},
"SecretVersionsToStages": versions_to_stages,
"Tags": secret['tags']
})
return secret_list, None
def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
if 'deleted_date' in self.secrets[secret_id]:
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if recovery_window_in_days and force_delete_without_recovery:
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \
use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays."
)
if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30):
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \
RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
)
deletion_date = datetime.datetime.utcnow()
if force_delete_without_recovery:
secret = self.secrets.pop(secret_id, None)
else:
deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)
self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date)
secret = self.secrets.get(secret_id, None)
if not secret:
raise ResourceNotFoundException
arn = secret_arn(self.region, secret['secret_id'])
name = secret['name']
return arn, name, self._unix_time_secs(deletion_date)
def restore_secret(self, secret_id):
if not self._is_valid_identifier(secret_id):
raise ResourceNotFoundException
self.secrets[secret_id].pop('deleted_date', None)
secret = self.secrets[secret_id]
arn = secret_arn(self.region, secret['secret_id'])
name = secret['name']
return arn, name
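delete_secret is a soft delete by default (it only stamps deleted_date), and restore_secret drops that marker again. A sketch of the round trip (names assumed for illustration):
import boto3
from moto import mock_secretsmanager

@mock_secretsmanager
def demo_delete_restore():
    sm = boto3.client("secretsmanager", region_name="us-east-1")
    sm.create_secret(Name="demo-secret", SecretString="s3cret")
    sm.delete_secret(SecretId="demo-secret", RecoveryWindowInDays=7)  # soft delete
    try:
        sm.get_secret_value(SecretId="demo-secret")
    except sm.exceptions.InvalidRequestException:
        pass  # reads on a deleted secret are rejected
    sm.restore_secret(SecretId="demo-secret")  # drops the deletion marker
    assert sm.get_secret_value(SecretId="demo-secret")["SecretString"] == "s3cret"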
available_regions = (
boto3.session.Session().get_available_regions("secretsmanager")
)

View File

@ -4,6 +4,8 @@ from moto.core.responses import BaseResponse
from .models import secretsmanager_backends
import json
class SecretsManagerResponse(BaseResponse):
@ -19,10 +21,12 @@ class SecretsManagerResponse(BaseResponse):
def create_secret(self):
name = self._get_param('Name')
secret_string = self._get_param('SecretString')
secret_binary = self._get_param('SecretBinary')
tags = self._get_param('Tags', if_none=[])
return secretsmanager_backends[self.region].create_secret(
name=name,
secret_string=secret_string,
secret_binary=secret_binary,
tags=tags
)
@ -64,3 +68,46 @@ class SecretsManagerResponse(BaseResponse):
rotation_lambda_arn=rotation_lambda_arn,
rotation_rules=rotation_rules
)
def put_secret_value(self):
secret_id = self._get_param('SecretId', if_none='')
secret_string = self._get_param('SecretString', if_none='')
version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT'])
return secretsmanager_backends[self.region].put_secret_value(
secret_id=secret_id,
secret_string=secret_string,
version_stages=version_stages,
)
def list_secret_version_ids(self):
secret_id = self._get_param('SecretId', if_none='')
return secretsmanager_backends[self.region].list_secret_version_ids(
secret_id=secret_id
)
def list_secrets(self):
max_results = self._get_int_param("MaxResults")
next_token = self._get_param("NextToken")
secret_list, next_token = secretsmanager_backends[self.region].list_secrets(
max_results=max_results,
next_token=next_token,
)
return json.dumps(dict(SecretList=secret_list, NextToken=next_token))
def delete_secret(self):
secret_id = self._get_param("SecretId")
recovery_window_in_days = self._get_param("RecoveryWindowInDays")
force_delete_without_recovery = self._get_param("ForceDeleteWithoutRecovery")
arn, name, deletion_date = secretsmanager_backends[self.region].delete_secret(
secret_id=secret_id,
recovery_window_in_days=recovery_window_in_days,
force_delete_without_recovery=force_delete_without_recovery,
)
return json.dumps(dict(ARN=arn, Name=name, DeletionDate=deletion_date))
def restore_secret(self):
secret_id = self._get_param("SecretId")
arn, name = secretsmanager_backends[self.region].restore_secret(
secret_id=secret_id,
)
return json.dumps(dict(ARN=arn, Name=name))

View File

@ -255,7 +255,7 @@ class SNSBackend(BaseBackend):
return candidate_topic
def _get_values_nexttoken(self, values_map, next_token=None):
if next_token is None:
if next_token is None or not next_token:
next_token = 0
next_token = int(next_token)
values = list(values_map.values())[

View File

@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
</CreateQueueResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</CreateQueueResponse>"""
@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
</GetQueueUrlResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</GetQueueUrlResponse>"""
@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
{% endfor %}
</ListQueuesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ListQueuesResponse>"""
DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteQueueResponse>"""
@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
{% endfor %}
</GetQueueAttributesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</GetQueueAttributesResponse>"""
SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetQueueAttributesResponse>"""
@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SendMessageResponse>"""
@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
{% endfor %}
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"""
@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
{% endfor %}
</SendMessageBatchResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>"""
DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteMessageResponse>"""
@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
{% endfor %}
</DeleteMessageBatchResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DeleteMessageBatchResponse>"""
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</ChangeMessageVisibilityResponse>"""
@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """<ChangeMessageVisibilityBatchRespo
PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</PurgeQueueResponse>"""

View File

@ -0,0 +1,25 @@
package com.amazonaws.examples
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.sqs.AmazonSQSClientBuilder
import scala.jdk.CollectionConverters._
object QueueTest extends App {
val region = Region.getRegion(Regions.US_WEST_2).getName
val serviceEndpoint = "http://localhost:5000"
val amazonSqs = AmazonSQSClientBuilder.standard()
.withEndpointConfiguration(
new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region))
.build
val queueName = "my-first-queue"
amazonSqs.createQueue(queueName)
val urls = amazonSqs.listQueues().getQueueUrls.asScala
println("Listing queues")
println(urls.map(url => s" - $url").mkString(System.lineSeparator))
println()
}

View File

@ -48,6 +48,7 @@ for policy_name in policies:
PolicyArn=policies[policy_name]['Arn'],
VersionId=policies[policy_name]['DefaultVersionId'])
for key in response['PolicyVersion']:
if key != "CreateDate": # the policy's CreateDate should not be overwritten by its version's CreateDate
policies[policy_name][key] = response['PolicyVersion'][key]
with open(output_file, 'w') as f:

View File

@ -18,17 +18,27 @@ def read(*parts):
return fp.read()
def get_version():
version_file = read('moto', '__init__.py')
version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
version_file, re.MULTILINE)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
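get_version expects moto/__init__.py to expose a module-level __version__ assignment. A tiny check of the regex against illustrative file contents (the version value here is an assumption):
import re

version_file = "__title__ = 'moto'\n__version__ = '1.3.8'\n"  # illustrative contents
match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE)
assert match.group(1) == '1.3.8'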
install_requires = [
"Jinja2>=2.7.3",
"Jinja2>=2.10.1",
"boto>=2.36.0",
"boto3>=1.6.16",
"botocore>=1.12.13",
"boto3>=1.9.86",
"botocore>=1.12.86",
"cryptography>=2.3.0",
"datetime",
"requests>=2.5",
"xmltodict",
"six>1.9",
"werkzeug",
"PyYAML",
"PyYAML>=5.1",
"pytz",
"python-dateutil<3.0.0,>=2.1",
"python-jose<4.0.0",
@ -37,8 +47,9 @@ install_requires = [
"jsondiff==1.1.2",
"aws-xray-sdk!=0.96,>=0.93",
"responses>=0.9.0",
"idna<2.8,>=2.5",
"idna<2.9,>=2.5",
"cfn-lint",
"sshpubkeys>=3.1.0,<4.0"
]
extras_require = {
@ -55,7 +66,7 @@ else:
setup(
name='moto',
version='1.3.7',
version=get_version(),
description='A library that allows your python tests to easily'
' mock out the boto library',
long_description=read('README.md'),
@ -78,10 +89,10 @@ setup(
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],

View File

@ -32,7 +32,7 @@ def test_create_autoscaling_group():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a', 'us-east-1b'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
@ -42,7 +42,10 @@ def test_create_autoscaling_group():
launch_config=config,
load_balancers=["test_lb"],
placement_group="test_placement",
vpc_zone_identifier=mocked_networking['subnet1'],
vpc_zone_identifier="{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
),
termination_policies=["OldestInstance", "NewestInstance"],
tags=[Tag(
resource_id='tester_group',
@ -57,12 +60,15 @@ def test_create_autoscaling_group():
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
set(group.availability_zones).should.equal(
set(['us-east-1c', 'us-east-1b']))
set(['us-east-1a', 'us-east-1b']))
group.desired_capacity.should.equal(2)
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.instances.should.have.length_of(2)
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
))
group.launch_config_name.should.equal('tester')
group.default_cooldown.should.equal(60)
group.health_check_period.should.equal(100)
@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults():
group.launch_config_name.should.equal('tester')
# Defaults
list(group.availability_zones).should.equal([])
list(group.availability_zones).should.equal(['us-east-1a']) # subnet1
group.desired_capacity.should.equal(2)
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.default_cooldown.should.equal(300)
@ -217,7 +223,6 @@ def test_autoscaling_update():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -227,13 +232,16 @@ def test_autoscaling_update():
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.availability_zones.should.equal(['us-east-1a'])
group.vpc_zone_identifier.should.equal(mocked_networking['subnet1'])
group.vpc_zone_identifier = 'subnet-5678efgh'
group.availability_zones = ['us-east-1b']
group.vpc_zone_identifier = mocked_networking['subnet2']
group.update()
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-5678efgh')
group.availability_zones.should.equal(['us-east-1b'])
group.vpc_zone_identifier.should.equal(mocked_networking['subnet2'])
@mock_autoscaling_deprecated
@ -249,7 +257,7 @@ def test_autoscaling_tags_update():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -309,7 +317,7 @@ def test_autoscaling_group_delete():
@mock_autoscaling_deprecated
def test_autoscaling_group_describe_instances():
mocked_networking = setup_networking_deprecated()
conn = boto.connect_autoscale()
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances():
instances[0].health_status.should.equal('Healthy')
autoscale_instance_ids = [instance.instance_id for instance in instances]
ec2_conn = boto.connect_ec2()
ec2_conn = boto.ec2.connect_to_region('us-east-1')
reservations = ec2_conn.get_all_instances()
instances = reservations[0].instances
instances.should.have.length_of(2)
@ -355,7 +363,7 @@ def test_set_desired_capacity_up():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -391,7 +399,7 @@ def test_set_desired_capacity_down():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same():
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
availability_zones=['us-east-1a'],
desired_capacity=2,
max_size=2,
min_size=2,
@ -543,6 +551,7 @@ def test_describe_load_balancers():
)
response = client.describe_load_balancers(AutoScalingGroupName='test_asg')
assert response['ResponseMetadata']['RequestId']
list(response['LoadBalancers']).should.have.length_of(1)
response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb')
@ -738,8 +747,12 @@ def test_describe_autoscaling_groups_boto3():
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
group = response['AutoScalingGroups'][0]
group['AutoScalingGroupName'].should.equal('test_asg')
group['AvailabilityZones'].should.equal(['us-east-1a'])
group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1'])
group['NewInstancesProtectedFromScaleIn'].should.equal(True)
group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True)
for instance in group['Instances']:
instance['AvailabilityZone'].should.equal('us-east-1a')
instance['ProtectedFromScaleIn'].should.equal(True)
@mock_autoscaling
@ -770,6 +783,7 @@ def test_describe_autoscaling_instances_boto3():
response = client.describe_auto_scaling_instances(InstanceIds=instance_ids)
for instance in response['AutoScalingInstances']:
instance['AutoScalingGroupName'].should.equal('test_asg')
instance['AvailabilityZone'].should.equal('us-east-1a')
instance['ProtectedFromScaleIn'].should.equal(True)
@ -793,6 +807,10 @@ def test_update_autoscaling_group_boto3():
_ = client.update_auto_scaling_group(
AutoScalingGroupName='test_asg',
MinSize=1,
VPCZoneIdentifier="{subnet1},{subnet2}".format(
subnet1=mocked_networking['subnet1'],
subnet2=mocked_networking['subnet2'],
),
NewInstancesProtectedFromScaleIn=False,
)
@ -801,6 +819,7 @@ def test_update_autoscaling_group_boto3():
)
group = response['AutoScalingGroups'][0]
group['MinSize'].should.equal(1)
set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'})
group['NewInstancesProtectedFromScaleIn'].should.equal(False)

View File

@ -106,7 +106,7 @@ def test_detach_all_target_groups():
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
TargetGroupARNs=[target_group_arn],
VPCZoneIdentifier=mocked_networking['vpc'])
VPCZoneIdentifier=mocked_networking['subnet1'])
response = client.describe_load_balancer_target_groups(
AutoScalingGroupName='test_asg')

View File

@ -1,5 +1,6 @@
import boto
import boto3
from boto import vpc as boto_vpc
from moto import mock_ec2, mock_ec2_deprecated
@ -19,9 +20,14 @@ def setup_networking():
@mock_ec2_deprecated
def setup_networking_deprecated():
conn = boto.connect_vpc()
conn = boto_vpc.connect_to_region('us-east-1')
vpc = conn.create_vpc("10.11.0.0/16")
subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24")
subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24")
subnet1 = conn.create_subnet(
vpc.id,
"10.11.1.0/24",
availability_zone='us-east-1a')
subnet2 = conn.create_subnet(
vpc.id,
"10.11.2.0/24",
availability_zone='us-east-1b')
return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id}

View File

@ -12,6 +12,8 @@ import sure # noqa
from freezegun import freeze_time
from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings
from nose.tools import assert_raises
from botocore.exceptions import ClientError
_lambda_region = 'us-west-2'
@ -280,7 +282,7 @@ def test_create_function_from_aws_bucket():
result.pop('LastModified')
result.should.equal({
'FunctionName': 'testFunction',
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
'Runtime': 'python2.7',
'Role': 'test-iam-role',
'Handler': 'lambda_function.lambda_handler',
@ -289,7 +291,7 @@ def test_create_function_from_aws_bucket():
'Description': 'test lambda function',
'Timeout': 3,
'MemorySize': 128,
'Version': '$LATEST',
'Version': '1',
'VpcConfig': {
"SecurityGroupIds": ["sg-123abc"],
"SubnetIds": ["subnet-123abc"],
@ -325,7 +327,7 @@ def test_create_function_from_zipfile():
result.should.equal({
'FunctionName': 'testFunction',
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
'Runtime': 'python2.7',
'Role': 'test-iam-role',
'Handler': 'lambda_function.lambda_handler',
@ -334,7 +336,7 @@ def test_create_function_from_zipfile():
'Timeout': 3,
'MemorySize': 128,
'CodeSha256': hashlib.sha256(zip_content).hexdigest(),
'Version': '$LATEST',
'Version': '1',
'VpcConfig': {
"SecurityGroupIds": [],
"SubnetIds": [],
@ -396,6 +398,13 @@ def test_get_function():
# Test get function with
result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST')
result['Configuration']['Version'].should.equal('$LATEST')
result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST')
# Test get function when the function name can't be found
with assert_raises(ClientError):
conn.get_function(FunctionName='junk', Qualifier='$LATEST')
@mock_lambda
@ -457,14 +466,15 @@ def test_publish():
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
Publish=False,
)
function_list = conn.list_functions()
function_list['Functions'].should.have.length_of(1)
latest_arn = function_list['Functions'][0]['FunctionArn']
conn.publish_version(FunctionName='testFunction')
res = conn.publish_version(FunctionName='testFunction')
assert res['ResponseMetadata']['HTTPStatusCode'] == 201
function_list = conn.list_functions()
function_list['Functions'].should.have.length_of(2)
@ -477,7 +487,7 @@ def test_publish():
function_list = conn.list_functions()
function_list['Functions'].should.have.length_of(1)
function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST')
function_list['Functions'][0]['FunctionArn'].should.contain('testFunction')
@mock_lambda
@ -520,7 +530,7 @@ def test_list_create_list_get_delete_list():
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
"CodeSize": len(zip_content),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
"FunctionName": "testFunction",
"Handler": "lambda_function.lambda_handler",
"MemorySize": 128,
@ -733,7 +743,7 @@ def test_get_function_created_with_zipfile():
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
"CodeSize": len(zip_content),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region),
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
"FunctionName": "testFunction",
"Handler": "lambda_function.handler",
"MemorySize": 128,
@ -819,3 +829,107 @@ def get_function_policy():
assert isinstance(response['Policy'], str)
res = json.loads(response['Policy'])
assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction'
@mock_lambda
@mock_s3
def test_list_versions_by_function():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')
zip_content = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')
conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='arn:aws:iam::123456789012:role/test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
res = conn.publish_version(FunctionName='testFunction')
assert res['ResponseMetadata']['HTTPStatusCode'] == 201
versions = conn.list_versions_by_function(FunctionName='testFunction')
assert len(versions['Versions']) == 3
assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST'
assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1'
assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2'
conn.create_function(
FunctionName='testFunction_2',
Runtime='python2.7',
Role='arn:aws:iam::123456789012:role/test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=False,
)
versions = conn.list_versions_by_function(FunctionName='testFunction_2')
assert len(versions['Versions']) == 1
assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST'
@mock_lambda
@mock_s3
def test_create_function_with_already_exists():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')
zip_content = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')
conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
response = conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
assert response['FunctionName'] == 'testFunction'
@mock_lambda
@mock_s3
def test_list_versions_by_function_for_nonexistent_function():
conn = boto3.client('lambda', 'us-west-2')
versions = conn.list_versions_by_function(FunctionName='testFunction')
assert len(versions['Versions']) == 0

View File

@ -323,6 +323,54 @@ def test_create_job_queue():
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
# Create job queue which already exists
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
# Create job queue with incorrect state
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue2',
state='JUNK',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
# Create job queue with no compute env
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue3',
state='JUNK',
priority=123,
computeEnvironmentOrder=[
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
@mock_ec2
@mock_ecs
@ -397,6 +445,17 @@ def test_update_job_queue():
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
batch_client.update_job_queue(
jobQueue='test_job_queue',
priority=5
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
@mock_ec2
@mock_ecs

View File

@ -29,6 +29,10 @@ template = {
"NinjaENI": {
"Description": "Elastic IP mapping to Auto-Scaling Group",
"Value": {"Ref": "ENI"}
},
"ENIIpAddress": {
"Description": "ENI's Private IP address",
"Value": {"Fn::GetAtt": ["ENI", "PrimaryPrivateIpAddress"]}
}
}
}

View File

@ -184,6 +184,423 @@ dummy_import_template_json = json.dumps(dummy_import_template)
dummy_redrive_template_json = json.dumps(dummy_redrive_template)
@mock_cloudformation
def test_boto3_describe_stack_instances():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
)
usw2_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-west-2',
)
use1_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-east-1',
)
usw2_instance['StackInstance'].should.have.key('Region').which.should.equal('us-west-2')
usw2_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012')
use1_instance['StackInstance'].should.have.key('Region').which.should.equal('us-east-1')
use1_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012')
@mock_cloudformation
def test_boto3_list_stacksets_length():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_set(
StackSetName="test_stack_set2",
TemplateBody=dummy_template_yaml,
)
stacksets = cf_conn.list_stack_sets()
stacksets.should.have.length_of(2)
@mock_cloudformation
def test_boto3_list_stacksets_contents():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
stacksets = cf_conn.list_stack_sets()
stacksets['Summaries'][0].should.have.key('StackSetName').which.should.equal('test_stack_set')
stacksets['Summaries'][0].should.have.key('Status').which.should.equal('ACTIVE')
@mock_cloudformation
def test_boto3_stop_stack_set_operation():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-1', 'us-west-2'],
)
operation_id = cf_conn.list_stack_set_operations(
StackSetName="test_stack_set")['Summaries'][-1]['OperationId']
cf_conn.stop_stack_set_operation(
StackSetName="test_stack_set",
OperationId=operation_id
)
list_operation = cf_conn.list_stack_set_operations(
StackSetName="test_stack_set"
)
list_operation['Summaries'][-1]['Status'].should.equal('STOPPED')
@mock_cloudformation
def test_boto3_describe_stack_set_operation():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-1', 'us-west-2'],
)
operation_id = cf_conn.list_stack_set_operations(
StackSetName="test_stack_set")['Summaries'][-1]['OperationId']
cf_conn.stop_stack_set_operation(
StackSetName="test_stack_set",
OperationId=operation_id
)
response = cf_conn.describe_stack_set_operation(
StackSetName="test_stack_set",
OperationId=operation_id,
)
response['StackSetOperation']['Status'].should.equal('STOPPED')
response['StackSetOperation']['Action'].should.equal('CREATE')
@mock_cloudformation
def test_boto3_list_stack_set_operation_results():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-1', 'us-west-2'],
)
operation_id = cf_conn.list_stack_set_operations(
StackSetName="test_stack_set")['Summaries'][-1]['OperationId']
cf_conn.stop_stack_set_operation(
StackSetName="test_stack_set",
OperationId=operation_id
)
response = cf_conn.list_stack_set_operation_results(
StackSetName="test_stack_set",
OperationId=operation_id,
)
response['Summaries'].should.have.length_of(3)
response['Summaries'][0].should.have.key('Account').which.should.equal('123456789012')
response['Summaries'][1].should.have.key('Status').which.should.equal('STOPPED')
@mock_cloudformation
def test_boto3_update_stack_instances():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
param = [
{'ParameterKey': 'SomeParam', 'ParameterValue': 'StackSetValue'},
{'ParameterKey': 'AnotherParam', 'ParameterValue': 'StackSetValue2'},
]
param_overrides = [
{'ParameterKey': 'SomeParam', 'ParameterValue': 'OverrideValue'},
{'ParameterKey': 'AnotherParam', 'ParameterValue': 'OverrideValue2'}
]
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=param,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-1', 'us-west-2'],
)
cf_conn.update_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-west-1', 'us-west-2'],
ParameterOverrides=param_overrides,
)
usw2_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-west-2',
)
usw1_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-west-1',
)
use1_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-east-1',
)
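# Overrides should apply only to the regions passed to update_stack_instances;
# us-east-1 was not updated, so it keeps the stack set's original parameters.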
usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey'])
usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue'])
usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey'])
usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue'])
usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey'])
usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue'])
usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey'])
usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue'])
use1_instance['StackInstance']['ParameterOverrides'].should.be.empty
@mock_cloudformation
def test_boto3_delete_stack_instances():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
)
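# Remove only the us-east-1 instance; the us-west-2 instance should survive.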
cf_conn.delete_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1'],
RetainStacks=False,
)
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(1)
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Region'].should.equal(
'us-west-2')
@mock_cloudformation
def test_boto3_create_stack_instances():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
)
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(2)
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Account'].should.equal(
'123456789012')
@mock_cloudformation
def test_boto3_create_stack_instances_with_param_overrides():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
param = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'},
{'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'},
]
param_overrides = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'},
{'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'}
]
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=param,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
ParameterOverrides=param_overrides,
)
usw2_instance = cf_conn.describe_stack_instance(
StackSetName="test_stack_set",
StackInstanceAccount='123456789012',
StackInstanceRegion='us-west-2',
)
usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey'])
usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey'])
usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue'])
usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue'])
@mock_cloudformation
def test_update_stack_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
param = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'},
{'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'},
]
param_overrides = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'},
{'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'}
]
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=param,
)
cf_conn.update_stack_set(
StackSetName='test_stack_set',
TemplateBody=dummy_template_yaml_with_ref,
Parameters=param_overrides,
)
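# describe_stack_set should now report the updated parameter values.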
stackset = cf_conn.describe_stack_set(StackSetName='test_stack_set')
stackset['StackSet']['Parameters'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue'])
stackset['StackSet']['Parameters'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue'])
stackset['StackSet']['Parameters'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey'])
stackset['StackSet']['Parameters'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey'])
@mock_cloudformation
def test_boto3_list_stack_set_operations():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.create_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
)
cf_conn.update_stack_instances(
StackSetName="test_stack_set",
Accounts=['123456789012'],
Regions=['us-east-1', 'us-west-2'],
)
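# Two operations are expected: the CREATE from create_stack_instances and the UPDATE just issued.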
list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")
list_operation['Summaries'].should.have.length_of(2)
list_operation['Summaries'][-1]['Action'].should.equal('UPDATE')
@mock_cloudformation
def test_boto3_delete_stack_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.delete_stack_set(StackSetName='test_stack_set')
cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['Status'].should.equal(
'DELETED')
@mock_cloudformation
def test_boto3_create_stack_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_json,
)
cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal(
dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack_set_with_yaml():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack_set(
StackSetName="test_stack_set",
TemplateBody=dummy_template_yaml,
)
cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal(
dummy_template_yaml)
@mock_cloudformation
@mock_s3
def test_create_stack_set_from_s3_url():
s3 = boto3.client('s3')
s3_conn = boto3.resource('s3')
bucket = s3_conn.create_bucket(Bucket="foobar")
key = s3_conn.Object(
'foobar', 'template-key').put(Body=dummy_template_json)
key_url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'foobar',
'Key': 'template-key'
}
)
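# moto should resolve the presigned TemplateURL against the mocked bucket and store the template body.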
cf_conn = boto3.client('cloudformation', region_name='us-west-1')
cf_conn.create_stack_set(
StackSetName='stack_from_url',
TemplateURL=key_url,
)
cf_conn.describe_stack_set(StackSetName="stack_from_url")['StackSet']['TemplateBody'].should.equal(
dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack_set_with_ref_yaml():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
params = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'},
{'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'},
]
cf_conn.create_stack_set(
StackSetName="test_stack",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=params
)
cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['TemplateBody'].should.equal(
dummy_template_yaml_with_ref)
@mock_cloudformation
def test_boto3_describe_stack_set_params():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
params = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'},
{'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'},
]
cf_conn.create_stack_set(
StackSetName="test_stack",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=params
)
cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['Parameters'].should.equal(
params)
@mock_cloudformation
def test_boto3_create_stack():
@ -399,6 +816,32 @@ def test_create_change_set_from_s3_url():
assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId']
@mock_cloudformation
def test_describe_change_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)
stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
stack['ChangeSetName'].should.equal('NewChangeSet')
stack['StackName'].should.equal('NewStack')
cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_update_template_json,
ChangeSetName='NewChangeSet2',
ChangeSetType='UPDATE',
)
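# Describing the UPDATE change set should surface the resource-level changes.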
stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2")
stack['ChangeSetName'].should.equal('NewChangeSet2')
stack['StackName'].should.equal('NewStack')
stack['Changes'].should.have.length_of(2)
@mock_cloudformation
def test_execute_change_set_w_arn():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
@ -420,7 +863,7 @@ def test_execute_change_set_w_name():
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)
cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack')
cf_conn.execute_change_set(ChangeSetName='NewChangeSet', StackName='NewStack')
@mock_cloudformation
@ -489,6 +932,20 @@ def test_describe_stack_by_stack_id():
stack_by_id['StackName'].should.equal("test_stack")
@mock_cloudformation
def test_list_change_sets():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack2',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet2',
ChangeSetType='CREATE',
)
change_set = cf_conn.list_change_sets(StackName='NewStack2')['Summaries'][0]
change_set['StackName'].should.equal('NewStack2')
change_set['ChangeSetName'].should.equal('NewChangeSet2')
@mock_cloudformation
def test_list_stacks():
cf = boto3.resource('cloudformation', region_name='us-east-1')
@ -521,6 +978,22 @@ def test_delete_stack_from_resource():
list(cf.stacks.all()).should.have.length_of(0)
@mock_cloudformation
@mock_ec2
def test_delete_change_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)
cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(1)
cf_conn.delete_change_set(ChangeSetName='NewChangeSet', StackName='NewStack')
cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(0)
@mock_cloudformation
@mock_ec2
def test_delete_stack_by_name():

View File

@ -2,6 +2,8 @@ from __future__ import unicode_literals
import json
import base64
from decimal import Decimal
import boto
import boto.cloudformation
import boto.datapipeline
@ -22,6 +24,7 @@ from moto import (
mock_cloudformation,
mock_cloudformation_deprecated,
mock_datapipeline_deprecated,
mock_dynamodb2,
mock_ec2,
mock_ec2_deprecated,
mock_elb,
@ -39,6 +42,7 @@ from moto import (
mock_sqs,
mock_sqs_deprecated,
mock_elbv2)
from moto.dynamodb2.models import Table
from .fixtures import (
ec2_classic_eip,
@ -2085,7 +2089,7 @@ def test_stack_kms():
def test_stack_spot_fleet():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
@ -2169,7 +2173,7 @@ def test_stack_spot_fleet():
def test_stack_spot_fleet_should_figure_out_default_price():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
@ -2433,3 +2437,131 @@ def test_stack_elbv2_resources_integration():
dns['OutputValue'].should.equal(load_balancers[0]['DNSName'])
name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName'])
@mock_dynamodb2
@mock_cloudformation
def test_stack_dynamodb_resources_integration():
dynamodb_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"myDynamoDBTable": {
"Type": "AWS::DynamoDB::Table",
"Properties": {
"AttributeDefinitions": [
{
"AttributeName": "Album",
"AttributeType": "S"
},
{
"AttributeName": "Artist",
"AttributeType": "S"
},
{
"AttributeName": "Sales",
"AttributeType": "N"
},
{
"AttributeName": "NumberOfSongs",
"AttributeType": "N"
}
],
"KeySchema": [
{
"AttributeName": "Album",
"KeyType": "HASH"
},
{
"AttributeName": "Artist",
"KeyType": "RANGE"
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
},
"TableName": "myTableName",
"GlobalSecondaryIndexes": [{
"IndexName": "myGSI",
"KeySchema": [
{
"AttributeName": "Sales",
"KeyType": "HASH"
},
{
"AttributeName": "Artist",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Album","NumberOfSongs"],
"ProjectionType": "INCLUDE"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
}
},
{
"IndexName": "myGSI2",
"KeySchema": [
{
"AttributeName": "NumberOfSongs",
"KeyType": "HASH"
},
{
"AttributeName": "Sales",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Album","Artist"],
"ProjectionType": "INCLUDE"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": "5",
"WriteCapacityUnits": "5"
}
}],
"LocalSecondaryIndexes":[{
"IndexName": "myLSI",
"KeySchema": [
{
"AttributeName": "Album",
"KeyType": "HASH"
},
{
"AttributeName": "Sales",
"KeyType": "RANGE"
}
],
"Projection": {
"NonKeyAttributes": ["Artist","NumberOfSongs"],
"ProjectionType": "INCLUDE"
}
}]
}
}
}
}
dynamodb_template_json = json.dumps(dynamodb_template)
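# Creating the stack should provision the table together with both GSIs and the LSI defined above.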
cfn_conn = boto3.client('cloudformation', 'us-east-1')
cfn_conn.create_stack(
StackName='dynamodb_stack',
TemplateBody=dynamodb_template_json,
)
dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb_conn.Table('myTableName')
table.name.should.equal('myTableName')
table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5})
response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"})
response['Item']['Album'].should.equal('myAlbum')
response['Item']['Sales'].should.equal(Decimal('10'))
response['Item']['NumberOfSongs'].should.equal(Decimal('5'))

View File

@ -83,6 +83,18 @@ get_availability_zones_output = {
}
}
parameters = {
"Parameters": {
"Param": {
"Type": "String",
},
"NoEchoParam": {
"Type": "String",
"NoEcho": True
}
}
}
split_select_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
@ -157,6 +169,9 @@ get_attribute_outputs_template = dict(
get_availability_zones_template = dict(
list(dummy_template.items()) + list(get_availability_zones_output.items()))
parameters_template = dict(
list(dummy_template.items()) + list(parameters.items()))
dummy_template_json = json.dumps(dummy_template)
name_type_template_json = json.dumps(name_type_template)
output_type_template_json = json.dumps(outputs_template)
@ -165,6 +180,7 @@ get_attribute_outputs_template_json = json.dumps(
get_attribute_outputs_template)
get_availability_zones_template_json = json.dumps(
get_availability_zones_template)
parameters_template_json = json.dumps(parameters_template)
split_select_template_json = json.dumps(split_select_template)
sub_template_json = json.dumps(sub_template)
export_value_template_json = json.dumps(export_value_template)
@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs():
"test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)
def test_parse_stack_with_parameters():
stack = FakeStack(
stack_id="test_id",
name="test_stack",
template=parameters_template_json,
parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
region_name='us-west-1')
stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
def test_parse_equals_condition():
parse_condition(
condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
@ -448,8 +476,8 @@ def test_short_form_func_in_yaml_teamplate():
KeySplit: !Split [A, B]
KeySub: !Sub A
"""
yaml.add_multi_constructor('', yaml_tag_constructor)
template_dict = yaml.load(template)
yaml.add_multi_constructor('', yaml_tag_constructor, Loader=yaml.Loader)
template_dict = yaml.load(template, Loader=yaml.Loader)
key_and_expects = [
['KeyRef', {'Ref': 'foo'}],
['KeyB64', {'Fn::Base64': 'valueToEncode'}],

View File

@ -484,6 +484,82 @@ def test_describe_identity_providers():
result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value)
@mock_cognitoidp
def test_update_identity_provider():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
value = str(uuid.uuid4())
new_value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails={
"thing": value
},
)
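# Updating only ProviderDetails should leave the provider's name and type untouched.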
result = conn.update_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderDetails={
"thing": new_value
},
)
result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id)
result["IdentityProvider"]["ProviderName"].should.equal(provider_name)
result["IdentityProvider"]["ProviderType"].should.equal(provider_type)
result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value)
@mock_cognitoidp
def test_update_identity_provider_no_user_pool():
conn = boto3.client("cognito-idp", "us-west-2")
new_value = str(uuid.uuid4())
with assert_raises(conn.exceptions.ResourceNotFoundException) as cm:
conn.update_identity_provider(
UserPoolId="foo",
ProviderName="bar",
ProviderDetails={
"thing": new_value
},
)
cm.exception.operation_name.should.equal('UpdateIdentityProvider')
cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException')
cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
@mock_cognitoidp
def test_update_identity_provider_no_identity_provider():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
value = str(uuid.uuid4())
new_value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
with assert_raises(conn.exceptions.ResourceNotFoundException) as cm:
conn.update_identity_provider(
UserPoolId=user_pool_id,
ProviderName="foo",
ProviderDetails={
"thing": new_value
},
)
cm.exception.operation_name.should.equal('UpdateIdentityProvider')
cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException')
cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
@mock_cognitoidp
def test_delete_identity_providers():
conn = boto3.client("cognito-idp", "us-west-2")
@ -1086,3 +1162,53 @@ def test_confirm_forgot_password():
ConfirmationCode=str(uuid.uuid4()),
Password=str(uuid.uuid4()),
)
@mock_cognitoidp
def test_admin_update_user_attributes():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{
'Name': 'family_name',
'Value': 'Doe',
},
{
'Name': 'given_name',
'Value': 'John',
}
]
)
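# Re-submitting family_name unchanged while updating given_name should overwrite the attributes in place.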
conn.admin_update_user_attributes(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{
'Name': 'family_name',
'Value': 'Doe',
},
{
'Name': 'given_name',
'Value': 'Jane',
}
]
)
user = conn.admin_get_user(
UserPoolId=user_pool_id,
Username=username
)
attributes = user['UserAttributes']
attributes.should.be.a(list)
for attr in attributes:
val = attr['Value']
if attr['Name'] == 'family_name':
val.should.equal('Doe')
elif attr['Name'] == 'given_name':
val.should.equal('Jane')

View File

@ -0,0 +1,491 @@
from datetime import datetime, timedelta
import boto3
from botocore.exceptions import ClientError
from nose.tools import assert_raises
from moto.config import mock_config
@mock_config
def test_put_configuration_recorder():
client = boto3.client('config', region_name='us-west-2')
# Try without a name supplied:
with assert_raises(ClientError) as ce:
client.put_configuration_recorder(ConfigurationRecorder={'roleARN': 'somearn'})
assert ce.exception.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException'
assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']
# Try with a really long name:
with assert_raises(ClientError) as ce:
client.put_configuration_recorder(ConfigurationRecorder={'name': 'a' * 257, 'roleARN': 'somearn'})
assert ce.exception.response['Error']['Code'] == 'ValidationException'
assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']
# With resource types and flags set to True:
bad_groups = [
{'allSupported': True, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
{'allSupported': False, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
{'allSupported': True, 'includeGlobalResourceTypes': False, 'resourceTypes': ['item']},
{'allSupported': False, 'includeGlobalResourceTypes': False, 'resourceTypes': []},
{'includeGlobalResourceTypes': False, 'resourceTypes': []},
{'includeGlobalResourceTypes': True},
{'resourceTypes': []},
{}
]
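# Each variant above combines allSupported, includeGlobalResourceTypes and resourceTypes in a way the API rejects.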
for bg in bad_groups:
with assert_raises(ClientError) as ce:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'default',
'roleARN': 'somearn',
'recordingGroup': bg
})
assert ce.exception.response['Error']['Code'] == 'InvalidRecordingGroupException'
assert ce.exception.response['Error']['Message'] == 'The recording group provided is not valid'
# With an invalid Resource Type:
with assert_raises(ClientError) as ce:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'default',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
# 2 good, and 2 bad:
'resourceTypes': ['AWS::EC2::Volume', 'LOLNO', 'AWS::EC2::VPC', 'LOLSTILLNO']
}
})
assert ce.exception.response['Error']['Code'] == 'ValidationException'
assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str(ce.exception.response['Error']['Message'])
assert 'AWS::EC2::Instance' in ce.exception.response['Error']['Message']
# Create a proper one:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
result = client.describe_configuration_recorders()['ConfigurationRecorders']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert result[0]['roleARN'] == 'somearn'
assert not result[0]['recordingGroup']['allSupported']
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
assert len(result[0]['recordingGroup']['resourceTypes']) == 2
assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']
# Now update the configuration recorder:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': True,
'includeGlobalResourceTypes': True
}
})
result = client.describe_configuration_recorders()['ConfigurationRecorders']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert result[0]['roleARN'] == 'somearn'
assert result[0]['recordingGroup']['allSupported']
assert result[0]['recordingGroup']['includeGlobalResourceTypes']
assert len(result[0]['recordingGroup']['resourceTypes']) == 0
# With a default recording group (i.e. lacking one)
client.put_configuration_recorder(ConfigurationRecorder={'name': 'testrecorder', 'roleARN': 'somearn'})
result = client.describe_configuration_recorders()['ConfigurationRecorders']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert result[0]['roleARN'] == 'somearn'
assert result[0]['recordingGroup']['allSupported']
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
assert not result[0]['recordingGroup'].get('resourceTypes')
# Can currently only have exactly 1 Config Recorder in an account/region:
with assert_raises(ClientError) as ce:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'someotherrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
}
})
assert ce.exception.response['Error']['Code'] == 'MaxNumberOfConfigurationRecordersExceededException'
assert "maximum number of configuration recorders: 1 is reached." in ce.exception.response['Error']['Message']
@mock_config
def test_describe_configurations():
client = boto3.client('config', region_name='us-west-2')
# Without any configurations:
result = client.describe_configuration_recorders()
assert not result['ConfigurationRecorders']
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
result = client.describe_configuration_recorders()['ConfigurationRecorders']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert result[0]['roleARN'] == 'somearn'
assert not result[0]['recordingGroup']['allSupported']
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
assert len(result[0]['recordingGroup']['resourceTypes']) == 2
assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']
# Specify an incorrect name:
with assert_raises(ClientError) as ce:
client.describe_configuration_recorders(ConfigurationRecorderNames=['wrong'])
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
assert 'wrong' in ce.exception.response['Error']['Message']
# And with both a good and wrong name:
with assert_raises(ClientError) as ce:
client.describe_configuration_recorders(ConfigurationRecorderNames=['testrecorder', 'wrong'])
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
assert 'wrong' in ce.exception.response['Error']['Message']
@mock_config
def test_delivery_channels():
client = boto3.client('config', region_name='us-west-2')
# Try without a config recorder:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={})
assert ce.exception.response['Error']['Code'] == 'NoAvailableConfigurationRecorderException'
assert ce.exception.response['Error']['Message'] == 'Configuration recorder is not available to ' \
'put delivery channel.'
# Create a config recorder to continue testing:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Try without a name supplied:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={})
assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryChannelNameException'
assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']
# Try with a really long name:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={'name': 'a' * 257})
assert ce.exception.response['Error']['Code'] == 'ValidationException'
assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']
# Without specifying a bucket name:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel'})
assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': ''})
assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'
# With an empty string for the S3 key prefix:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={
'name': 'testchannel', 's3BucketName': 'somebucket', 's3KeyPrefix': ''})
assert ce.exception.response['Error']['Code'] == 'InvalidS3KeyPrefixException'
assert 'empty s3 key prefix.' in ce.exception.response['Error']['Message']
# With an empty string for the SNS ARN:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={
'name': 'testchannel', 's3BucketName': 'somebucket', 'snsTopicARN': ''})
assert ce.exception.response['Error']['Code'] == 'InvalidSNSTopicARNException'
assert 'The sns topic arn' in ce.exception.response['Error']['Message']
# With an invalid delivery frequency:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={
'name': 'testchannel',
's3BucketName': 'somebucket',
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'WRONG'}
})
assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryFrequency'
assert 'WRONG' in ce.exception.response['Error']['Message']
assert 'TwentyFour_Hours' in ce.exception.response['Error']['Message']
# Create a proper one:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
result = client.describe_delivery_channels()['DeliveryChannels']
assert len(result) == 1
assert len(result[0].keys()) == 2
assert result[0]['name'] == 'testchannel'
assert result[0]['s3BucketName'] == 'somebucket'
# Overwrite it with another proper configuration:
client.put_delivery_channel(DeliveryChannel={
'name': 'testchannel',
's3BucketName': 'somebucket',
'snsTopicARN': 'sometopicarn',
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
})
result = client.describe_delivery_channels()['DeliveryChannels']
assert len(result) == 1
assert len(result[0].keys()) == 4
assert result[0]['name'] == 'testchannel'
assert result[0]['s3BucketName'] == 'somebucket'
assert result[0]['snsTopicARN'] == 'sometopicarn'
assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'
# Can only have 1:
with assert_raises(ClientError) as ce:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel2', 's3BucketName': 'somebucket'})
assert ce.exception.response['Error']['Code'] == 'MaxNumberOfDeliveryChannelsExceededException'
assert 'because the maximum number of delivery channels: 1 is reached.' in ce.exception.response['Error']['Message']
@mock_config
def test_describe_delivery_channels():
client = boto3.client('config', region_name='us-west-2')
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Without any channels:
result = client.describe_delivery_channels()
assert not result['DeliveryChannels']
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
result = client.describe_delivery_channels()['DeliveryChannels']
assert len(result) == 1
assert len(result[0].keys()) == 2
assert result[0]['name'] == 'testchannel'
assert result[0]['s3BucketName'] == 'somebucket'
# Overwrite it with another proper configuration:
client.put_delivery_channel(DeliveryChannel={
'name': 'testchannel',
's3BucketName': 'somebucket',
'snsTopicARN': 'sometopicarn',
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
})
result = client.describe_delivery_channels()['DeliveryChannels']
assert len(result) == 1
assert len(result[0].keys()) == 4
assert result[0]['name'] == 'testchannel'
assert result[0]['s3BucketName'] == 'somebucket'
assert result[0]['snsTopicARN'] == 'sometopicarn'
assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'
# Specify an incorrect name:
with assert_raises(ClientError) as ce:
client.describe_delivery_channels(DeliveryChannelNames=['wrong'])
assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
assert 'wrong' in ce.exception.response['Error']['Message']
# And with both a good and wrong name:
with assert_raises(ClientError) as ce:
client.describe_delivery_channels(DeliveryChannelNames=['testchannel', 'wrong'])
assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
assert 'wrong' in ce.exception.response['Error']['Message']
@mock_config
def test_start_configuration_recorder():
client = boto3.client('config', region_name='us-west-2')
# Without a config recorder:
with assert_raises(ClientError) as ce:
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
# Make the config recorder:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Without a delivery channel:
with assert_raises(ClientError) as ce:
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
assert ce.exception.response['Error']['Code'] == 'NoAvailableDeliveryChannelException'
# Make the delivery channel:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
# Start it:
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
# Verify it's enabled:
result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
lower_bound = (datetime.utcnow() - timedelta(minutes=5))
assert result[0]['recording']
assert result[0]['lastStatus'] == 'PENDING'
assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()
@mock_config
def test_stop_configuration_recorder():
client = boto3.client('config', region_name='us-west-2')
# Without a config recorder:
with assert_raises(ClientError) as ce:
client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
# Make the config recorder:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Make the delivery channel for creation:
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
# Start it:
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')
# Verify it's disabled:
result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
lower_bound = (datetime.utcnow() - timedelta(minutes=5))
assert not result[0]['recording']
assert result[0]['lastStatus'] == 'PENDING'
assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
assert lower_bound < result[0]['lastStopTime'].replace(tzinfo=None) <= datetime.utcnow()
assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()
@mock_config
def test_describe_configuration_recorder_status():
client = boto3.client('config', region_name='us-west-2')
# Without any:
result = client.describe_configuration_recorder_status()
assert not result['ConfigurationRecordersStatus']
# Make the config recorder:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Without specifying a config recorder:
result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert not result[0]['recording']
# With a proper name:
result = client.describe_configuration_recorder_status(
ConfigurationRecorderNames=['testrecorder'])['ConfigurationRecordersStatus']
assert len(result) == 1
assert result[0]['name'] == 'testrecorder'
assert not result[0]['recording']
# Invalid name:
with assert_raises(ClientError) as ce:
client.describe_configuration_recorder_status(ConfigurationRecorderNames=['testrecorder', 'wrong'])
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
assert 'wrong' in ce.exception.response['Error']['Message']
@mock_config
def test_delete_configuration_recorder():
client = boto3.client('config', region_name='us-west-2')
# Make the config recorder:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
# Delete it:
client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')
# Try again -- it should be deleted:
with assert_raises(ClientError) as ce:
client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
@mock_config
def test_delete_delivery_channel():
client = boto3.client('config', region_name='us-west-2')
# Need a recorder to test the constraint on recording being enabled:
client.put_configuration_recorder(ConfigurationRecorder={
'name': 'testrecorder',
'roleARN': 'somearn',
'recordingGroup': {
'allSupported': False,
'includeGlobalResourceTypes': False,
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
}
})
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
# With the recorder enabled:
with assert_raises(ClientError) as ce:
client.delete_delivery_channel(DeliveryChannelName='testchannel')
assert ce.exception.response['Error']['Code'] == 'LastDeliveryChannelDeleteFailedException'
assert 'because there is a running configuration recorder.' in ce.exception.response['Error']['Message']
# Stop recording:
client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')
# Try again:
client.delete_delivery_channel(DeliveryChannelName='testchannel')
# Verify:
with assert_raises(ClientError) as ce:
client.delete_delivery_channel(DeliveryChannelName='testchannel')
assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'

View File

@ -2,7 +2,9 @@ from __future__ import unicode_literals
import sure # noqa
from moto.core.responses import AWSServiceSpec
from botocore.awsrequest import AWSPreparedRequest
from moto.core.responses import AWSServiceSpec, BaseResponse
from moto.core.responses import flatten_json_request_body
@ -79,3 +81,9 @@ def test_flatten_json_request_body():
i += 1
key = keyfmt.format(idx + 1, i)
props.should.equal(body['Configurations'][idx]['Properties'])
def test_parse_qs_unicode_decode_error():
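# The body contains the invalid percent-escape %D0; setup_class should tolerate the
# resulting UnicodeDecodeError instead of crashing.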
body = b'{"key": "%D0"}, "C": "#0 = :0"}'
request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False)
BaseResponse().setup_class(request, request.url, request.headers)

View File

@ -0,0 +1,48 @@
import unittest
from moto import mock_dynamodb2_deprecated, mock_dynamodb2
import socket
from six import PY3
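# These tests guard against the socket mocks breaking socket.socketpair while active.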
class TestSocketPair(unittest.TestCase):
@mock_dynamodb2_deprecated
def test_asyncio_deprecated(self):
if PY3:
self.assertIn(
'moto.packages.httpretty.core.fakesock.socket',
str(socket.socket),
'Our mock should be present'
)
import asyncio
self.assertIsNotNone(asyncio.get_event_loop())
@mock_dynamodb2_deprecated
def test_socket_pair_deprecated(self):
# In Python2, the fakesocket is not set, for some reason.
if PY3:
self.assertIn(
'moto.packages.httpretty.core.fakesock.socket',
str(socket.socket),
'Our mock should be present'
)
a, b = socket.socketpair()
self.assertIsNotNone(a)
self.assertIsNotNone(b)
if a:
a.close()
if b:
b.close()
@mock_dynamodb2
def test_socket_pair(self):
a, b = socket.socketpair()
self.assertIsNotNone(a)
self.assertIsNotNone(b)
if a:
a.close()
if b:
b.close()

View File

@ -452,6 +452,90 @@ def test_basic_projection_expressions():
assert 'body' in results['Items'][1]
assert 'forum_name' in results['Items'][1]
@mock_dynamodb2
def test_basic_projection_expressions_using_scan():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'body': 'some test message'
})
table.put_item(Item={
'forum_name': 'not-the-key',
'subject': '123',
'body': 'some other test message'
})
# Test a scan returning all items
results = table.scan(
FilterExpression=Key('forum_name').eq(
'the-key'),
ProjectionExpression='body, subject'
)
assert 'body' in results['Items'][0]
assert results['Items'][0]['body'] == 'some test message'
assert 'subject' in results['Items'][0]
table.put_item(Item={
'forum_name': 'the-key',
'subject': '1234',
'body': 'yet another test message'
})
results = table.scan(
FilterExpression=Key('forum_name').eq(
'the-key'),
ProjectionExpression='body'
)
assert 'body' in results['Items'][0]
assert 'subject' not in results['Items'][0]
assert 'forum_name' not in results['Items'][0]
assert 'body' in results['Items'][1]
assert 'subject' not in results['Items'][1]
assert 'forum_name' not in results['Items'][1]
# The projection expression should not remove data from storage
results = table.query(
KeyConditionExpression=Key('forum_name').eq(
'the-key'),
)
assert 'subject' in results['Items'][0]
assert 'body' in results['Items'][1]
assert 'forum_name' in results['Items'][1]
@mock_dynamodb2
def test_basic_projection_expressions_with_attr_expression_names():
@ -519,6 +603,84 @@ def test_basic_projection_expressions_with_attr_expression_names():
assert 'attachment' in results['Items'][0]
assert results['Items'][0]['attachment'] == 'something'
@mock_dynamodb2
def test_basic_projection_expressions_using_scan_with_attr_expression_names():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'body': 'some test message',
'attachment': 'something'
})
table.put_item(Item={
'forum_name': 'not-the-key',
'subject': '123',
'body': 'some other test message',
'attachment': 'something'
})
# Test a scan returning all items
results = table.scan(
FilterExpression=Key('forum_name').eq(
'the-key'),
ProjectionExpression='#rl, #rt, subject',
ExpressionAttributeNames={
'#rl': 'body',
'#rt': 'attachment'
},
)
assert 'body' in results['Items'][0]
assert 'attachment' in results['Items'][0]
assert 'subject' in results['Items'][0]
assert 'forum_name' not in results['Items'][0]
# Test without a FilterExpression
results = table.scan(
ProjectionExpression='#rl, #rt, subject',
ExpressionAttributeNames={
'#rl': 'body',
'#rt': 'attachment'
},
)
assert 'body' in results['Items'][0]
assert 'attachment' in results['Items'][0]
assert 'subject' in results['Items'][0]
assert 'forum_name' not in results['Items'][0]
@mock_dynamodb2
def test_put_item_returns_consumed_capacity():
@ -949,6 +1111,33 @@ def test_bad_scan_filter():
raise RuntimeError('Should have raised ResourceInUseException')
@mock_dynamodb2
def test_create_table_pay_per_request():
client = boto3.client('dynamodb', region_name='us-east-1')
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
BillingMode="PAY_PER_REQUEST"
)
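# PAY_PER_REQUEST requires omitting ProvisionedThroughput; the conflicting combination is covered by the next test.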
@mock_dynamodb2
def test_create_table_error_pay_per_request_with_provisioned_param():
client = boto3.client('dynamodb', region_name='us-east-1')
try:
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123},
BillingMode="PAY_PER_REQUEST"
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ValidationException')
@mock_dynamodb2
def test_duplicate_create():
client = boto3.client('dynamodb', region_name='us-east-1')
@ -1505,3 +1694,220 @@ def test_dynamodb_streams_2():
assert 'LatestStreamLabel' in resp['TableDescription']
assert 'LatestStreamArn' in resp['TableDescription']
@mock_dynamodb2
def test_condition_expressions():
client = boto3.client('dynamodb', region_name='us-east-1')
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
},
ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match',
ExpressionAttributeNames={
'#existing': 'existing',
'#nonexistent': 'nope',
'#match': 'match',
},
ExpressionAttributeValues={
':match': {'S': 'match'}
}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
},
ConditionExpression='NOT(attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2))',
ExpressionAttributeNames={
'#nonexistent1': 'nope',
'#nonexistent2': 'nope2'
}
)
with assert_raises(client.exceptions.ConditionalCheckFailedException):
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
},
ConditionExpression='attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2)',
ExpressionAttributeNames={
'#nonexistent1': 'nope',
'#nonexistent2': 'nope2'
}
)
with assert_raises(client.exceptions.ConditionalCheckFailedException):
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
},
ConditionExpression='NOT(attribute_not_exists(#nonexistent1) AND attribute_not_exists(#nonexistent2))',
ExpressionAttributeNames={
'#nonexistent1': 'nope',
'#nonexistent2': 'nope2'
}
)
with assert_raises(client.exceptions.ConditionalCheckFailedException):
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'},
'match': {'S': 'match'},
'existing': {'S': 'existing'},
},
ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match',
ExpressionAttributeNames={
'#existing': 'existing',
'#nonexistent': 'nope',
'#match': 'match',
},
ExpressionAttributeValues={
':match': {'S': 'match2'}
}
)
@mock_dynamodb2
def test_query_gsi_with_range_key():
dynamodb = boto3.client('dynamodb', region_name='us-east-1')
dynamodb.create_table(
TableName='test',
KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
AttributeDefinitions=[
{'AttributeName': 'id', 'AttributeType': 'S'},
{'AttributeName': 'gsi_hash_key', 'AttributeType': 'S'},
{'AttributeName': 'gsi_range_key', 'AttributeType': 'S'}
],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
GlobalSecondaryIndexes=[
{
'IndexName': 'test_gsi',
'KeySchema': [
{
'AttributeName': 'gsi_hash_key',
'KeyType': 'HASH'
},
{
'AttributeName': 'gsi_range_key',
'KeyType': 'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
},
]
)
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': 'test1'},
'gsi_hash_key': {'S': 'key1'},
'gsi_range_key': {'S': 'range1'},
}
)
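# The second item carries no gsi_range_key, so it never appears in the index and must not match the query below.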
dynamodb.put_item(
TableName='test',
Item={
'id': {'S': 'test2'},
'gsi_hash_key': {'S': 'key1'},
}
)
res = dynamodb.query(TableName='test', IndexName='test_gsi',
KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key',
ExpressionAttributeValues={
':gsi_hash_key': {'S': 'key1'},
':gsi_range_key': {'S': 'range1'}
})
res.should.have.key("Count").equal(1)
res.should.have.key("Items")
res['Items'][0].should.equal({
'id': {'S': 'test1'},
'gsi_hash_key': {'S': 'key1'},
'gsi_range_key': {'S': 'range1'},
})
@mock_dynamodb2
def test_scan_by_non_exists_index():
dynamodb = boto3.client('dynamodb', region_name='us-east-1')
dynamodb.create_table(
TableName='test',
KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
AttributeDefinitions=[
{'AttributeName': 'id', 'AttributeType': 'S'},
{'AttributeName': 'gsi_col', 'AttributeType': 'S'}
],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
GlobalSecondaryIndexes=[
{
'IndexName': 'test_gsi',
'KeySchema': [
{
'AttributeName': 'gsi_col',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
}
},
]
)
with assert_raises(ClientError) as ex:
dynamodb.scan(TableName='test', IndexName='non_exists_index')
ex.exception.response['Error']['Code'].should.equal('ValidationException')
ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
ex.exception.response['Error']['Message'].should.equal(
'The table does not have the specified index: non_exists_index'
)

Some files were not shown because too many files have changed in this diff.