Merge remote-tracking branch 'upstream/master'
commit 66db4fffa0

.bumpversion.cfg | 7 (new file)
@@ -0,0 +1,7 @@
+[bumpversion]
+current_version = 1.3.4
+
+[bumpversion:file:setup.py]
+
+[bumpversion:file:moto/__init__.py]
+
.gitignore | 6 (vendored)
@@ -13,3 +13,9 @@ build/
 .DS_Store
 python_env
 .ropeproject/
+.pytest_cache/
+venv/
+.python-version
+.vscode/
+tests/file.tmp
+.eggs/
.travis.yml | 72
@@ -1,37 +1,57 @@
+dist: xenial
 language: python
 sudo: false
 services:
   - docker
 python:
   - 2.7
   - 3.6
+  - 3.7
 env:
   - TEST_SERVER_MODE=false
   - TEST_SERVER_MODE=true
 before_install:
   - export BOTO_CONFIG=/dev/null
 install:
   # We build moto first so the docker container doesn't try to compile it as well, also note we don't use
   # -d for docker run so the logs show up in travis
   # Python images come from here: https://hub.docker.com/_/python/
   - |
     python setup.py sdist

     if [ "$TEST_SERVER_MODE" = "true" ]; then
       docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh &
-      export AWS_SECRET_ACCESS_KEY=foobar_secret
-      export AWS_ACCESS_KEY_ID=foobar_key
     fi
     travis_retry pip install boto==2.45.0
     travis_retry pip install boto3
     travis_retry pip install dist/moto*.gz
     travis_retry pip install coveralls==1.1
     travis_retry pip install -r requirements-dev.txt

     if [ "$TEST_SERVER_MODE" = "true" ]; then
       python wait_for.py
     fi
 script:
   - make test
 after_success:
   - coveralls
 before_deploy:
   - git checkout $TRAVIS_BRANCH
   - git fetch --unshallow
   - python update_version_from_git.py
 deploy:
   - provider: pypi
     distributions: sdist bdist_wheel
     user: spulec
     password:
       secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
     on:
       branch:
         - master
     skip_cleanup: true
     skip_existing: true
 # - provider: pypi
 #   distributions: sdist bdist_wheel
 #   user: spulec
 #   password:
 #     secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
 #   on:
 #     tags: true
 #   skip_existing: true
AUTHORS.md | 10
@@ -47,3 +47,13 @@ Moto is written by Steve Pulec with contributions from:
 * [Adam Stauffer](https://github.com/adamstauffer)
 * [Guy Templeton](https://github.com/gjtempleton)
 * [Michael van Tellingen](https://github.com/mvantellingen)
+* [Jessie Nadler](https://github.com/nadlerjessie)
+* [Alex Morken](https://github.com/alexmorken)
+* [Clive Li](https://github.com/cliveli)
+* [Jim Shields](https://github.com/jimjshields)
+* [William Richard](https://github.com/william-richard)
+* [Alex Casalboni](https://github.com/alexcasalboni)
+* [Jon Beilke](https://github.com/jrbeilke)
+* [Bendeguz Acs](https://github.com/acsbendi)
+* [Craig Anderson](https://github.com/craiga)
+* [Robert Lewis](https://github.com/ralewis85)
CHANGELOG.md | 89
@@ -1,8 +1,95 @@
 Moto Changelog
 ===================

-Latest
+1.3.7
 -----

+* Switch from mocking requests to using before-send for AWS calls
+
+1.3.6
+-----
+
+* Fix boto3 pinning.
+
+1.3.5
+-----
+
+* Pin down botocore issue as temporary fix for #1793.
+* More features on secrets manager
+
+1.3.4
+------
+
+* IAM get account authorization details
+* adding account id to ManagedPolicy ARN
+* APIGateway usage plans and usage plan keys
+* ECR list images
+
+1.3.3
+------
+
+* Fix a regression in S3 url regexes
+* APIGateway region fixes
+* ECS improvements
+* Add @mock_cognitoidentity, thanks to @brcoding
+
+1.3.2
+------
+The huge change in this version is that the responses library is no longer vendored. Many developers are now unblocked. Kudos to @spulec for the fix.
+
+* Fix route53 TTL bug
+* Added filtering support for S3 lifecycle
+* unvendoring responses
+
 1.3.0
 ------

 Dozens of major endpoint additions in this release. Highlights include:

 * Fixed AMI tests and the Travis build setup
 * SNS improvements
 * Dynamodb improvements
 * EBS improvements
 * Redshift improvements
 * RDS snapshot improvements
 * S3 improvements
 * Cloudwatch improvements
 * SSM improvements
 * IAM improvements
 * ELBV1 and ELBV2 improvements
 * Lambda improvements
 * EC2 spot pricing improvements
 * ApiGateway improvements
 * VPC improvements

 1.2.0
 ------

 * Supports filtering AMIs by self
 * Implemented signal_workflow_execution for SWF
 * Wired SWF backend to the moto server
 * Added url decoding to x-amz-copy-source header for copying S3 files
 * Revamped lambda function storage to do versioning
 * IOT improvements
 * RDS improvements
 * Implemented CloudWatch get_metric_statistics
 * Improved Cloudformation EC2 support
 * Implemented Cloudformation change_set endpoints

 1.1.25
 -----

 * Implemented Iot and Iot-data
 * Implemented resource tagging API
 * EC2 AMIs now have owners
 * Improve codegen scaffolding
 * Many small fixes to EC2 support
 * CloudFormation ELBv2 support
 * UTF fixes for S3
 * Implemented SSM get_parameters_by_path
 * More advanced Dynamodb querying

 1.1.24
 -----

CONTRIBUTING.md
@@ -1,4 +1,29 @@
### Contributing code

If you have improvements to Moto, send us your pull requests! For those
just getting started, GitHub has a [howto](https://help.github.com/articles/using-pull-requests/).
Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md); you can expect to be treated with respect at all times when interacting with this project.

## Running the tests locally

Moto has a Makefile with some helpful commands for getting set up. You should be able to run `make init` to install the dependencies and then `make test` to run the tests.

## Is there a missing feature?

Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services.

How to teach Moto to support a new AWS endpoint:

* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done.
* Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description.
* Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py` (see the sketch after this list).
* If you can also implement the code that gets that test passing, that's great. If not, just ask the community for a hand and somebody will assist you.
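A failing test for a new endpoint can be as small as the sketch below. It is illustrative only: the PEM placeholders and the assertion are our assumptions, not the actual test from `tests/test_acm/test_acm.py`.

```python
import boto3
from moto import mock_acm

@mock_acm
def test_import_certificate():
    # Call the not-yet-implemented endpoint and assert on the response
    # shape you expect moto to return once it is supported.
    client = boto3.client("acm", region_name="us-east-1")
    resp = client.import_certificate(
        Certificate=b"-----BEGIN CERTIFICATE-----\n...placeholder...",
        PrivateKey=b"-----BEGIN RSA PRIVATE KEY-----\n...placeholder...",
    )
    assert "CertificateArn" in resp
```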
# Maintainers

## Releasing a new version of Moto

You'll need a PyPI account and a Docker Hub account to release Moto. After we release a new PyPI package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image.

* First, `scripts/bump_version` modifies the version and opens a PR
* Then, merge the new pull request
* Finally, generate and ship the new artifacts with `make publish`
(File diff suppressed because it is too large)

MANIFEST.in
@@ -2,5 +2,6 @@ include README.md LICENSE AUTHORS.md
 include requirements.txt requirements-dev.txt tox.ini
 include moto/ec2/resources/instance_types.json
 include moto/ec2/resources/amis.json
+include moto/cognitoidp/resources/*.json
 recursive-include moto/templates *
 recursive-include tests *
Makefile | 10
@@ -10,7 +10,7 @@ endif
init:
    @python setup.py develop
    @pip install -r requirements.txt
    @pip install -r requirements-dev.txt

lint:
    flake8 moto

@@ -19,6 +19,7 @@ test: lint
    rm -f .coverage
    rm -rf cover
    @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE)

test_server:
    @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/

@@ -36,14 +37,13 @@ tag_github_release:
    git tag `python setup.py --version`
    git push origin `python setup.py --version`

-publish: implementation_coverage \
-    upload_pypi_artifact \
+publish: upload_pypi_artifact \
    tag_github_release \
    push_dockerhub_image

implementation_coverage:
-    ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md
-    git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage"
+    ./scripts/implementation_coverage.py
+    git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" || true

scaffold:
    @pip install -r requirements-dev.txt > /dev/null
README.md | 191
@@ -2,9 +2,12 @@
(badges: [Gitter](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge), [Build Status](https://travis-ci.org/spulec/moto), [Coverage Status](https://coveralls.io/r/spulec/moto), [Docs](http://docs.getmoto.org), plus PyPI version, downloads, and supported-Python-version badges)

# In a nutshell

@@ -47,7 +50,7 @@ def test_my_model_save():

    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8")

-    assert body == b'is awesome'
+    assert body == 'is awesome'
```

With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys.

@@ -55,87 +58,99 @@ With the decorator wrapping the test, all the calls to s3 are automatically mocked out.

It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. Here's the status of the other AWS services implemented:

```gherkin
|----------------------------------------------------------------------------------------|
| Service Name              | Decorator             | Development Status                 |
|----------------------------------------------------------------------------------------|
| ACM                       | @mock_acm             | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| API Gateway               | @mock_apigateway      | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Autoscaling               | @mock_autoscaling     | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Cloudformation            | @mock_cloudformation  | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Cloudwatch                | @mock_cloudwatch      | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| CloudwatchEvents          | @mock_events          | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| Cognito Identity          | @mock_cognitoidentity | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| Cognito Identity Provider | @mock_cognitoidp      | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| Config                    | @mock_config          | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| Data Pipeline             | @mock_datapipeline    | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| DynamoDB                  | @mock_dynamodb        | core endpoints done                |
| DynamoDB2                 | @mock_dynamodb2       | all endpoints + partial indexes    |
|----------------------------------------------------------------------------------------|
| EC2                       | @mock_ec2             | core endpoints done                |
| - AMI                     |                       | core endpoints done                |
| - EBS                     |                       | core endpoints done                |
| - Instances               |                       | all endpoints done                 |
| - Security Groups         |                       | core endpoints done                |
| - Tags                    |                       | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| ECR                       | @mock_ecr             | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| ECS                       | @mock_ecs             | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| ELB                       | @mock_elb             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| ELBv2                     | @mock_elbv2           | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| EMR                       | @mock_emr             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Glacier                   | @mock_glacier         | core endpoints done                |
|----------------------------------------------------------------------------------------|
| IAM                       | @mock_iam             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| IoT                       | @mock_iot             | core endpoints done                |
|                           | @mock_iotdata         | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Kinesis                   | @mock_kinesis         | core endpoints done                |
|----------------------------------------------------------------------------------------|
| KMS                       | @mock_kms             | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| Lambda                    | @mock_lambda          | basic endpoints done, requires     |
|                           |                       | docker                             |
|----------------------------------------------------------------------------------------|
| Logs                      | @mock_logs            | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| Organizations             | @mock_organizations   | some core endpoints done           |
|----------------------------------------------------------------------------------------|
| Polly                     | @mock_polly           | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| RDS                       | @mock_rds             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| RDS2                      | @mock_rds2            | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Redshift                  | @mock_redshift        | core endpoints done                |
|----------------------------------------------------------------------------------------|
| Route53                   | @mock_route53         | core endpoints done                |
|----------------------------------------------------------------------------------------|
| S3                        | @mock_s3              | core endpoints done                |
|----------------------------------------------------------------------------------------|
| SecretsManager            | @mock_secretsmanager  | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| SES                       | @mock_ses             | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| SNS                       | @mock_sns             | all endpoints done                 |
|----------------------------------------------------------------------------------------|
| SQS                       | @mock_sqs             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| SSM                       | @mock_ssm             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| STS                       | @mock_sts             | core endpoints done                |
|----------------------------------------------------------------------------------------|
| SWF                       | @mock_swf             | basic endpoints done               |
|----------------------------------------------------------------------------------------|
| X-Ray                     | @mock_xray            | all endpoints done                 |
|----------------------------------------------------------------------------------------|
```
For a full list of endpoint [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md)

### Another Example

Imagine you have a function that you use to launch new ec2 instances:

@@ -167,7 +182,7 @@ def test_add_servers():
```

#### Using moto 1.0.X with boto2
-moto 1.0.X mock docorators are defined for boto3 and do not work with boto2. Use the @mock_AWSSVC_deprecated to work with boto2.
+moto 1.0.X mock decorators are defined for boto3 and do not work with boto2. Use the @mock_AWSSVC_deprecated to work with boto2.

Using moto with boto2
```python
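# The boto2 snippet itself is elided in this diff view; below is a
# minimal illustrative sketch of the deprecated-decorator pattern
# (the bucket name and assertion are ours, not from the README).
import boto
from moto import mock_s3_deprecated

@mock_s3_deprecated
def test_boto2_bucket():
    conn = boto.connect_s3()
    conn.create_bucket("mybucket")
    assert conn.get_bucket("mybucket").name == "mybucket"
```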
@@ -249,7 +264,7 @@ It uses flask, which isn't a default dependency. You can install the
server 'extra' package with:

```python
-pip install moto[server]
+pip install "moto[server]"
```

You can then start it running a service:
@@ -306,3 +321,11 @@ boto3.resource(
```console
$ pip install moto
```

## Releases

Releases are done from Travis CI, fairly closely following this guide:
https://docs.travis-ci.com/user/deployment/pypi/

- Commits to the `master` branch do a dev deploy to PyPI.
- Commits to a tag do a real deploy to PyPI.
@@ -20,7 +20,7 @@ If you want to install ``moto`` from source::
Moto usage
----------

-For example we have the following code we want to test:
+For example, we have the following code we want to test:

.. sourcecode:: python

@@ -39,12 +39,12 @@ For example we have the following code we want to test:
    k.key = self.name
    k.set_contents_from_string(self.value)

-There are several method to do this, just keep in mind Moto creates a full blank environment.
+There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment.

Decorator
~~~~~~~~~

-With a decorator wrapping all the calls to S3 are automatically mocked out.
+With a decorator wrapping, all the calls to S3 are automatically mocked out.

.. sourcecode:: python

@@ -66,7 +66,7 @@ With a decorator wrapping all the calls to S3 are automatically mocked out.
Context manager
~~~~~~~~~~~~~~~

-Same as decorator, every call inside ``with`` statement are mocked out.
+Same as the Decorator, every call inside the ``with`` statement is mocked out.

.. sourcecode:: python

@@ -83,7 +83,7 @@ Same as decorator, every call inside ``with`` statement are mocked out.
Raw
~~~

-You can also start and stop manually the mocking.
+You can also start and stop the mocking manually.

.. sourcecode:: python

@@ -104,11 +104,11 @@ You can also start and stop manually the mocking.
Stand-alone server mode
~~~~~~~~~~~~~~~~~~~~~~~

-Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python.
+Moto also comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. For testing purposes, it's extremely useful even if you don't use Python.

.. sourcecode:: bash

    $ moto_server ec2 -p3000
     * Running on http://127.0.0.1:3000/

-This method isn't encouraged if you're using ``boto``, best is to use decorator method.
+However, this method isn't encouraged if you're using ``boto``; the best solution would be to use the decorator method.
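If you do point an SDK at the stand-alone server, the wiring is a single argument; a minimal sketch with boto3 (the region and dummy credentials are our choices, the port comes from the ``moto_server`` example above):

.. sourcecode:: python

    import boto3

    # Any AWS SDK works; here boto3 is pointed at the local moto server.
    ec2 = boto3.client(
        "ec2",
        region_name="us-east-1",
        endpoint_url="http://127.0.0.1:3000",  # from `moto_server ec2 -p3000`
        aws_access_key_id="dummy",
        aws_secret_access_key="dummy",
    )
    print(ec2.describe_instances())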
docs/index.rst | 149
@@ -17,66 +17,95 @@ with ``moto`` and its usage.
Currently implemented Services:
-------------------------------

+---------------------------+-----------------------+------------------------------------+
| Service Name              | Decorator             | Development Status                 |
+===========================+=======================+====================================+
| ACM                       | @mock_acm             | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| API Gateway               | @mock_apigateway      | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Autoscaling               | @mock_autoscaling     | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Cloudformation            | @mock_cloudformation  | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Cloudwatch                | @mock_cloudwatch      | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| CloudwatchEvents          | @mock_events          | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| Cognito Identity          | @mock_cognitoidentity | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| Cognito Identity Provider | @mock_cognitoidp      | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| Config                    | @mock_config          | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| Data Pipeline             | @mock_datapipeline    | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| DynamoDB                  | - @mock_dynamodb      | - core endpoints done              |
| DynamoDB2                 | - @mock_dynamodb2     | - core endpoints + partial indexes |
+---------------------------+-----------------------+------------------------------------+
| EC2                       | @mock_ec2             | core endpoints done                |
| - AMI                     |                       | - core endpoints done              |
| - EBS                     |                       | - core endpoints done              |
| - Instances               |                       | - all endpoints done               |
| - Security Groups         |                       | - core endpoints done              |
| - Tags                    |                       | - all endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| ECR                       | @mock_ecr             | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| ECS                       | @mock_ecs             | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| ELB                       | @mock_elb             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| ELBv2                     | @mock_elbv2           | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| EMR                       | @mock_emr             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Glacier                   | @mock_glacier         | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| IAM                       | @mock_iam             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| IoT                       | @mock_iot             | core endpoints done                |
|                           | @mock_iotdata         | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Kinesis                   | @mock_kinesis         | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| KMS                       | @mock_kms             | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| Lambda                    | @mock_lambda          | basic endpoints done,              |
|                           |                       | requires docker                    |
+---------------------------+-----------------------+------------------------------------+
| Logs                      | @mock_logs            | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| Organizations             | @mock_organizations   | some core endpoints done           |
+---------------------------+-----------------------+------------------------------------+
| Polly                     | @mock_polly           | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| RDS                       | @mock_rds             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| RDS2                      | @mock_rds2            | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Redshift                  | @mock_redshift        | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| Route53                   | @mock_route53         | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| S3                        | @mock_s3              | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| SecretsManager            | @mock_secretsmanager  | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| SES                       | @mock_ses             | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| SNS                       | @mock_sns             | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
| SQS                       | @mock_sqs             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| SSM                       | @mock_ssm             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| STS                       | @mock_sts             | core endpoints done                |
+---------------------------+-----------------------+------------------------------------+
| SWF                       | @mock_swf             | basic endpoints done               |
+---------------------------+-----------------------+------------------------------------+
| X-Ray                     | @mock_xray            | all endpoints done                 |
+---------------------------+-----------------------+------------------------------------+
moto/__init__.py
@@ -3,7 +3,7 @@ import logging
 # logging.getLogger('boto').setLevel(logging.CRITICAL)

 __title__ = 'moto'
-__version__ = '1.0.1'
+__version__ = '1.3.14.dev'

 from .acm import mock_acm  # flake8: noqa
 from .apigateway import mock_apigateway, mock_apigateway_deprecated  # flake8: noqa
@@ -11,9 +11,13 @@ from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated  # flake8
 from .awslambda import mock_lambda, mock_lambda_deprecated  # flake8: noqa
 from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated  # flake8: noqa
 from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated  # flake8: noqa
+from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated  # flake8: noqa
+from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated  # flake8: noqa
+from .config import mock_config  # flake8: noqa
 from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated  # flake8: noqa
 from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated  # flake8: noqa
 from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated  # flake8: noqa
+from .dynamodbstreams import mock_dynamodbstreams  # flake8: noqa
 from .ec2 import mock_ec2, mock_ec2_deprecated  # flake8: noqa
 from .ecr import mock_ecr, mock_ecr_deprecated  # flake8: noqa
 from .ecs import mock_ecs, mock_ecs_deprecated  # flake8: noqa
@@ -22,16 +26,20 @@ from .elbv2 import mock_elbv2  # flake8: noqa
 from .emr import mock_emr, mock_emr_deprecated  # flake8: noqa
 from .events import mock_events  # flake8: noqa
 from .glacier import mock_glacier, mock_glacier_deprecated  # flake8: noqa
+from .glue import mock_glue  # flake8: noqa
 from .iam import mock_iam, mock_iam_deprecated  # flake8: noqa
 from .kinesis import mock_kinesis, mock_kinesis_deprecated  # flake8: noqa
 from .kms import mock_kms, mock_kms_deprecated  # flake8: noqa
+from .organizations import mock_organizations  # flake8: noqa
 from .opsworks import mock_opsworks, mock_opsworks_deprecated  # flake8: noqa
 from .polly import mock_polly  # flake8: noqa
 from .rds import mock_rds, mock_rds_deprecated  # flake8: noqa
 from .rds2 import mock_rds2, mock_rds2_deprecated  # flake8: noqa
 from .redshift import mock_redshift, mock_redshift_deprecated  # flake8: noqa
+from .resourcegroups import mock_resourcegroups  # flake8: noqa
 from .s3 import mock_s3, mock_s3_deprecated  # flake8: noqa
 from .ses import mock_ses, mock_ses_deprecated  # flake8: noqa
+from .secretsmanager import mock_secretsmanager  # flake8: noqa
 from .sns import mock_sns, mock_sns_deprecated  # flake8: noqa
 from .sqs import mock_sqs, mock_sqs_deprecated  # flake8: noqa
 from .sts import mock_sts, mock_sts_deprecated  # flake8: noqa
@@ -41,6 +49,7 @@ from .swf import mock_swf, mock_swf_deprecated  # flake8: noqa
 from .xray import mock_xray, mock_xray_client, XRaySegment  # flake8: noqa
 from .logs import mock_logs, mock_logs_deprecated  # flake8: noqa
 from .batch import mock_batch  # flake8: noqa
+from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi  # flake8: noqa
 from .iot import mock_iot  # flake8: noqa
 from .iotdata import mock_iotdata  # flake8: noqa
moto/acm/models.py
@@ -243,7 +243,7 @@ class CertBundle(BaseModel):
            'KeyAlgorithm': key_algo,
            'NotAfter': datetime_to_epoch(self._cert.not_valid_after),
            'NotBefore': datetime_to_epoch(self._cert.not_valid_before),
-           'Serial': self._cert.serial,
+           'Serial': self._cert.serial_number,
            'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''),
            'Status': self.status,  # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
            'Subject': 'CN={0}'.format(self.common_name),
moto/acm/responses.py
@@ -111,16 +111,16 @@ class AWSCertificateManagerResponse(BaseResponse):
        # actual data
        try:
            certificate = base64.standard_b64decode(certificate)
-       except:
+       except Exception:
            return AWSValidationException('The certificate is not PEM-encoded or is not valid.').response()
        try:
            private_key = base64.standard_b64decode(private_key)
-       except:
+       except Exception:
            return AWSValidationException('The private key is not PEM-encoded or is not valid.').response()
        if chain is not None:
            try:
                chain = base64.standard_b64decode(chain)
-           except:
+           except Exception:
                return AWSValidationException('The certificate chain is not PEM-encoded or is not valid.').response()

        try:
moto/apigateway/exceptions.py
@@ -8,3 +8,11 @@ class StageNotFoundException(RESTError):
    def __init__(self):
        super(StageNotFoundException, self).__init__(
            "NotFoundException", "Invalid stage identifier specified")
+
+
+class ApiKeyNotFoundException(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(ApiKeyNotFoundException, self).__init__(
+            "NotFoundException", "Invalid API Key identifier specified")
moto/apigateway/models.py
@@ -1,14 +1,17 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

 import datetime
+import random
+import string
 import requests
+import time

-from moto.packages.responses import responses
+from boto3.session import Session
+import responses
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_with_milliseconds
 from .utils import create_id
-from .exceptions import StageNotFoundException
+from moto.core.utils import path_url
+from .exceptions import StageNotFoundException, ApiKeyNotFoundException

 STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}"

@@ -20,8 +23,7 @@ class Deployment(BaseModel, dict):
        self['id'] = deployment_id
        self['stageName'] = name
        self['description'] = description
-       self['createdDate'] = iso_8601_datetime_with_milliseconds(
-           datetime.datetime.now())
+       self['createdDate'] = int(time.time())


 class IntegrationResponse(BaseModel, dict):

@@ -293,6 +295,44 @@ class Stage(BaseModel, dict):
            raise Exception('Patch operation "%s" not implemented' % op['op'])


+class ApiKey(BaseModel, dict):
+
+    def __init__(self, name=None, description=None, enabled=True,
+                 generateDistinctId=False, value=None, stageKeys=None, customerId=None):
+        super(ApiKey, self).__init__()
+        self['id'] = create_id()
+        self['value'] = value if value else ''.join(random.sample(string.ascii_letters + string.digits, 40))
+        self['name'] = name
+        self['customerId'] = customerId
+        self['description'] = description
+        self['enabled'] = enabled
+        self['createdDate'] = self['lastUpdatedDate'] = int(time.time())
+        self['stageKeys'] = stageKeys
+
+
+class UsagePlan(BaseModel, dict):
+
+    def __init__(self, name=None, description=None, apiStages=[],
+                 throttle=None, quota=None):
+        super(UsagePlan, self).__init__()
+        self['id'] = create_id()
+        self['name'] = name
+        self['description'] = description
+        self['apiStages'] = apiStages
+        self['throttle'] = throttle
+        self['quota'] = quota
+
+
+class UsagePlanKey(BaseModel, dict):
+
+    def __init__(self, id, type, name, value):
+        super(UsagePlanKey, self).__init__()
+        self['id'] = id
+        self['name'] = name
+        self['type'] = type
+        self['value'] = value
+
+
 class RestAPI(BaseModel):

     def __init__(self, id, region_name, name, description):
@@ -300,7 +340,7 @@ class RestAPI(BaseModel):
        self.region_name = region_name
        self.name = name
        self.description = description
-       self.create_date = datetime.datetime.utcnow()
+       self.create_date = int(time.time())

        self.deployments = {}
        self.stages = {}
@@ -308,12 +348,15 @@ class RestAPI(BaseModel):
        self.resources = {}
        self.add_child('/')  # Add default child

+    def __repr__(self):
+        return str(self.id)
+
    def to_dict(self):
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
-           "createdDate": iso_8601_datetime_with_milliseconds(self.create_date),
+           "createdDate": int(time.time()),
        }

    def add_child(self, path, parent_id=None):
@@ -330,7 +373,8 @@ class RestAPI(BaseModel):
        # TODO deal with no matching resource

    def resource_callback(self, request):
-       path_after_stage_name = '/'.join(request.path_url.split("/")[2:])
+       path = path_url(request.url)
+       path_after_stage_name = '/'.join(path.split("/")[2:])
        if not path_after_stage_name:
            path_after_stage_name = '/'

@@ -388,6 +432,9 @@ class APIGatewayBackend(BaseBackend):
    def __init__(self, region_name):
        super(APIGatewayBackend, self).__init__()
        self.apis = {}
+       self.keys = {}
+       self.usage_plans = {}
+       self.usage_plan_keys = {}
        self.region_name = region_name

    def reset(self):
@@ -541,8 +588,71 @@ class APIGatewayBackend(BaseBackend):
        api = self.get_rest_api(function_id)
        return api.delete_deployment(deployment_id)

+    def create_apikey(self, payload):
+        key = ApiKey(**payload)
+        self.keys[key['id']] = key
+        return key
+
+    def get_apikeys(self):
+        return list(self.keys.values())
+
+    def get_apikey(self, api_key_id):
+        return self.keys[api_key_id]
+
+    def delete_apikey(self, api_key_id):
+        self.keys.pop(api_key_id)
+        return {}
+
+    def create_usage_plan(self, payload):
+        plan = UsagePlan(**payload)
+        self.usage_plans[plan['id']] = plan
+        return plan
+
+    def get_usage_plans(self, api_key_id=None):
+        plans = list(self.usage_plans.values())
+        if api_key_id is not None:
+            plans = [
+                plan
+                for plan in plans
+                if self.usage_plan_keys.get(plan['id'], {}).get(api_key_id, False)
+            ]
+        return plans
+
+    def get_usage_plan(self, usage_plan_id):
+        return self.usage_plans[usage_plan_id]
+
+    def delete_usage_plan(self, usage_plan_id):
+        self.usage_plans.pop(usage_plan_id)
+        return {}
+
+    def create_usage_plan_key(self, usage_plan_id, payload):
+        if usage_plan_id not in self.usage_plan_keys:
+            self.usage_plan_keys[usage_plan_id] = {}
+
+        key_id = payload["keyId"]
+        if key_id not in self.keys:
+            raise ApiKeyNotFoundException()
+
+        api_key = self.keys[key_id]
+
+        usage_plan_key = UsagePlanKey(id=key_id, type=payload["keyType"], name=api_key["name"], value=api_key["value"])
+        self.usage_plan_keys[usage_plan_id][usage_plan_key['id']] = usage_plan_key
+        return usage_plan_key
+
+    def get_usage_plan_keys(self, usage_plan_id):
+        if usage_plan_id not in self.usage_plan_keys:
+            return []
+
+        return list(self.usage_plan_keys[usage_plan_id].values())
+
+    def get_usage_plan_key(self, usage_plan_id, key_id):
+        return self.usage_plan_keys[usage_plan_id][key_id]
+
+    def delete_usage_plan_key(self, usage_plan_id, key_id):
+        self.usage_plan_keys[usage_plan_id].pop(key_id)
+        return {}
+

 apigateway_backends = {}
-# Not available in boto yet
-for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']:
+for region_name in Session().get_available_regions('apigateway'):
     apigateway_backends[region_name] = APIGatewayBackend(region_name)
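The API-key and usage-plan backends above can be exercised end to end with boto3; a minimal sketch (the client calls are real boto3 APIs, while the names, values, and the response-shape assertion are our illustrative assumptions):

```python
import boto3
from moto import mock_apigateway

@mock_apigateway
def test_usage_plan_keys():
    client = boto3.client("apigateway", region_name="us-east-1")
    # Create an API key and a usage plan, then attach the key to the plan.
    key = client.create_api_key(name="demo-key", enabled=True)
    plan = client.create_usage_plan(name="demo-plan")
    client.create_usage_plan_key(
        usagePlanId=plan["id"], keyId=key["id"], keyType="API_KEY")
    keys = client.get_usage_plan_keys(usagePlanId=plan["id"])
    assert keys["items"][0]["value"] == key["value"]

test_usage_plan_keys()
```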
moto/apigateway/responses.py
@@ -4,7 +4,7 @@ import json

 from moto.core.responses import BaseResponse
 from .models import apigateway_backends
-from .exceptions import StageNotFoundException
+from .exceptions import StageNotFoundException, ApiKeyNotFoundException


 class APIGatewayResponse(BaseResponse):
@@ -226,3 +226,79 @@ class APIGatewayResponse(BaseResponse):
        deployment = self.backend.delete_deployment(
            function_id, deployment_id)
        return 200, {}, json.dumps(deployment)
+
+    def apikeys(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        if self.method == 'POST':
+            apikey_response = self.backend.create_apikey(json.loads(self.body))
+        elif self.method == 'GET':
+            apikeys_response = self.backend.get_apikeys()
+            return 200, {}, json.dumps({"item": apikeys_response})
+        return 200, {}, json.dumps(apikey_response)
+
+    def apikey_individual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        apikey = url_path_parts[2]
+
+        if self.method == 'GET':
+            apikey_response = self.backend.get_apikey(apikey)
+        elif self.method == 'DELETE':
+            apikey_response = self.backend.delete_apikey(apikey)
+        return 200, {}, json.dumps(apikey_response)
+
+    def usage_plans(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        if self.method == 'POST':
+            usage_plan_response = self.backend.create_usage_plan(json.loads(self.body))
+        elif self.method == 'GET':
+            api_key_id = self.querystring.get("keyId", [None])[0]
+            usage_plans_response = self.backend.get_usage_plans(api_key_id=api_key_id)
+            return 200, {}, json.dumps({"item": usage_plans_response})
+        return 200, {}, json.dumps(usage_plan_response)
+
+    def usage_plan_individual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        usage_plan = url_path_parts[2]
+
+        if self.method == 'GET':
+            usage_plan_response = self.backend.get_usage_plan(usage_plan)
+        elif self.method == 'DELETE':
+            usage_plan_response = self.backend.delete_usage_plan(usage_plan)
+        return 200, {}, json.dumps(usage_plan_response)
+
+    def usage_plan_keys(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        usage_plan_id = url_path_parts[2]
+
+        if self.method == 'POST':
+            try:
+                usage_plan_response = self.backend.create_usage_plan_key(usage_plan_id, json.loads(self.body))
+            except ApiKeyNotFoundException as error:
+                return error.code, {}, '{{"message":"{0}","code":"{1}"}}'.format(error.message, error.error_type)
+
+        elif self.method == 'GET':
+            usage_plans_response = self.backend.get_usage_plan_keys(usage_plan_id)
+            return 200, {}, json.dumps({"item": usage_plans_response})
+
+        return 200, {}, json.dumps(usage_plan_response)
+
+    def usage_plan_key_individual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        usage_plan_id = url_path_parts[2]
+        key_id = url_path_parts[4]
+
+        if self.method == 'GET':
+            usage_plan_response = self.backend.get_usage_plan_key(usage_plan_id, key_id)
+        elif self.method == 'DELETE':
+            usage_plan_response = self.backend.delete_usage_plan_key(usage_plan_id, key_id)
+        return 200, {}, json.dumps(usage_plan_response)
moto/apigateway/urls.py
@@ -18,4 +18,10 @@ url_paths = {
    '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/responses/(?P<status_code>\d+)$': APIGatewayResponse().resource_method_responses,
    '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/?$': APIGatewayResponse().integrations,
    '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/responses/(?P<status_code>\d+)/?$': APIGatewayResponse().integration_responses,
+   '{0}/apikeys$': APIGatewayResponse().apikeys,
+   '{0}/apikeys/(?P<apikey>[^/]+)': APIGatewayResponse().apikey_individual,
+   '{0}/usageplans$': APIGatewayResponse().usage_plans,
+   '{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$': APIGatewayResponse().usage_plan_individual,
+   '{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys$': APIGatewayResponse().usage_plan_keys,
+   '{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys/(?P<api_key_id>[^/]+)/?$': APIGatewayResponse().usage_plan_key_individual,
 }
moto/apigateway/utils.py
@@ -1,9 +1,10 @@
 from __future__ import unicode_literals
 import six
 import random
+import string


 def create_id():
     size = 10
-    chars = list(range(10)) + ['A-Z']
+    chars = list(range(10)) + list(string.ascii_lowercase)
     return ''.join(six.text_type(random.choice(chars)) for x in range(size))
@ -3,12 +3,22 @@ from moto.core.exceptions import RESTError


class AutoscalingClientError(RESTError):
    code = 400


class ResourceContentionError(RESTError):
    code = 500


class ResourceContentionError(AutoscalingClientError):

    def __init__(self):
        super(ResourceContentionError, self).__init__(
            "ResourceContentionError",
            "You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).")


class InvalidInstanceError(AutoscalingClientError):

    def __init__(self, instance_id):
        super(InvalidInstanceError, self).__init__(
            "ValidationError",
            "Instance [{0}] is invalid."
            .format(instance_id))

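# Illustrative only (not part of the diff): InvalidInstanceError('i-0123')
# renders an error response whose message reads "Instance [i-0123] is invalid.",
# which is what the create-from-instance path below raises for unknown IDs.
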
@ -1,5 +1,10 @@
from __future__ import unicode_literals

import random

from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from moto.ec2.exceptions import InvalidInstanceIdError

from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
@ -7,7 +12,7 @@ from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.elb.exceptions import LoadBalancerNotFoundError
from .exceptions import (
    ResourceContentionError,
    AutoscalingClientError, ResourceContentionError, InvalidInstanceError
)

# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown
@ -17,10 +22,12 @@ ASG_NAME_TAG = "aws:autoscaling:groupName"
|
||||
|
||||
|
||||
class InstanceState(object):
|
||||
def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"):
|
||||
def __init__(self, instance, lifecycle_state="InService",
|
||||
health_status="Healthy", protected_from_scale_in=False):
|
||||
self.instance = instance
|
||||
self.lifecycle_state = lifecycle_state
|
||||
self.health_status = health_status
|
||||
self.protected_from_scale_in = protected_from_scale_in
|
||||
|
||||
|
||||
class FakeScalingPolicy(BaseModel):
|
||||
@ -68,6 +75,26 @@ class FakeLaunchConfiguration(BaseModel):
        self.associate_public_ip_address = associate_public_ip_address
        self.block_device_mapping_dict = block_device_mapping_dict

    @classmethod
    def create_from_instance(cls, name, instance, backend):
        config = backend.create_launch_configuration(
            name=name,
            image_id=instance.image_id,
            kernel_id='',
            ramdisk_id='',
            key_name=instance.key_name,
            security_groups=instance.security_groups,
            user_data=instance.user_data,
            instance_type=instance.instance_type,
            instance_monitoring=False,
            instance_profile_name=None,
            spot_price=None,
            ebs_optimized=instance.ebs_optimized,
            associate_public_ip_address=instance.associate_public_ip,
            block_device_mappings=instance.block_device_mapping
        )
        return config

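    # Illustrative only (not part of the diff): this mirrors the AWS behaviour
    # where CreateAutoScalingGroup called with an InstanceId derives a launch
    # configuration from that instance's attributes, e.g.
    #   FakeLaunchConfiguration.create_from_instance('my-asg', instance, backend)
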
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
@ -152,17 +179,19 @@ class FakeAutoScalingGroup(BaseModel):
                 min_size, launch_config_name, vpc_zone_identifier,
                 default_cooldown, health_check_period, health_check_type,
                 load_balancers, target_group_arns, placement_group, termination_policies,
                 autoscaling_backend, tags):
                 autoscaling_backend, tags,
                 new_instances_protected_from_scale_in=False):
        self.autoscaling_backend = autoscaling_backend
        self.name = name
        self.availability_zones = availability_zones

        self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier)

        self.max_size = max_size
        self.min_size = min_size

        self.launch_config = self.autoscaling_backend.launch_configurations[
            launch_config_name]
        self.launch_config_name = launch_config_name
        self.vpc_zone_identifier = vpc_zone_identifier

        self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN
        self.health_check_period = health_check_period
@ -171,11 +200,42 @@ class FakeAutoScalingGroup(BaseModel):
        self.target_group_arns = target_group_arns
        self.placement_group = placement_group
        self.termination_policies = termination_policies
        self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in

        self.suspended_processes = []
        self.instance_states = []
        self.tags = tags if tags else []
        self.set_desired_capacity(desired_capacity)

    def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
        # for updates, if only AZs are provided, they must not clash with
        # the AZs of existing VPCs
        if update and availability_zones and not vpc_zone_identifier:
            vpc_zone_identifier = self.vpc_zone_identifier

        if vpc_zone_identifier:
            # extract azs for vpcs
            subnet_ids = vpc_zone_identifier.split(',')
            subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids)
            vpc_zones = [subnet.availability_zone for subnet in subnets]

            if availability_zones and set(availability_zones) != set(vpc_zones):
                raise AutoscalingClientError(
                    "ValidationError",
                    "The availability zones of the specified subnets and the Auto Scaling group do not match",
                )
            availability_zones = vpc_zones
        elif not availability_zones:
            if not update:
                raise AutoscalingClientError(
                    "ValidationError",
                    "At least one Availability Zone or VPC Subnet is required."
                )
            return

        self.availability_zones = availability_zones
        self.vpc_zone_identifier = vpc_zone_identifier

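    # Illustrative only (not part of the diff): passing
    # availability_zones=['us-east-1a'] together with a vpc_zone_identifier
    # whose subnets live in us-east-1b raises the mismatch ValidationError
    # above, while omitting both on creation raises "At least one Availability
    # Zone or VPC Subnet is required."
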
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
@ -202,6 +262,8 @@ class FakeAutoScalingGroup(BaseModel):
            placement_group=None,
            termination_policies=properties.get("TerminationPolicies", []),
            tags=properties.get("Tags", []),
            new_instances_protected_from_scale_in=properties.get(
                "NewInstancesProtectedFromScaleIn", False)
        )
        return group

@ -230,24 +292,31 @@ class FakeAutoScalingGroup(BaseModel):
    def update(self, availability_zones, desired_capacity, max_size, min_size,
               launch_config_name, vpc_zone_identifier, default_cooldown,
               health_check_period, health_check_type,
               placement_group, termination_policies):
        if availability_zones:
            self.availability_zones = availability_zones
               placement_group, termination_policies,
               new_instances_protected_from_scale_in=None):
        self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True)

        if max_size is not None:
            self.max_size = max_size
        if min_size is not None:
            self.min_size = min_size

        if desired_capacity is None:
            if min_size is not None and min_size > len(self.instance_states):
                desired_capacity = min_size
            if max_size is not None and max_size < len(self.instance_states):
                desired_capacity = max_size

        if launch_config_name:
            self.launch_config = self.autoscaling_backend.launch_configurations[
                launch_config_name]
            self.launch_config_name = launch_config_name
        if vpc_zone_identifier is not None:
            self.vpc_zone_identifier = vpc_zone_identifier
        if health_check_period is not None:
            self.health_check_period = health_check_period
        if health_check_type is not None:
            self.health_check_type = health_check_type
        if new_instances_protected_from_scale_in is not None:
            self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in

        if desired_capacity is not None:
            self.set_desired_capacity(desired_capacity)
@ -272,12 +341,16 @@ class FakeAutoScalingGroup(BaseModel):
        else:
            # Need to remove some instances
            count_to_remove = curr_instance_count - self.desired_capacity
            instances_to_remove = self.instance_states[:count_to_remove]
            instance_ids_to_remove = [
                instance.instance.id for instance in instances_to_remove]
            self.autoscaling_backend.ec2_backend.terminate_instances(
                instance_ids_to_remove)
            self.instance_states = self.instance_states[count_to_remove:]
            instances_to_remove = [  # only remove unprotected
                state for state in self.instance_states
                if not state.protected_from_scale_in
            ][:count_to_remove]
            if instances_to_remove:  # skip if there are no unprotected instances to remove
                instance_ids_to_remove = [
                    instance.instance.id for instance in instances_to_remove]
                self.autoscaling_backend.ec2_backend.terminate_instances(
                    instance_ids_to_remove)
                self.instance_states = list(set(self.instance_states) - set(instances_to_remove))

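    # Note (not part of the diff): the set arithmetic above relies on
    # InstanceState using default identity-based hashing, so only the exact
    # InstanceState objects selected for removal are dropped; instances with
    # protected_from_scale_in=True are never terminated by a capacity decrease.
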
    def get_propagated_tags(self):
        propagated_tags = {}
@ -298,11 +371,15 @@ class FakeAutoScalingGroup(BaseModel):
            self.launch_config.user_data,
            self.launch_config.security_groups,
            instance_type=self.launch_config.instance_type,
            tags={'instance': propagated_tags}
            tags={'instance': propagated_tags},
            placement=random.choice(self.availability_zones),
        )
        for instance in reservation.instances:
            instance.autoscaling_group = self
            self.instance_states.append(InstanceState(instance))
            self.instance_states.append(InstanceState(
                instance,
                protected_from_scale_in=self.new_instances_protected_from_scale_in,
            ))

    def append_target_groups(self, target_group_arns):
        append = [x for x in target_group_arns if x not in self.target_group_arns]
@ -364,7 +441,9 @@ class AutoScalingBackend(BaseBackend):
                                  default_cooldown, health_check_period,
                                  health_check_type, load_balancers,
                                  target_group_arns, placement_group,
                                  termination_policies, tags):
                                  termination_policies, tags,
                                  new_instances_protected_from_scale_in=False,
                                  instance_id=None):

        def make_int(value):
            return int(value) if value is not None else value
@ -377,6 +456,13 @@ class AutoScalingBackend(BaseBackend):
            health_check_period = 300
        else:
            health_check_period = make_int(health_check_period)
        if launch_config_name is None and instance_id is not None:
            try:
                instance = self.ec2_backend.get_instance(instance_id)
                launch_config_name = name
                FakeLaunchConfiguration.create_from_instance(launch_config_name, instance, self)
            except InvalidInstanceIdError:
                raise InvalidInstanceError(instance_id)

        group = FakeAutoScalingGroup(
            name=name,
@ -395,6 +481,7 @@ class AutoScalingBackend(BaseBackend):
            termination_policies=termination_policies,
            autoscaling_backend=self,
            tags=tags,
            new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
        )

        self.autoscaling_groups[name] = group
@ -407,12 +494,14 @@ class AutoScalingBackend(BaseBackend):
                                  launch_config_name, vpc_zone_identifier,
                                  default_cooldown, health_check_period,
                                  health_check_type, placement_group,
                                  termination_policies):
                                  termination_policies,
                                  new_instances_protected_from_scale_in=None):
        group = self.autoscaling_groups[name]
        group.update(availability_zones, desired_capacity, max_size,
                     min_size, launch_config_name, vpc_zone_identifier,
                     default_cooldown, health_check_period, health_check_type,
                     placement_group, termination_policies)
                     placement_group, termination_policies,
                     new_instances_protected_from_scale_in=new_instances_protected_from_scale_in)
        return group

    def describe_auto_scaling_groups(self, names):
@ -440,7 +529,13 @@ class AutoScalingBackend(BaseBackend):
            raise ResourceContentionError
        else:
            group.desired_capacity = original_size + len(instance_ids)
            new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids]
            new_instances = [
                InstanceState(
                    self.ec2_backend.get_instance(x),
                    protected_from_scale_in=group.new_instances_protected_from_scale_in,
                )
                for x in instance_ids
            ]
            for instance in new_instances:
                self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name})
            group.instance_states.extend(new_instances)
@ -614,6 +709,29 @@ class AutoScalingBackend(BaseBackend):
            asg_targets = [{'id': x.instance.id} for x in group.instance_states]
            self.elbv2_backend.deregister_targets(target_group, (asg_targets))

    def suspend_processes(self, group_name, scaling_processes):
        group = self.autoscaling_groups[group_name]
        group.suspended_processes = scaling_processes or []

    def set_instance_protection(self, group_name, instance_ids, protected_from_scale_in):
        group = self.autoscaling_groups[group_name]
        protected_instances = [
            x for x in group.instance_states if x.instance.id in instance_ids]
        for instance in protected_instances:
            instance.protected_from_scale_in = protected_from_scale_in
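
    # Illustrative only (not part of the diff): with @mock_autoscaling active,
    # a boto3 client would exercise this backend method via
    #   client.set_instance_protection(
    #       AutoScalingGroupName='my-asg',
    #       InstanceIds=['i-0abc123'],
    #       ProtectedFromScaleIn=True)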

    def notify_terminate_instances(self, instance_ids):
        for autoscaling_group_name, autoscaling_group in self.autoscaling_groups.items():
            original_instance_count = len(autoscaling_group.instance_states)
            autoscaling_group.instance_states = list(filter(
                lambda i_state: i_state.instance.id not in instance_ids,
                autoscaling_group.instance_states
            ))
            difference = original_instance_count - len(autoscaling_group.instance_states)
            if difference > 0:
                autoscaling_group.replace_autoscaling_group_instances(difference, autoscaling_group.get_propagated_tags())
                self.update_attached_elbs(autoscaling_group_name)


autoscaling_backends = {}
for region, ec2_backend in ec2_backends.items():

@ -48,7 +48,7 @@ class AutoScalingResponse(BaseResponse):
            start = all_names.index(marker) + 1
        else:
            start = 0
        max_records = self._get_param('MaxRecords', 50)  # the default is 100, but using 50 to make testing easier
        max_records = self._get_int_param('MaxRecords', 50)  # the default is 100, but using 50 to make testing easier
        launch_configurations_resp = all_launch_configurations[start:start + max_records]
        next_token = None
        if len(all_launch_configurations) > start + max_records:
@ -74,6 +74,7 @@ class AutoScalingResponse(BaseResponse):
            desired_capacity=self._get_int_param('DesiredCapacity'),
            max_size=self._get_int_param('MaxSize'),
            min_size=self._get_int_param('MinSize'),
            instance_id=self._get_param('InstanceId'),
            launch_config_name=self._get_param('LaunchConfigurationName'),
            vpc_zone_identifier=self._get_param('VPCZoneIdentifier'),
            default_cooldown=self._get_int_param('DefaultCooldown'),
@ -85,6 +86,8 @@ class AutoScalingResponse(BaseResponse):
            termination_policies=self._get_multi_param(
                'TerminationPolicies.member'),
            tags=self._get_list_prefix('Tags.member'),
            new_instances_protected_from_scale_in=self._get_bool_param(
                'NewInstancesProtectedFromScaleIn', False)
        )
        template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
        return template.render()
@ -166,7 +169,7 @@ class AutoScalingResponse(BaseResponse):
            start = all_names.index(token) + 1
        else:
            start = 0
        max_records = self._get_param("MaxRecords", 50)
        max_records = self._get_int_param("MaxRecords", 50)
        if max_records > 100:
            raise ValueError
        groups = all_groups[start:start + max_records]
@ -192,6 +195,8 @@ class AutoScalingResponse(BaseResponse):
            placement_group=self._get_param('PlacementGroup'),
            termination_policies=self._get_multi_param(
                'TerminationPolicies.member'),
            new_instances_protected_from_scale_in=self._get_bool_param(
                'NewInstancesProtectedFromScaleIn', None)
        )
        template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
        return template.render()
@ -283,6 +288,22 @@ class AutoScalingResponse(BaseResponse):
        template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE)
        return template.render()

    def suspend_processes(self):
        autoscaling_group_name = self._get_param('AutoScalingGroupName')
        scaling_processes = self._get_multi_param('ScalingProcesses.member')
        self.autoscaling_backend.suspend_processes(autoscaling_group_name, scaling_processes)
        template = self.response_template(SUSPEND_PROCESSES_TEMPLATE)
        return template.render()

    def set_instance_protection(self):
        group_name = self._get_param('AutoScalingGroupName')
        instance_ids = self._get_multi_param('InstanceIds.member')
        protected_from_scale_in = self._get_bool_param('ProtectedFromScaleIn')
        self.autoscaling_backend.set_instance_protection(
            group_name, instance_ids, protected_from_scale_in)
        template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE)
        return template.render()


CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
@ -313,8 +334,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """<DescribeLaunchConfigurationsRespon
            <UserData/>
          {% endif %}
          <InstanceType>{{ launch_configuration.instance_type }}</InstanceType>
          <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:
          9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }}</LaunchConfigurationARN>
          <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }}</LaunchConfigurationARN>
          {% if launch_configuration.block_device_mappings %}
            <BlockDeviceMappings>
            {% for mount_point, mapping in launch_configuration.block_device_mappings.items() %}
@ -385,7 +405,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<AttachLoadBalancerTargetGroups
<AttachLoadBalancerTargetGroupsResult>
</AttachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancerTargetGroupsResponse>"""

@ -393,7 +413,7 @@ ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscalin
<AttachInstancesResult>
</AttachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachInstancesResponse>"""

@ -409,7 +429,7 @@ DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """<DescribeLoadBalancerTargetGroupsRespo
</LoadBalancerTargetGroups>
</DescribeLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancerTargetGroupsResponse>"""

@ -435,7 +455,7 @@ DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscalin
</Activities>
</DetachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachInstancesResponse>"""

@ -443,7 +463,7 @@ DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """<DetachLoadBalancerTargetGroups
<DetachLoadBalancerTargetGroupsResult>
</DetachLoadBalancerTargetGroupsResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancerTargetGroupsResponse>"""

@ -463,7 +483,14 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
            </member>
            {% endfor %}
          </Tags>
          <SuspendedProcesses/>
          <SuspendedProcesses>
          {% for suspended_process in group.suspended_processes %}
            <member>
              <ProcessName>{{suspended_process}}</ProcessName>
              <SuspensionReason></SuspensionReason>
            </member>
          {% endfor %}
          </SuspendedProcesses>
          <AutoScalingGroupName>{{ group.name }}</AutoScalingGroupName>
          <HealthCheckType>{{ group.health_check_type }}</HealthCheckType>
          <CreatedTime>2013-05-06T17:47:15.107Z</CreatedTime>
@ -473,10 +500,11 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
            {% for instance_state in group.instance_states %}
            <member>
              <HealthStatus>{{ instance_state.health_status }}</HealthStatus>
              <AvailabilityZone>us-east-1e</AvailabilityZone>
              <AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
              <InstanceId>{{ instance_state.instance.id }}</InstanceId>
              <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
              <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
              <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
            </member>
            {% endfor %}
          </Instances>
@ -495,6 +523,15 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
          {% else %}
          <LoadBalancerNames/>
          {% endif %}
          {% if group.target_group_arns %}
          <TargetGroupARNs>
          {% for target_group_arn in group.target_group_arns %}
            <member>{{ target_group_arn }}</member>
          {% endfor %}
          </TargetGroupARNs>
          {% else %}
          <TargetGroupARNs/>
          {% endif %}
          <MinSize>{{ group.min_size }}</MinSize>
          {% if group.vpc_zone_identifier %}
            <VPCZoneIdentifier>{{ group.vpc_zone_identifier }}</VPCZoneIdentifier>
@ -503,8 +540,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
          {% endif %}
          <HealthCheckGracePeriod>{{ group.health_check_period }}</HealthCheckGracePeriod>
          <DefaultCooldown>{{ group.default_cooldown }}</DefaultCooldown>
          <AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb
          :autoScalingGroupName/{{ group.name }}</AutoScalingGroupARN>
          <AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb:autoScalingGroupName/{{ group.name }}</AutoScalingGroupARN>
          {% if group.termination_policies %}
          <TerminationPolicies>
            {% for policy in group.termination_policies %}
@ -518,6 +554,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
          {% if group.placement_group %}
          <PlacementGroup>{{ group.placement_group }}</PlacementGroup>
          {% endif %}
          <NewInstancesProtectedFromScaleIn>{{ group.new_instances_protected_from_scale_in|string|lower }}</NewInstancesProtectedFromScaleIn>
        </member>
        {% endfor %}
      </AutoScalingGroups>
@ -549,10 +586,11 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesRespon
      <member>
        <HealthStatus>{{ instance_state.health_status }}</HealthStatus>
        <AutoScalingGroupName>{{ instance_state.instance.autoscaling_group.name }}</AutoScalingGroupName>
        <AvailabilityZone>us-east-1e</AvailabilityZone>
        <AvailabilityZone>{{ instance_state.instance.placement }}</AvailabilityZone>
        <InstanceId>{{ instance_state.instance.id }}</InstanceId>
        <LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
        <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
        <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
      </member>
      {% endfor %}
    </AutoScalingInstances>
@ -617,7 +655,7 @@ DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscali
ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancersResult></AttachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</AttachLoadBalancersResponse>"""

@ -633,20 +671,33 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
</LoadBalancers>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>"""

DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancersResult></DetachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</DetachLoadBalancersResponse>"""

SUSPEND_PROCESSES_TEMPLATE = """<SuspendProcessesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
<RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
</ResponseMetadata>
</SuspendProcessesResponse>"""

SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceHealthResponse></SetInstanceHealthResponse>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceHealthResponse>"""

SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<SetInstanceProtectionResult></SetInstanceProtectionResult>
<ResponseMetadata>
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceProtectionResponse>"""

@ -2,7 +2,9 @@ from __future__ import unicode_literals

import base64
from collections import defaultdict
import copy
import datetime
import docker
import docker.errors
import hashlib
import io
@ -17,18 +19,23 @@ import tarfile
import calendar
import threading
import traceback
import weakref
import requests.adapters

import boto.awslambda
from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import RESTError
from moto.core.utils import unix_time_millis
from moto.s3.models import s3_backend
from moto.logs.models import logs_backends
from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .utils import make_function_arn, make_function_ver_arn

logger = logging.getLogger(__name__)

ACCOUNT_ID = '123456789012'


try:
    from tempfile import TemporaryDirectory
@ -38,6 +45,7 @@ except ImportError:

_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
_orig_adapter_send = requests.adapters.HTTPAdapter.send
docker_3 = docker.__version__[0] >= '3'
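# Note (not part of the diff): comparing only the first character of
# docker.__version__ distinguishes the 2.x and 3.x SDKs targeted here, but a
# hypothetical version '10.x' would compare as < '3' under this string test.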


def zip2tar(zip_bytes):
@ -98,7 +106,11 @@ class _DockerDataVolumeContext:

            # It doesn't exist so we need to create it
            self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256)
            container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True)
            if docker_3:
                volumes = {self.name: {'bind': '/tmp/data', 'mode': 'rw'}}
            else:
                volumes = {self.name: '/tmp/data'}
            container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes=volumes, detach=True)
            try:
                tar_bytes = zip2tar(self._lambda_func.code_bytes)
                container.put_archive('/tmp/data', tar_bytes)
@ -121,7 +133,7 @@ class _DockerDataVolumeContext:


class LambdaFunction(BaseModel):
    def __init__(self, spec, region, validate_s3=True):
    def __init__(self, spec, region, validate_s3=True, version=1):
        # required
        self.region = region
        self.code = spec['Code']
@ -161,7 +173,7 @@ class LambdaFunction(BaseModel):
            'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})

        # auto-generated
        self.version = '$LATEST'
        self.version = version
        self.last_modified = datetime.datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S')

@ -203,11 +215,15 @@ class LambdaFunction(BaseModel):
            self.code_size = key.size
            self.code_sha_256 = hashlib.sha256(key.value).hexdigest()

        self.function_arn = 'arn:aws:lambda:{}:123456789012:function:{}'.format(
            self.region, self.function_name)
        self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name)

        self.tags = dict()

    def set_version(self, version):
        self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version)
        self.version = version
        self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')

    @property
    def vpc_config(self):
        config = self._vpc_config.copy()
@ -215,6 +231,10 @@ class LambdaFunction(BaseModel):
            config.update({"VpcId": "vpc-123abc"})
        return config

    @property
    def physical_resource_id(self):
        return self.function_name

    def __repr__(self):
        return json.dumps(self.get_configuration())

@ -231,7 +251,7 @@ class LambdaFunction(BaseModel):
            "Role": self.role,
            "Runtime": self.run_time,
            "Timeout": self.timeout,
            "Version": self.version,
            "Version": str(self.version),
            "VpcConfig": self.vpc_config,
        }

@ -255,14 +275,14 @@ class LambdaFunction(BaseModel):
    def convert(s):
        try:
            return str(s, encoding='utf-8')
        except:
        except Exception:
            return s

    @staticmethod
    def is_json(test_str):
        try:
            response = json.loads(test_str)
        except:
        except Exception:
            response = test_str
        return response

@ -304,6 +324,10 @@ class LambdaFunction(BaseModel):
                    exit_code = -1
                    container.stop()
                    container.kill()
                else:
                    if docker_3:
                        exit_code = exit_code['StatusCode']

                output = container.logs(stdout=False, stderr=True)
                output += container.logs(stdout=True, stderr=False)
                container.remove()
@ -366,7 +390,7 @@ class LambdaFunction(BaseModel):
            'Role': properties['Role'],
            'Runtime': properties['Runtime'],
        }
        optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split()
        optional_properties = 'Description MemorySize Publish Timeout VpcConfig Environment'.split()
        # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
        # default logic
        for prop in optional_properties:
@ -389,8 +413,7 @@ class LambdaFunction(BaseModel):
        from moto.cloudformation.exceptions import \
            UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(
                self.region, self.function_name)
            return make_function_arn(self.region, ACCOUNT_ID, self.function_name)
        raise UnformattedGetAttTemplateException()

    @staticmethod
@ -436,6 +459,9 @@ class LambdaVersion(BaseModel):
    def __init__(self, spec):
        self.version = spec['Version']

    def __repr__(self):
        return str(self.logical_resource_id)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
                                        region_name):
@ -446,9 +472,130 @@ class LambdaVersion(BaseModel):
        return LambdaVersion(spec)


class LambdaStorage(object):
    def __init__(self):
        # Format 'func_name' {'alias': {}, 'versions': []}
        self._functions = {}
        self._arns = weakref.WeakValueDictionary()

    def _get_latest(self, name):
        return self._functions[name]['latest']

    def _get_version(self, name, version):
        index = version - 1

        try:
            return self._functions[name]['versions'][index]
        except IndexError:
            return None

    def _get_alias(self, name, alias):
        return self._functions[name]['alias'].get(alias, None)

    def get_function(self, name, qualifier=None):
        if name not in self._functions:
            return None

        if qualifier is None:
            return self._get_latest(name)

        try:
            return self._get_version(name, int(qualifier))
        except ValueError:
            return self._functions[name]['latest']

    def list_versions_by_function(self, name):
        if name not in self._functions:
            return None

        latest = copy.copy(self._functions[name]['latest'])
        latest.function_arn += ':$LATEST'
        return [latest] + self._functions[name]['versions']

    def get_arn(self, arn):
        return self._arns.get(arn, None)

    def put_function(self, fn):
        """
        :param fn: Function
        :type fn: LambdaFunction
        """
        if fn.function_name in self._functions:
            self._functions[fn.function_name]['latest'] = fn
        else:
            self._functions[fn.function_name] = {
                'latest': fn,
                'versions': [],
                'alias': weakref.WeakValueDictionary()
            }

        self._arns[fn.function_arn] = fn

    def publish_function(self, name):
        if name not in self._functions:
            return None
        if not self._functions[name]['latest']:
            return None

        new_version = len(self._functions[name]['versions']) + 1
        fn = copy.copy(self._functions[name]['latest'])
        fn.set_version(new_version)

        self._functions[name]['versions'].append(fn)
        self._arns[fn.function_arn] = fn
        return fn

    def del_function(self, name, qualifier=None):
        if name in self._functions:
            if not qualifier:
                # No qualifier given, so drop every ARN that still references
                # this function: latest plus all published versions
                latest = self._functions[name]['latest'].function_arn
                del self._arns[latest]

                for fn in self._functions[name]['versions']:
                    del self._arns[fn.function_arn]

                del self._functions[name]

                return True

            elif qualifier == '$LATEST':
                self._functions[name]['latest'] = None

                # If there are no versions left
                if not self._functions[name]['versions'] and not self._functions[name]['latest']:
                    del self._functions[name]

                return True

            else:
                fn = self.get_function(name, qualifier)
                if fn:
                    self._functions[name]['versions'].remove(fn)

                    # If there are no versions left
                    if not self._functions[name]['versions'] and not self._functions[name]['latest']:
                        del self._functions[name]

                    return True

        return False

    def all(self):
        result = []

        for function_group in self._functions.values():
            if function_group['latest'] is not None:
                result.append(function_group['latest'])

            result.extend(function_group['versions'])

        return result

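# Rough sketch of the versioning contract (illustrative, not part of the diff):
#   storage = LambdaStorage()
#   storage.put_function(fn)                          # fn becomes 'latest'
#   v1 = storage.publish_function(fn.function_name)   # snapshot stored as version 1
#   assert storage.get_function(fn.function_name, '1') is v1
#   assert storage.get_function(fn.function_name) is fn   # no qualifier -> latest
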
class LambdaBackend(BaseBackend):
    def __init__(self, region_name):
        self._functions = {}
        self._lambdas = LambdaStorage()
        self.region_name = region_name

    def reset(self):
@ -456,33 +603,39 @@ class LambdaBackend(BaseBackend):
        self.__dict__ = {}
        self.__init__(region_name)

    def has_function(self, function_name):
        return function_name in self._functions

    def has_function_arn(self, function_arn):
        return self.get_function_by_arn(function_arn) is not None

    def create_function(self, spec):
        fn = LambdaFunction(spec, self.region_name)
        self._functions[fn.function_name] = fn
        function_name = spec.get('FunctionName', None)
        if function_name is None:
            raise RESTError('InvalidParameterValueException', 'Missing FunctionName')

        fn = LambdaFunction(spec, self.region_name, version='$LATEST')

        self._lambdas.put_function(fn)

        if spec.get('Publish'):
            ver = self.publish_function(function_name)
            fn.version = ver.version
        return fn

    def get_function(self, function_name):
        return self._functions[function_name]
    def publish_function(self, function_name):
        return self._lambdas.publish_function(function_name)

    def get_function(self, function_name, qualifier=None):
        return self._lambdas.get_function(function_name, qualifier)

    def list_versions_by_function(self, function_name):
        return self._lambdas.list_versions_by_function(function_name)

    def get_function_by_arn(self, function_arn):
        for function in self._functions.values():
            if function.function_arn == function_arn:
                return function
        return None
        return self._lambdas.get_arn(function_arn)

    def delete_function(self, function_name):
        del self._functions[function_name]
    def delete_function(self, function_name, qualifier=None):
        return self._lambdas.del_function(function_name, qualifier)

    def list_functions(self):
        return self._functions.values()
        return self._lambdas.all()

    def send_message(self, function_name, message):
    def send_message(self, function_name, message, subject=None, qualifier=None):
        event = {
            "Records": [
                {
@ -509,29 +662,37 @@ class LambdaBackend(BaseBackend):
                        "Type": "Notification",
                        "UnsubscribeUrl": "EXAMPLE",
                        "TopicArn": "arn:aws:sns:EXAMPLE",
                        "Subject": "TestInvoke"
                        "Subject": subject or "TestInvoke"
                    }
                }
            ]

        }
        self._functions[function_name].invoke(json.dumps(event), {}, {})
        pass
        func = self._lambdas.get_function(function_name, qualifier)
        func.invoke(json.dumps(event), {}, {})

    def list_tags(self, resource):
        return self.get_function_by_arn(resource).tags

    def tag_resource(self, resource, tags):
        self.get_function_by_arn(resource).tags.update(tags)
        fn = self.get_function_by_arn(resource)
        if not fn:
            return False

        fn.tags.update(tags)
        return True

    def untag_resource(self, resource, tagKeys):
        function = self.get_function_by_arn(resource)
        for key in tagKeys:
            try:
                del function.tags[key]
            except KeyError:
                pass
                # Don't care
        fn = self.get_function_by_arn(resource)
        if fn:
            for key in tagKeys:
                try:
                    del fn.tags[key]
                except KeyError:
                    pass
                    # Don't care
            return True
        return False

    def add_policy(self, function_name, policy):
        self.get_function(function_name).policy = policy
@ -546,3 +707,4 @@ lambda_backends = {_region.name: LambdaBackend(_region.name)
                   for _region in boto.awslambda.regions()}

lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2')
lambda_backends['us-gov-west-1'] = LambdaBackend('us-gov-west-1')

@ -1,19 +1,34 @@
from __future__ import unicode_literals

import json
import re

try:
    from urllib import unquote
    from urlparse import urlparse, parse_qs
except:
    from urllib.parse import unquote, urlparse, parse_qs
except ImportError:
    from urllib.parse import unquote

from moto.core.utils import amz_crc32, amzn_request_id
from moto.core.utils import amz_crc32, amzn_request_id, path_url
from moto.core.responses import BaseResponse
from .models import lambda_backends


class LambdaResponse(BaseResponse):
    @property
    def json_body(self):
        """
        :return: JSON
        :rtype: dict
        """
        return json.loads(self.body)

    @property
    def lambda_backend(self):
        """
        Get backend
        :return: Lambda Backend
        :rtype: moto.awslambda.models.LambdaBackend
        """
        return lambda_backends[self.region]

    def root(self, request, full_url, headers):
        self.setup_class(request, full_url, headers)
@ -33,6 +48,20 @@ class LambdaResponse(BaseResponse):
        else:
            raise ValueError("Cannot handle request")

    def versions(self, request, full_url, headers):
        self.setup_class(request, full_url, headers)
        if request.method == 'GET':
            # This is ListVersionsByFunction

            path = request.path if hasattr(request, 'path') else path_url(request.url)
            function_name = path.split('/')[-2]
            return self._list_versions_by_function(function_name)

        elif request.method == 'POST':
            return self._publish_function(request, full_url, headers)
        else:
            raise ValueError("Cannot handle request")

    @amz_crc32
    @amzn_request_id
    def invoke(self, request, full_url, headers):
@ -69,37 +98,32 @@ class LambdaResponse(BaseResponse):
            return self._add_policy(request, full_url, headers)

    def _add_policy(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)

        path = request.path if hasattr(request, 'path') else request.path_url
        path = request.path if hasattr(request, 'path') else path_url(request.url)
        function_name = path.split('/')[-2]
        if lambda_backend.has_function(function_name):
        if self.lambda_backend.get_function(function_name):
            policy = request.body.decode('utf8')
            lambda_backend.add_policy(function_name, policy)
            self.lambda_backend.add_policy(function_name, policy)
            return 200, {}, json.dumps(dict(Statement=policy))
        else:
            return 404, {}, "{}"

    def _get_policy(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)

        path = request.path if hasattr(request, 'path') else request.path_url
        path = request.path if hasattr(request, 'path') else path_url(request.url)
        function_name = path.split('/')[-2]
        if lambda_backend.has_function(function_name):
            function = lambda_backend.get_function(function_name)
            return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + function.policy + "]}"))
        if self.lambda_backend.get_function(function_name):
            lambda_function = self.lambda_backend.get_function(function_name)
            return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}"))
        else:
            return 404, {}, "{}"

    def _invoke(self, request, full_url):
        response_headers = {}
        lambda_backend = self.get_lambda_backend(full_url)

        path = request.path if hasattr(request, 'path') else request.path_url
        function_name = path.split('/')[-2]
        function_name = self.path.rsplit('/', 2)[-2]
        qualifier = self._get_param('qualifier')

        if lambda_backend.has_function(function_name):
            fn = lambda_backend.get_function(function_name)
        fn = self.lambda_backend.get_function(function_name, qualifier)
        if fn:
            payload = fn.invoke(self.body, self.headers, response_headers)
            response_headers['Content-Length'] = str(len(payload))
            return 202, response_headers, payload
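            # Illustrative only (not part of the diff): a request such as
            # POST /2015-03-31/functions/myfn/invocations?qualifier=2 now
            # resolves and invokes published version 2 instead of $LATEST.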
@ -108,109 +132,115 @@ class LambdaResponse(BaseResponse):

    def _invoke_async(self, request, full_url):
        response_headers = {}
        lambda_backend = self.get_lambda_backend(full_url)

        path = request.path if hasattr(request, 'path') else request.path_url
        function_name = path.split('/')[-3]
        if lambda_backend.has_function(function_name):
            fn = lambda_backend.get_function(function_name)
            fn.invoke(self.body, self.headers, response_headers)
            response_headers['Content-Length'] = str(0)
            return 202, response_headers, ""
        function_name = self.path.rsplit('/', 3)[-3]

        fn = self.lambda_backend.get_function(function_name, None)
        if fn:
            payload = fn.invoke(self.body, self.headers, response_headers)
            response_headers['Content-Length'] = str(len(payload))
            return 202, response_headers, payload
        else:
            return 404, response_headers, "{}"

    def _list_functions(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)
        return 200, {}, json.dumps({
            "Functions": [fn.get_configuration() for fn in lambda_backend.list_functions()],
            # "NextMarker": str(uuid.uuid4()),
        })
        result = {
            'Functions': []
        }

        for fn in self.lambda_backend.list_functions():
            json_data = fn.get_configuration()
            json_data['Version'] = '$LATEST'
            result['Functions'].append(json_data)

        return 200, {}, json.dumps(result)

    def _list_versions_by_function(self, function_name):
        result = {
            'Versions': []
        }

        functions = self.lambda_backend.list_versions_by_function(function_name)
        if functions:
            for fn in functions:
                json_data = fn.get_configuration()
                result['Versions'].append(json_data)

        return 200, {}, json.dumps(result)

    def _create_function(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)
        spec = json.loads(self.body)
        try:
            fn = lambda_backend.create_function(spec)
            fn = self.lambda_backend.create_function(self.json_body)
        except ValueError as e:
            return 400, {}, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}})
        else:
            config = fn.get_configuration()
            return 201, {}, json.dumps(config)

    def _publish_function(self, request, full_url, headers):
        function_name = self.path.rsplit('/', 2)[-2]

        fn = self.lambda_backend.publish_function(function_name)
        if fn:
            config = fn.get_configuration()
            return 201, {}, json.dumps(config)
        else:
            return 404, {}, "{}"

    def _delete_function(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)
        function_name = self.path.rsplit('/', 1)[-1]
        qualifier = self._get_param('Qualifier', None)

        path = request.path if hasattr(request, 'path') else request.path_url
        function_name = path.split('/')[-1]

        if lambda_backend.has_function(function_name):
            lambda_backend.delete_function(function_name)
        if self.lambda_backend.delete_function(function_name, qualifier):
            return 204, {}, ""
        else:
            return 404, {}, "{}"

    def _get_function(self, request, full_url, headers):
        lambda_backend = self.get_lambda_backend(full_url)
        function_name = self.path.rsplit('/', 1)[-1]
        qualifier = self._get_param('Qualifier', None)

        path = request.path if hasattr(request, 'path') else request.path_url
        function_name = path.split('/')[-1]
        fn = self.lambda_backend.get_function(function_name, qualifier)

        if lambda_backend.has_function(function_name):
            fn = lambda_backend.get_function(function_name)
        if fn:
            code = fn.get_code()
            if qualifier is None or qualifier == '$LATEST':
                code['Configuration']['Version'] = '$LATEST'
            if qualifier == '$LATEST':
                code['Configuration']['FunctionArn'] += ':$LATEST'
            return 200, {}, json.dumps(code)
        else:
            return 404, {}, "{}"

    def get_lambda_backend(self, full_url):
        from moto.awslambda.models import lambda_backends
        region = self._get_aws_region(full_url)
        return lambda_backends[region]

    def _get_aws_region(self, full_url):
        region = re.search(self.region_regex, full_url)
        region = self.region_regex.search(full_url)
        if region:
            return region.group(1)
        else:
            return self.default_region

    def _list_tags(self, request, full_url):
        lambda_backend = self.get_lambda_backend(full_url)
        function_arn = unquote(self.path.rsplit('/', 1)[-1])

        path = request.path if hasattr(request, 'path') else request.path_url
        function_arn = unquote(path.split('/')[-1])

        if lambda_backend.has_function_arn(function_arn):
            function = lambda_backend.get_function_by_arn(function_arn)
            return 200, {}, json.dumps(dict(Tags=function.tags))
        fn = self.lambda_backend.get_function_by_arn(function_arn)
        if fn:
            return 200, {}, json.dumps({'Tags': fn.tags})
        else:
            return 404, {}, "{}"

    def _tag_resource(self, request, full_url):
        lambda_backend = self.get_lambda_backend(full_url)
        function_arn = unquote(self.path.rsplit('/', 1)[-1])

        path = request.path if hasattr(request, 'path') else request.path_url
        function_arn = unquote(path.split('/')[-1])

        spec = json.loads(self.body)

        if lambda_backend.has_function_arn(function_arn):
            lambda_backend.tag_resource(function_arn, spec['Tags'])
        if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):
            return 200, {}, "{}"
        else:
            return 404, {}, "{}"

    def _untag_resource(self, request, full_url):
        lambda_backend = self.get_lambda_backend(full_url)
        function_arn = unquote(self.path.rsplit('/', 1)[-1])
        tag_keys = self.querystring['tagKeys']

        path = request.path if hasattr(request, 'path') else request.path_url
        function_arn = unquote(path.split('/')[-1].split('?')[0])

        tag_keys = parse_qs(urlparse(full_url).query)['tagKeys']

        if lambda_backend.has_function_arn(function_arn):
            lambda_backend.untag_resource(function_arn, tag_keys)
        if self.lambda_backend.untag_resource(function_arn, tag_keys):
            return 204, {}, "{}"
        else:
            return 404, {}, "{}"

@ -10,6 +10,7 @@ response = LambdaResponse()
url_paths = {
    '{0}/(?P<api_version>[^/]+)/functions/?$': response.root,
    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/?$': response.function,
    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/versions/?$': response.versions,
    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke,
    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async,
    r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag,

20
moto/awslambda/utils.py
Normal file
@ -0,0 +1,20 @@
from collections import namedtuple

ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])


def make_function_arn(region, account, name):
    return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name)


def make_function_ver_arn(region, account, name, version='1'):
    arn = make_function_arn(region, account, name)
    return '{0}:{1}'.format(arn, version)


def split_function_arn(arn):
    arn = arn.replace('arn:aws:lambda:', '')

    region, account, _, name, version = arn.split(':')

    return ARN(region, account, name, version)

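# Illustrative only (not part of the new file): given the versioned ARN
# 'arn:aws:lambda:us-east-1:123456789012:function:foo:1', split_function_arn
# returns ARN(region='us-east-1', account='123456789012', function_name='foo',
# version='1'); the five-way split assumes a version suffix is present.
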
@ -6,10 +6,13 @@ from moto.autoscaling import autoscaling_backends
|
||||
from moto.awslambda import lambda_backends
|
||||
moto/backends.py

from moto.cloudformation import cloudformation_backends
from moto.cloudwatch import cloudwatch_backends
from moto.cognitoidentity import cognitoidentity_backends
from moto.cognitoidp import cognitoidp_backends
from moto.core import moto_api_backends
from moto.datapipeline import datapipeline_backends
from moto.dynamodb import dynamodb_backends
from moto.dynamodb2 import dynamodb_backends2
from moto.dynamodbstreams import dynamodbstreams_backends
from moto.ec2 import ec2_backends
from moto.ecr import ecr_backends
from moto.ecs import ecs_backends

@@ -18,27 +21,33 @@ from moto.elbv2 import elbv2_backends
from moto.emr import emr_backends
from moto.events import events_backends
from moto.glacier import glacier_backends
from moto.glue import glue_backends
from moto.iam import iam_backends
from moto.instance_metadata import instance_metadata_backends
from moto.kinesis import kinesis_backends
from moto.kms import kms_backends
from moto.logs import logs_backends
from moto.opsworks import opsworks_backends
from moto.organizations import organizations_backends
from moto.polly import polly_backends
from moto.rds2 import rds2_backends
from moto.redshift import redshift_backends
from moto.resourcegroups import resourcegroups_backends
from moto.route53 import route53_backends
from moto.s3 import s3_backends
from moto.ses import ses_backends
from moto.secretsmanager import secretsmanager_backends
from moto.sns import sns_backends
from moto.sqs import sqs_backends
from moto.ssm import ssm_backends
from moto.sts import sts_backends
from moto.swf import swf_backends
from moto.xray import xray_backends
from moto.iot import iot_backends
from moto.iotdata import iotdata_backends
from moto.batch import batch_backends

from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends
from moto.config import config_backends

BACKENDS = {
    'acm': acm_backends,

@@ -47,9 +56,13 @@ BACKENDS = {
    'batch': batch_backends,
    'cloudformation': cloudformation_backends,
    'cloudwatch': cloudwatch_backends,
    'cognito-identity': cognitoidentity_backends,
    'cognito-idp': cognitoidp_backends,
    'config': config_backends,
    'datapipeline': datapipeline_backends,
    'dynamodb': dynamodb_backends,
    'dynamodb2': dynamodb_backends2,
    'dynamodbstreams': dynamodbstreams_backends,
    'ec2': ec2_backends,
    'ecr': ecr_backends,
    'ecs': ecs_backends,

@@ -58,6 +71,7 @@ BACKENDS = {
    'events': events_backends,
    'emr': emr_backends,
    'glacier': glacier_backends,
    'glue': glue_backends,
    'iam': iam_backends,
    'moto_api': moto_api_backends,
    'instance_metadata': instance_metadata_backends,

@@ -65,19 +79,24 @@ BACKENDS = {
    'kinesis': kinesis_backends,
    'kms': kms_backends,
    'opsworks': opsworks_backends,
    'organizations': organizations_backends,
    'polly': polly_backends,
    'redshift': redshift_backends,
    'resource-groups': resourcegroups_backends,
    'rds': rds2_backends,
    's3': s3_backends,
    's3bucket_path': s3_backends,
    'ses': ses_backends,
    'secretsmanager': secretsmanager_backends,
    'sns': sns_backends,
    'sqs': sqs_backends,
    'ssm': ssm_backends,
    'sts': sts_backends,
    'swf': swf_backends,
    'route53': route53_backends,
    'lambda': lambda_backends,
    'xray': xray_backends,
    'resourcegroupstaggingapi': resourcegroupstaggingapi_backends,
    'iot': iot_backends,
    'iot-data': iotdata_backends,
}
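For orientation: most values in BACKENDS are themselves dicts keyed by region, so resolving a service/region pair to a backend instance is two subscripts. A minimal sketch (the helper name get_backend is illustrative, not part of this diff):

    from moto.backends import BACKENDS

    def get_backend(service_name, region='us-east-1'):
        # Most service entries ('sqs', 'ec2', ...) map region -> backend instance;
        # a few special entries such as 'moto_api' use other keys.
        return BACKENDS[service_name][region]

    # e.g. peek at the in-memory SQS backend for us-east-1:
    # queues = get_backend('sqs').queues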
moto/batch/models.py

@@ -295,6 +295,14 @@ class Job(threading.Thread, BaseModel):
        }
        if self.job_stopped:
            result['stoppedAt'] = datetime2int(self.job_stopped_at)
            result['container'] = {}
            result['container']['command'] = ['/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"']
            result['container']['privileged'] = False
            result['container']['readonlyRootFilesystem'] = False
            result['container']['ulimits'] = {}
            result['container']['vcpus'] = 1
            result['container']['volumes'] = ''
            result['container']['logStreamName'] = self.log_stream_name
        if self.job_stopped_reason is not None:
            result['statusReason'] = self.job_stopped_reason
        return result

@@ -378,6 +386,7 @@ class Job(threading.Thread, BaseModel):
            # Send to cloudwatch
            log_group = '/aws/batch/job'
            stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
            self.log_stream_name = stream_name
            self._log_backend.ensure_log_group(log_group, None)
            self._log_backend.create_log_stream(log_group, stream_name)
            self._log_backend.put_log_events(log_group, stream_name, logs, None)
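Since the job now records its log stream on the container description, a test can read the output back with boto3 once the job has finished. A rough sketch, assuming a job was already submitted under moto's batch and logs mocks ('my-job-id' is illustrative):

    import boto3

    batch = boto3.client('batch', region_name='us-east-1')
    logs = boto3.client('logs', region_name='us-east-1')

    job = batch.describe_jobs(jobs=['my-job-id'])['jobs'][0]
    stream_name = job['container']['logStreamName']  # '<job-definition-name>/default/<job-id>'
    for event in logs.get_log_events(logGroupName='/aws/batch/job',
                                     logStreamName=stream_name)['events']:
        print(event['message'])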
moto/batch/responses.py

@@ -27,7 +27,7 @@ class BatchResponse(BaseResponse):
        elif not hasattr(self, '_json'):
            try:
                self._json = json.loads(self.body)
            except json.JSONDecodeError:
            except ValueError:
                print()
        return self._json
moto/cloudformation/exceptions.py

@@ -33,6 +33,18 @@ class MissingParameterError(BadRequest):
        )


class ExportNotFound(BadRequest):
    """Exception to raise if a template tries to import a non-existent export"""

    def __init__(self, export_name):
        template = Template(ERROR_RESPONSE)
        super(ExportNotFound, self).__init__()
        self.description = template.render(
            code='ExportNotFound',
            message="No export named {0} found.".format(export_name)
        )


ERROR_RESPONSE = """<ErrorResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
  <Error>
    <Type>Sender</Type>
moto/cloudformation/models.py

@@ -1,5 +1,5 @@
from __future__ import unicode_literals
from datetime import datetime
from datetime import datetime, timedelta
import json
import yaml
import uuid

@@ -9,13 +9,162 @@ from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel

from .parsing import ResourceMap, OutputMap
from .utils import generate_stack_id, yaml_tag_constructor
from .utils import (
    generate_changeset_id,
    generate_stack_id,
    generate_stackset_arn,
    generate_stackset_id,
    yaml_tag_constructor,
    validate_template_cfn_lint,
)
from .exceptions import ValidationError

class FakeStackSet(BaseModel):

    def __init__(self, stackset_id, name, template, region='us-east-1',
                 status='ACTIVE', description=None, parameters=None, tags=None,
                 admin_role='AWSCloudFormationStackSetAdministrationRole',
                 execution_role='AWSCloudFormationStackSetExecutionRole'):
        self.id = stackset_id
        self.arn = generate_stackset_arn(stackset_id, region)
        self.name = name
        self.template = template
        self.description = description
        self.parameters = parameters
        self.tags = tags
        self.admin_role = admin_role
        self.execution_role = execution_role
        self.status = status
        self.instances = FakeStackInstances(parameters, self.id, self.name)
        self.stack_instances = self.instances.stack_instances
        self.operations = []

    def _create_operation(self, operation_id, action, status, accounts=[], regions=[]):
        operation = {
            'OperationId': str(operation_id),
            'Action': action,
            'Status': status,
            'CreationTimestamp': datetime.now(),
            'EndTimestamp': datetime.now() + timedelta(minutes=2),
            'Instances': [{account: region} for account in accounts for region in regions],
        }

        self.operations += [operation]
        return operation

    def get_operation(self, operation_id):
        for operation in self.operations:
            if operation_id == operation['OperationId']:
                return operation
        raise ValidationError(operation_id)

    def update_operation(self, operation_id, status):
        operation = self.get_operation(operation_id)
        operation['Status'] = status
        return operation_id

    def delete(self):
        self.status = 'DELETED'

    def update(self, template, description, parameters, tags, admin_role,
               execution_role, accounts, regions, operation_id=None):
        if not operation_id:
            operation_id = uuid.uuid4()

        self.template = template if template else self.template
        self.description = description if description is not None else self.description
        self.parameters = parameters if parameters else self.parameters
        self.tags = tags if tags else self.tags
        self.admin_role = admin_role if admin_role else self.admin_role
        self.execution_role = execution_role if execution_role else self.execution_role

        if accounts and regions:
            self.update_instances(accounts, regions, self.parameters)

        operation = self._create_operation(operation_id=operation_id,
                                           action='UPDATE', status='SUCCEEDED', accounts=accounts,
                                           regions=regions)
        return operation

    def create_stack_instances(self, accounts, regions, parameters, operation_id=None):
        if not operation_id:
            operation_id = uuid.uuid4()
        if not parameters:
            parameters = self.parameters

        self.instances.create_instances(accounts, regions, parameters, operation_id)
        self._create_operation(operation_id=operation_id, action='CREATE',
                               status='SUCCEEDED', accounts=accounts, regions=regions)

    def delete_stack_instances(self, accounts, regions, operation_id=None):
        if not operation_id:
            operation_id = uuid.uuid4()

        self.instances.delete(accounts, regions)

        operation = self._create_operation(operation_id=operation_id, action='DELETE',
                                           status='SUCCEEDED', accounts=accounts, regions=regions)
        return operation

    def update_instances(self, accounts, regions, parameters, operation_id=None):
        if not operation_id:
            operation_id = uuid.uuid4()

        self.instances.update(accounts, regions, parameters)
        operation = self._create_operation(operation_id=operation_id,
                                           action='UPDATE', status='SUCCEEDED', accounts=accounts,
                                           regions=regions)
        return operation

class FakeStackInstances(BaseModel):
    def __init__(self, parameters, stackset_id, stackset_name):
        self.parameters = parameters if parameters else {}
        self.stackset_id = stackset_id
        self.stack_name = "StackSet-{}".format(stackset_id)
        self.stackset_name = stackset_name
        self.stack_instances = []

    def create_instances(self, accounts, regions, parameters, operation_id):
        new_instances = []
        for region in regions:
            for account in accounts:
                instance = {
                    'StackId': generate_stack_id(self.stack_name, region, account),
                    'StackSetId': self.stackset_id,
                    'Region': region,
                    'Account': account,
                    'Status': "CURRENT",
                    'ParameterOverrides': parameters if parameters else [],
                }
                new_instances.append(instance)
        self.stack_instances += new_instances
        return new_instances

    def update(self, accounts, regions, parameters):
        for account in accounts:
            for region in regions:
                instance = self.get_instance(account, region)
                if parameters:
                    instance['ParameterOverrides'] = parameters
                else:
                    instance['ParameterOverrides'] = []

    def delete(self, accounts, regions):
        for i, instance in enumerate(self.stack_instances):
            if instance['Region'] in regions and instance['Account'] in accounts:
                self.stack_instances.pop(i)

    def get_instance(self, account, region):
        for i, instance in enumerate(self.stack_instances):
            if instance['Region'] == region and instance['Account'] == account:
                return self.stack_instances[i]

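Taken together, the two classes above back the boto3 stack-set API end to end. A minimal sketch of the round trip, in the style of moto's own tests (names and template are illustrative):

    import boto3
    from moto import mock_cloudformation

    TEMPLATE = '{"Resources": {}}'  # trivial body; create_stack_set only stores it

    @mock_cloudformation
    def stack_set_round_trip():
        cf = boto3.client('cloudformation', region_name='us-east-1')
        cf.create_stack_set(StackSetName='demo', TemplateBody=TEMPLATE)
        cf.create_stack_instances(StackSetName='demo',
                                  Accounts=['123456789012'],
                                  Regions=['us-east-1', 'us-west-2'])
        summaries = cf.list_stack_instances(StackSetName='demo')['Summaries']
        assert len(summaries) == 2  # one instance per account/region pair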
class FakeStack(BaseModel):

    def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None):
    def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
        self.stack_id = stack_id
        self.name = name
        self.template = template

@@ -26,11 +175,15 @@ class FakeStack(BaseModel):
        self.role_arn = role_arn
        self.tags = tags if tags else {}
        self.events = []
        self._add_stack_event("CREATE_IN_PROGRESS",
                              resource_status_reason="User Initiated")
        if create_change_set:
            self._add_stack_event("REVIEW_IN_PROGRESS",
                                  resource_status_reason="User Initiated")
        else:
            self._add_stack_event("CREATE_IN_PROGRESS",
                                  resource_status_reason="User Initiated")

        self.description = self.template_dict.get('Description')
        self.cross_stack_resources = cross_stack_resources or []
        self.cross_stack_resources = cross_stack_resources or {}
        self.resource_map = self._create_resource_map()
        self.output_map = self._create_output_map()
        self._add_stack_event("CREATE_COMPLETE")

@@ -76,9 +229,9 @@ class FakeStack(BaseModel):
    def _parse_template(self):
        yaml.add_multi_constructor('', yaml_tag_constructor)
        try:
            self.template_dict = yaml.load(self.template)
            self.template_dict = yaml.load(self.template, Loader=yaml.Loader)
        except yaml.parser.ParserError:
            self.template_dict = json.loads(self.template)
    @property
    def stack_parameters(self):

@@ -99,7 +252,8 @@ class FakeStack(BaseModel):
    def update(self, template, role_arn=None, parameters=None, tags=None):
        self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated")
        self.template = template
        self.resource_map.update(json.loads(template), parameters)
        self._parse_template()
        self.resource_map.update(self.template_dict, parameters)
        self.output_map = self._create_output_map()
        self._add_stack_event("UPDATE_COMPLETE")
        self.status = "UPDATE_COMPLETE"

@@ -117,6 +271,49 @@ class FakeStack(BaseModel):
        self.status = "DELETE_COMPLETE"

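Worth noting for the _parse_template fallback above: PyYAML accepts most JSON documents (YAML is close to a superset of JSON), so the json.loads branch is only reached for templates that actually trip the YAML parser. A quick illustration:

    import yaml

    JSON_TEMPLATE = '{"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}'
    parsed = yaml.load(JSON_TEMPLATE, Loader=yaml.Loader)  # parses fine as YAML
    assert parsed['Resources']['Bucket']['Type'] == 'AWS::S3::Bucket'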
class FakeChange(BaseModel):

    def __init__(self, action, logical_resource_id, resource_type):
        self.action = action
        self.logical_resource_id = logical_resource_id
        self.resource_type = resource_type


class FakeChangeSet(FakeStack):

    def __init__(self, stack_id, stack_name, stack_template, change_set_id, change_set_name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None):
        super(FakeChangeSet, self).__init__(
            stack_id,
            stack_name,
            stack_template,
            parameters,
            region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=cross_stack_resources,
            create_change_set=True,
        )
        self.stack_name = stack_name
        self.change_set_id = change_set_id
        self.change_set_name = change_set_name
        self.changes = self.diff(template=template, parameters=parameters)

    def diff(self, template, parameters=None):
        self.template = template
        self._parse_template()
        changes = []
        resources_by_action = self.resource_map.diff(self.template_dict, parameters)
        for action, resources in resources_by_action.items():
            for resource_name, resource in resources.items():
                changes.append(FakeChange(
                    action=action,
                    logical_resource_id=resource_name,
                    resource_type=resource['ResourceType'],
                ))
        return changes

class FakeEvent(BaseModel):

    def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):

@@ -136,10 +333,73 @@ class CloudFormationBackend(BaseBackend):

    def __init__(self):
        self.stacks = OrderedDict()
        self.stacksets = OrderedDict()
        self.deleted_stacks = {}
        self.exports = OrderedDict()
        self.change_sets = OrderedDict()

    def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):
    def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None):
        stackset_id = generate_stackset_id(name)
        new_stackset = FakeStackSet(
            stackset_id=stackset_id,
            name=name,
            template=template,
            parameters=parameters,
            description=description,
            tags=tags,
            admin_role=admin_role,
            execution_role=execution_role,
        )
        self.stacksets[stackset_id] = new_stackset
        return new_stackset

    def get_stack_set(self, name):
        stacksets = self.stacksets.keys()
        for stackset in stacksets:
            if self.stacksets[stackset].name == name:
                return self.stacksets[stackset]
        raise ValidationError(name)

    def delete_stack_set(self, name):
        stacksets = self.stacksets.keys()
        for stackset in stacksets:
            if self.stacksets[stackset].name == name:
                self.stacksets[stackset].delete()

    def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None):
        stackset = self.get_stack_set(stackset_name)

        stackset.create_stack_instances(
            accounts=accounts,
            regions=regions,
            parameters=parameters,
            operation_id=operation_id,
        )
        return stackset

    def update_stack_set(self, stackset_name, template=None, description=None,
                         parameters=None, tags=None, admin_role=None, execution_role=None,
                         accounts=None, regions=None, operation_id=None):
        stackset = self.get_stack_set(stackset_name)
        update = stackset.update(
            template=template,
            description=description,
            parameters=parameters,
            tags=tags,
            admin_role=admin_role,
            execution_role=execution_role,
            accounts=accounts,
            regions=regions,
            operation_id=operation_id
        )
        return update

    def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None):
        stackset = self.get_stack_set(stackset_name)
        stackset.delete_stack_instances(accounts, regions, operation_id)
        return stackset

    def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
        stack_id = generate_stack_id(name)
        new_stack = FakeStack(
            stack_id=stack_id,

@@ -151,6 +411,7 @@ class CloudFormationBackend(BaseBackend):
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports,
            create_change_set=create_change_set,
        )
        self.stacks[stack_id] = new_stack
        self._validate_export_uniqueness(new_stack)

@@ -158,6 +419,82 @@ class CloudFormationBackend(BaseBackend):
            self.exports[export.name] = export
        return new_stack

    def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
        stack_id = None
        stack_template = None
        if change_set_type == 'UPDATE':
            stacks = self.stacks.values()
            stack = None
            for s in stacks:
                if s.name == stack_name:
                    stack = s
                    stack_id = stack.stack_id
                    stack_template = stack.template
            if stack is None:
                raise ValidationError(stack_name)
        else:
            stack_id = generate_stack_id(stack_name)
            stack_template = template

        change_set_id = generate_changeset_id(change_set_name, region_name)
        new_change_set = FakeChangeSet(
            stack_id=stack_id,
            stack_name=stack_name,
            stack_template=stack_template,
            change_set_id=change_set_id,
            change_set_name=change_set_name,
            template=template,
            parameters=parameters,
            region_name=region_name,
            notification_arns=notification_arns,
            tags=tags,
            role_arn=role_arn,
            cross_stack_resources=self.exports
        )
        self.change_sets[change_set_id] = new_change_set
        self.stacks[stack_id] = new_change_set
        return change_set_id, stack_id
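A sketch of the change-set lifecycle these backend methods support, via boto3 under moto's CloudFormation mock (stack and change-set names are illustrative):

    import boto3
    from moto import mock_cloudformation

    TEMPLATE = '{"Resources": {"Queue": {"Type": "AWS::SQS::Queue"}}}'

    @mock_cloudformation
    def change_set_round_trip():
        cf = boto3.client('cloudformation', region_name='us-east-1')
        created = cf.create_change_set(StackName='demo', ChangeSetName='initial',
                                       TemplateBody=TEMPLATE, ChangeSetType='CREATE')
        # look-ups below accept either the change-set id or its plain name
        cf.describe_change_set(ChangeSetName=created['Id'])
        cf.execute_change_set(ChangeSetName=created['Id'])
        stacks = cf.describe_stacks(StackName='demo')['Stacks']
        assert stacks[0]['StackName'] == 'demo'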
    def delete_change_set(self, change_set_name, stack_name=None):
        if change_set_name in self.change_sets:
            # This means arn was passed in
            del self.change_sets[change_set_name]
        else:
            for cs in self.change_sets:
                if self.change_sets[cs].change_set_name == change_set_name:
                    del self.change_sets[cs]

    def describe_change_set(self, change_set_name, stack_name=None):
        change_set = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            change_set = self.change_sets[change_set_name]
        else:
            for cs in self.change_sets:
                if self.change_sets[cs].change_set_name == change_set_name:
                    change_set = self.change_sets[cs]
        if change_set is None:
            raise ValidationError(change_set_name)
        return change_set

    def execute_change_set(self, change_set_name, stack_name=None):
        stack = None
        if change_set_name in self.change_sets:
            # This means arn was passed in
            stack = self.change_sets[change_set_name]
        else:
            for cs in self.change_sets:
                if self.change_sets[cs].change_set_name == change_set_name:
                    stack = self.change_sets[cs]
        if stack is None:
            raise ValidationError(stack_name)
        if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
            stack._add_stack_event('CREATE_COMPLETE')
        else:
            stack._add_stack_event('UPDATE_IN_PROGRESS')
            stack._add_stack_event('UPDATE_COMPLETE')
        return True

    def describe_stacks(self, name_or_stack_id):
        stacks = self.stacks.values()
        if name_or_stack_id:

@@ -173,8 +510,15 @@ class CloudFormationBackend(BaseBackend):
        else:
            return list(stacks)

    def list_change_sets(self):
        return self.change_sets.values()

    def list_stacks(self):
        return self.stacks.values()
        return [
            v for v in self.stacks.values()
        ] + [
            v for v in self.deleted_stacks.values()
        ]

    def get_stack(self, name_or_stack_id):
        all_stacks = dict(self.deleted_stacks, **self.stacks)

@@ -221,6 +565,9 @@ class CloudFormationBackend(BaseBackend):
        next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    def validate_template(self, template):
        return validate_template_cfn_lint(template)

    def _validate_export_uniqueness(self, stack):
        new_stack_export_names = [x.name for x in stack.exports]
        export_names = self.exports.keys()
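The exports pagination in the hunk above pages in fixed blocks of 100, with the next token being the numeric offset as a string. In isolation (a sketch mirroring the backend's slicing, not the backend itself):

    def page_exports(all_exports, token=None):
        token = 0 if token is None else int(token)
        exports = all_exports[token:token + 100]
        next_token = str(token + 100) if len(all_exports) > token + 100 else None
        return exports, next_token

    exports, next_token = page_exports(list(range(250)))
    assert len(exports) == 100 and next_token == '100'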
moto/cloudformation/parsing.py

@@ -10,8 +10,9 @@ from moto.autoscaling import models as autoscaling_models
from moto.awslambda import models as lambda_models
from moto.batch import models as batch_models
from moto.cloudwatch import models as cloudwatch_models
from moto.cognitoidentity import models as cognitoidentity_models
from moto.datapipeline import models as datapipeline_models
from moto.dynamodb import models as dynamodb_models
from moto.dynamodb2 import models as dynamodb2_models
from moto.ec2 import models as ec2_models
from moto.ecs import models as ecs_models
from moto.elb import models as elb_models

@@ -27,7 +28,7 @@ from moto.s3 import models as s3_models
from moto.sns import models as sns_models
from moto.sqs import models as sqs_models
from .utils import random_suffix
from .exceptions import MissingParameterError, UnformattedGetAttTemplateException, ValidationError
from .exceptions import ExportNotFound, MissingParameterError, UnformattedGetAttTemplateException, ValidationError
from boto.cloudformation.stack import Output

MODEL_MAP = {

@@ -36,7 +37,7 @@ MODEL_MAP = {
    "AWS::Batch::JobDefinition": batch_models.JobDefinition,
    "AWS::Batch::JobQueue": batch_models.JobQueue,
    "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
    "AWS::DynamoDB::Table": dynamodb_models.Table,
    "AWS::DynamoDB::Table": dynamodb2_models.Table,
    "AWS::Kinesis::Stream": kinesis_models.Stream,
    "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,
    "AWS::Lambda::Function": lambda_models.LambdaFunction,

@@ -65,6 +66,7 @@ MODEL_MAP = {
    "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer,
    "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup,
    "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener,
    "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity,
    "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline,
    "AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
    "AWS::IAM::Role": iam_models.Role,

@@ -94,6 +96,7 @@ NAME_TYPE_MAP = {
    "AWS::ElasticBeanstalk::Application": "ApplicationName",
    "AWS::ElasticBeanstalk::Environment": "EnvironmentName",
    "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName",
    "AWS::ElasticLoadBalancingV2::TargetGroup": "Name",
    "AWS::RDS::DBInstance": "DBInstanceIdentifier",
    "AWS::S3::Bucket": "BucketName",
    "AWS::SNS::Topic": "TopicName",

@@ -106,6 +109,8 @@ NULL_MODELS = [
    "AWS::CloudFormation::WaitConditionHandle",
]

DEFAULT_REGION = 'us-east-1'

logger = logging.getLogger("moto")

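MODEL_MAP is what turns a template's Type string into a moto model class during parsing; the change above points AWS::DynamoDB::Table at the dynamodb2 model. A minimal illustration of the lookup (mirroring what resource_class_from_type does; the helper name here is ours):

    from moto.cloudformation.parsing import MODEL_MAP, NULL_MODELS

    def resource_class_for(resource_type):
        # NULL_MODELS are recognised in templates but deliberately not instantiated
        if resource_type in NULL_MODELS:
            return None
        return MODEL_MAP.get(resource_type)

    # resource_class_for('AWS::DynamoDB::Table') now resolves to the dynamodb2 Table model.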
@@ -202,6 +207,16 @@ def clean_json(resource_json, resources_map):
            values = [x.value for x in resources_map.cross_stack_resources.values() if x.name == cleaned_val]
            if any(values):
                return values[0]
            else:
                raise ExportNotFound(cleaned_val)

        if 'Fn::GetAZs' in resource_json:
            region = resource_json.get('Fn::GetAZs') or DEFAULT_REGION
            result = []
            # TODO: make this configurable, to reflect the real AWS AZs
            for az in ('a', 'b', 'c', 'd'):
                result.append('%s%s' % (region, az))
            return result

        cleaned_json = {}
        for key, value in resource_json.items():

@@ -230,6 +245,23 @@ def resource_name_property_from_type(resource_type):
    return NAME_TYPE_MAP.get(resource_type)

def generate_resource_name(resource_type, stack_name, logical_id):
    if resource_type in ["AWS::ElasticLoadBalancingV2::TargetGroup",
                         "AWS::ElasticLoadBalancingV2::LoadBalancer"]:
        # Target group names need to be less than 32 characters, so when cloudformation creates a name for you
        # it makes sure to stay under that limit
        name_prefix = '{0}-{1}'.format(stack_name, logical_id)
        my_random_suffix = random_suffix()
        truncated_name_prefix = name_prefix[0:32 - (len(my_random_suffix) + 1)]
        # if the truncated name ends in a dash, we'll end up with a double dash in the final name, which is
        # not allowed
        if truncated_name_prefix.endswith('-'):
            truncated_name_prefix = truncated_name_prefix[:-1]
        return '{0}-{1}'.format(truncated_name_prefix, my_random_suffix)
    else:
        return '{0}-{1}-{2}'.format(stack_name, logical_id, random_suffix())

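A quick check of the truncation rule, using a fixed suffix in place of random_suffix() for determinism (pure-stdlib sketch):

    def truncated_name(stack_name, logical_id, suffix):
        # mirrors the target-group branch above: the full name must fit in 32 chars
        name_prefix = '{0}-{1}'.format(stack_name, logical_id)
        truncated = name_prefix[0:32 - (len(suffix) + 1)]
        if truncated.endswith('-'):  # avoid a double dash in the final name
            truncated = truncated[:-1]
        return '{0}-{1}'.format(truncated, suffix)

    assert len(truncated_name('a-very-long-stack-name', 'MyTargetGroup', 'abc123')) <= 32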
def parse_resource(logical_id, resource_json, resources_map):
    resource_type = resource_json['Type']
    resource_class = resource_class_from_type(resource_type)

@@ -244,15 +276,12 @@ def parse_resource(logical_id, resource_json, resources_map):
        if 'Properties' not in resource_json:
            resource_json['Properties'] = dict()
        if resource_name_property not in resource_json['Properties']:
            resource_json['Properties'][resource_name_property] = '{0}-{1}-{2}'.format(
                resources_map.get('AWS::StackName'),
                logical_id,
                random_suffix())
            resource_json['Properties'][resource_name_property] = generate_resource_name(
                resource_type, resources_map.get('AWS::StackName'), logical_id)
        resource_name = resource_json['Properties'][resource_name_property]
    else:
        resource_name = '{0}-{1}-{2}'.format(resources_map.get('AWS::StackName'),
                                             logical_id,
                                             random_suffix())
        resource_name = generate_resource_name(resource_type, resources_map.get('AWS::StackName'), logical_id)

    return resource_class, resource_json, resource_name

@@ -357,7 +386,9 @@ class ResourceMap(collections.Mapping):
            "AWS::Region": self._region_name,
            "AWS::StackId": stack_id,
            "AWS::StackName": stack_name,
            "AWS::URLSuffix": "amazonaws.com",
            "AWS::NoValue": None,
            "AWS::Partition": "aws",
        }

    def __getitem__(self, key):

@@ -395,11 +426,18 @@ class ResourceMap(collections.Mapping):
                self.resolved_parameters[parameter_name] = parameter.get('Default')

        # Set any input parameters that were passed
        self.no_echo_parameter_keys = []
        for key, value in self.input_parameters.items():
            if key in self.resolved_parameters:
                value_type = parameter_slots[key].get('Type', 'String')
                parameter_slot = parameter_slots[key]

                value_type = parameter_slot.get('Type', 'String')
                if value_type == 'CommaDelimitedList' or value_type.startswith("List"):
                    value = value.split(',')

                if parameter_slot.get('NoEcho'):
                    self.no_echo_parameter_keys.append(key)

                self.resolved_parameters[key] = value

        # Check if there are any non-default params that were not passed input
@@ -435,36 +473,70 @@ class ResourceMap(collections.Mapping):
            ec2_models.ec2_backends[self._region_name].create_tags(
                [self[resource].physical_resource_id], self.tags)

    def update(self, template, parameters=None):
    def diff(self, template, parameters=None):
        if parameters:
            self.input_parameters = parameters
        self.load_mapping()
        self.load_parameters()
        self.load_conditions()

        old_template = self._resource_json_map
        new_template = template['Resources']

        resource_names_by_action = {
            'Add': set(new_template) - set(old_template),
            'Modify': set(name for name in new_template if name in old_template and new_template[
                name] != old_template[name]),
            'Remove': set(old_template) - set(new_template)
        }
        resources_by_action = {
            'Add': {},
            'Modify': {},
            'Remove': {},
        }

        for resource_name in resource_names_by_action['Add']:
            resources_by_action['Add'][resource_name] = {
                'LogicalResourceId': resource_name,
                'ResourceType': new_template[resource_name]['Type']
            }

        for resource_name in resource_names_by_action['Modify']:
            resources_by_action['Modify'][resource_name] = {
                'LogicalResourceId': resource_name,
                'ResourceType': new_template[resource_name]['Type']
            }

        for resource_name in resource_names_by_action['Remove']:
            resources_by_action['Remove'][resource_name] = {
                'LogicalResourceId': resource_name,
                'ResourceType': old_template[resource_name]['Type']
            }

        return resources_by_action

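The Add/Modify/Remove bucketing above is plain set arithmetic over the two Resources maps; stripped of the surrounding class it comes down to:

    old = {'Queue': {'Type': 'AWS::SQS::Queue'},
           'Topic': {'Type': 'AWS::SNS::Topic'}}
    new = {'Queue': {'Type': 'AWS::SQS::Queue', 'Properties': {'DelaySeconds': 5}},
           'Bucket': {'Type': 'AWS::S3::Bucket'}}

    add = set(new) - set(old)                                   # {'Bucket'}
    modify = {n for n in new if n in old and new[n] != old[n]}  # {'Queue'}
    remove = set(old) - set(new)                                # {'Topic'}
    assert (add, modify, remove) == ({'Bucket'}, {'Queue'}, {'Topic'})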
    def update(self, template, parameters=None):
        resources_by_action = self.diff(template, parameters)

        old_template = self._resource_json_map
        new_template = template['Resources']
        self._resource_json_map = new_template

        new_resource_names = set(new_template) - set(old_template)
        for resource_name in new_resource_names:
        for resource_name, resource in resources_by_action['Add'].items():
            resource_json = new_template[resource_name]
            new_resource = parse_and_create_resource(
                resource_name, resource_json, self, self._region_name)
            self._parsed_resources[resource_name] = new_resource

        removed_resource_nams = set(old_template) - set(new_template)
        for resource_name in removed_resource_nams:
        for resource_name, resource in resources_by_action['Remove'].items():
            resource_json = old_template[resource_name]
            parse_and_delete_resource(
                resource_name, resource_json, self, self._region_name)
            self._parsed_resources.pop(resource_name)

        resources_to_update = set(name for name in new_template if name in old_template and new_template[
            name] != old_template[name])
        tries = 1
        while resources_to_update and tries < 5:
            for resource_name in resources_to_update.copy():
        while resources_by_action['Modify'] and tries < 5:
            for resource_name, resource in resources_by_action['Modify'].copy().items():
                resource_json = new_template[resource_name]
                try:
                    changed_resource = parse_and_update_resource(

@@ -475,7 +547,7 @@ class ResourceMap(collections.Mapping):
                    last_exception = e
                else:
                    self._parsed_resources[resource_name] = changed_resource
                    resources_to_update.remove(resource_name)
                    del resources_by_action['Modify'][resource_name]
            tries += 1
        if tries == 5:
            raise last_exception

moto/cloudformation/responses.py

@@ -1,9 +1,11 @@
from __future__ import unicode_literals

import json
import yaml
from six.moves.urllib.parse import urlparse

from moto.core.responses import BaseResponse
from moto.core.utils import amzn_request_id
from moto.s3 import s3_backend
from .models import cloudformation_backends
from .exceptions import ValidationError

@@ -77,6 +79,90 @@ class CloudFormationResponse(BaseResponse):
        template = self.response_template(CREATE_STACK_RESPONSE_TEMPLATE)
        return template.render(stack=stack)

    @amzn_request_id
    def create_change_set(self):
        stack_name = self._get_param('StackName')
        change_set_name = self._get_param('ChangeSetName')
        stack_body = self._get_param('TemplateBody')
        template_url = self._get_param('TemplateURL')
        role_arn = self._get_param('RoleARN')
        update_or_create = self._get_param('ChangeSetType', 'CREATE')
        parameters_list = self._get_list_prefix("Parameters.member")
        tags = dict((item['key'], item['value'])
                    for item in self._get_list_prefix("Tags.member"))
        parameters = {param['parameter_key']: param['parameter_value']
                      for param in parameters_list}
        if template_url:
            stack_body = self._get_stack_from_s3_url(template_url)
        stack_notification_arns = self._get_multi_param(
            'NotificationARNs.member')
        change_set_id, stack_id = self.cloudformation_backend.create_change_set(
            stack_name=stack_name,
            change_set_name=change_set_name,
            template=stack_body,
            parameters=parameters,
            region_name=self.region,
            notification_arns=stack_notification_arns,
            tags=tags,
            role_arn=role_arn,
            change_set_type=update_or_create,
        )
        if self.request_json:
            return json.dumps({
                'CreateChangeSetResponse': {
                    'CreateChangeSetResult': {
                        'Id': change_set_id,
                        'StackId': stack_id,
                    }
                }
            })
        else:
            template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE)
            return template.render(stack_id=stack_id, change_set_id=change_set_id)

    def delete_change_set(self):
        stack_name = self._get_param('StackName')
        change_set_name = self._get_param('ChangeSetName')

        self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name)
        if self.request_json:
            return json.dumps({
                'DeleteChangeSetResponse': {
                    'DeleteChangeSetResult': {},
                }
            })
        else:
            template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE)
            return template.render()

    def describe_change_set(self):
        stack_name = self._get_param('StackName')
        change_set_name = self._get_param('ChangeSetName')
        change_set = self.cloudformation_backend.describe_change_set(
            change_set_name=change_set_name,
            stack_name=stack_name,
        )
        template = self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE)
        return template.render(change_set=change_set)

    @amzn_request_id
    def execute_change_set(self):
        stack_name = self._get_param('StackName')
        change_set_name = self._get_param('ChangeSetName')
        self.cloudformation_backend.execute_change_set(
            stack_name=stack_name,
            change_set_name=change_set_name,
        )
        if self.request_json:
            return json.dumps({
                'ExecuteChangeSetResponse': {
                    'ExecuteChangeSetResult': {},
                }
            })
        else:
            template = self.response_template(EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE)
            return template.render()

    def describe_stacks(self):
        stack_name_or_id = None
        if self._get_param('StackName'):

@@ -126,6 +212,11 @@ class CloudFormationResponse(BaseResponse):
        template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE)
        return template.render(stack=stack)

    def list_change_sets(self):
        change_sets = self.cloudformation_backend.list_change_sets()
        template = self.response_template(LIST_CHANGE_SETS_RESPONSE)
        return template.render(change_sets=change_sets)

    def list_stacks(self):
        stacks = self.cloudformation_backend.list_stacks()
        template = self.response_template(LIST_STACKS_RESPONSE)
@@ -161,16 +252,26 @@ class CloudFormationResponse(BaseResponse):
    def update_stack(self):
        stack_name = self._get_param('StackName')
        role_arn = self._get_param('RoleARN')
        template_url = self._get_param('TemplateURL')
        stack_body = self._get_param('TemplateBody')
        stack = self.cloudformation_backend.get_stack(stack_name)
        if self._get_param('UsePreviousTemplate') == "true":
            stack_body = self.cloudformation_backend.get_stack(
                stack_name).template
        else:
            stack_body = self._get_param('TemplateBody')
            stack_body = stack.template
        elif not stack_body and template_url:
            stack_body = self._get_stack_from_s3_url(template_url)

        incoming_params = self._get_list_prefix("Parameters.member")
        parameters = dict([
            (parameter['parameter_key'], parameter['parameter_value'])
            for parameter
            in self._get_list_prefix("Parameters.member")
            in incoming_params if 'parameter_value' in parameter
        ])
        previous = dict([
            (parameter['parameter_key'], stack.parameters[parameter['parameter_key']])
            for parameter
            in incoming_params if 'use_previous_value' in parameter
        ])
        parameters.update(previous)
        # boto3 is supposed to let you clear the tags by passing an empty value, but the request body doesn't
        # end up containing anything we can use to differentiate between passing an empty value versus not
        # passing anything. so until that changes, moto won't be able to clear tags, only update them.
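The parameter merge this enables: keys sent with UsePreviousValue are filled from the stored stack instead of the request body. A sketch of the calling pattern (names and template are illustrative; asserts omitted since this only shows the request shape):

    import boto3
    from moto import mock_cloudformation

    TEMPLATE = ('{"Parameters": {"Env": {"Type": "String"}},'
                ' "Resources": {"Queue": {"Type": "AWS::SQS::Queue"}}}')

    @mock_cloudformation
    def update_keeps_previous_parameter():
        cf = boto3.client('cloudformation', region_name='us-east-1')
        cf.create_stack(StackName='demo', TemplateBody=TEMPLATE,
                        Parameters=[{'ParameterKey': 'Env', 'ParameterValue': 'prod'}])
        cf.update_stack(StackName='demo', UsePreviousTemplate=True,
                        Parameters=[{'ParameterKey': 'Env', 'UsePreviousValue': True}])
        # 'Env' keeps its previous value ('prod'): the request carried
        # use_previous_value rather than a parameter_value for that key.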
@@ -225,6 +326,201 @@ class CloudFormationResponse(BaseResponse):
        template = self.response_template(LIST_EXPORTS_RESPONSE)
        return template.render(exports=exports, next_token=next_token)

    def validate_template(self):
        cfn_lint = self.cloudformation_backend.validate_template(self._get_param('TemplateBody'))
        if cfn_lint:
            raise ValidationError(cfn_lint[0].message)
        description = ""
        try:
            description = json.loads(self._get_param('TemplateBody'))['Description']
        except (ValueError, KeyError):
            pass
        try:
            description = yaml.load(self._get_param('TemplateBody'))['Description']
        except (yaml.ParserError, KeyError):
            pass
        template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE)
        return template.render(description=description)

    def create_stack_set(self):
        stackset_name = self._get_param('StackSetName')
        stack_body = self._get_param('TemplateBody')
        template_url = self._get_param('TemplateURL')
        # role_arn = self._get_param('RoleARN')
        parameters_list = self._get_list_prefix("Parameters.member")
        tags = dict((item['key'], item['value'])
                    for item in self._get_list_prefix("Tags.member"))

        # Copy-Pasta - Hack dict-comprehension
        parameters = dict([
            (parameter['parameter_key'], parameter['parameter_value'])
            for parameter
            in parameters_list
        ])
        if template_url:
            stack_body = self._get_stack_from_s3_url(template_url)

        stackset = self.cloudformation_backend.create_stack_set(
            name=stackset_name,
            template=stack_body,
            parameters=parameters,
            tags=tags,
            # role_arn=role_arn,
        )
        if self.request_json:
            return json.dumps({
                'CreateStackSetResponse': {
                    'CreateStackSetResult': {
                        'StackSetId': stackset.stackset_id,
                    }
                }
            })
        else:
            template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE)
            return template.render(stackset=stackset)

    def create_stack_instances(self):
        stackset_name = self._get_param('StackSetName')
        accounts = self._get_multi_param('Accounts.member')
        regions = self._get_multi_param('Regions.member')
        parameters = self._get_multi_param('ParameterOverrides.member')
        self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters)
        template = self.response_template(CREATE_STACK_INSTANCES_TEMPLATE)
        return template.render()

    def delete_stack_set(self):
        stackset_name = self._get_param('StackSetName')
        self.cloudformation_backend.delete_stack_set(stackset_name)
        template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE)
        return template.render()

    def delete_stack_instances(self):
        stackset_name = self._get_param('StackSetName')
        accounts = self._get_multi_param('Accounts.member')
        regions = self._get_multi_param('Regions.member')
        operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions)

        template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE)
        return template.render(operation=operation)

    def describe_stack_set(self):
        stackset_name = self._get_param('StackSetName')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)

        if not stackset.admin_role:
            stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole'
        if not stackset.execution_role:
            stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole'

        template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE)
        return template.render(stackset=stackset)

    def describe_stack_instance(self):
        stackset_name = self._get_param('StackSetName')
        account = self._get_param('StackInstanceAccount')
        region = self._get_param('StackInstanceRegion')

        instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region)
        template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE)
        rendered = template.render(instance=instance)
        return rendered

    def list_stack_sets(self):
        stacksets = self.cloudformation_backend.stacksets
        template = self.response_template(LIST_STACK_SETS_TEMPLATE)
        return template.render(stacksets=stacksets)

    def list_stack_instances(self):
        stackset_name = self._get_param('StackSetName')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
        template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE)
        return template.render(stackset=stackset)

    def list_stack_set_operations(self):
        stackset_name = self._get_param('StackSetName')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
        template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE)
        return template.render(stackset=stackset)

    def stop_stack_set_operation(self):
        stackset_name = self._get_param('StackSetName')
        operation_id = self._get_param('OperationId')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
        stackset.update_operation(operation_id, 'STOPPED')
        template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE)
        return template.render()

    def describe_stack_set_operation(self):
        stackset_name = self._get_param('StackSetName')
        operation_id = self._get_param('OperationId')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
        operation = stackset.get_operation(operation_id)
        template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE)
        return template.render(stackset=stackset, operation=operation)

    def list_stack_set_operation_results(self):
        stackset_name = self._get_param('StackSetName')
        operation_id = self._get_param('OperationId')
        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
        operation = stackset.get_operation(operation_id)
        template = self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE)
        return template.render(operation=operation)

    def update_stack_set(self):
        stackset_name = self._get_param('StackSetName')
        operation_id = self._get_param('OperationId')
        description = self._get_param('Description')
        execution_role = self._get_param('ExecutionRoleName')
        admin_role = self._get_param('AdministrationRoleARN')
        accounts = self._get_multi_param('Accounts.member')
        regions = self._get_multi_param('Regions.member')
        template_body = self._get_param('TemplateBody')
        template_url = self._get_param('TemplateURL')
        if template_url:
            template_body = self._get_stack_from_s3_url(template_url)
        tags = dict((item['key'], item['value'])
                    for item in self._get_list_prefix("Tags.member"))
        parameters_list = self._get_list_prefix("Parameters.member")
        parameters = dict([
            (parameter['parameter_key'], parameter['parameter_value'])
            for parameter
            in parameters_list
        ])
        operation = self.cloudformation_backend.update_stack_set(
            stackset_name=stackset_name,
            template=template_body,
            description=description,
            parameters=parameters,
            tags=tags,
            admin_role=admin_role,
            execution_role=execution_role,
            accounts=accounts,
            regions=regions,
            operation_id=operation_id
        )

        template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE)
        return template.render(operation=operation)

    def update_stack_instances(self):
        stackset_name = self._get_param('StackSetName')
        accounts = self._get_multi_param('Accounts.member')
        regions = self._get_multi_param('Regions.member')
        parameters = self._get_multi_param('ParameterOverrides.member')
        operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters)
        template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE)
        return template.render(operation=operation)


VALIDATE_STACK_RESPONSE_TEMPLATE = """<ValidateTemplateResponse>
  <ValidateTemplateResult>
    <Capabilities></Capabilities>
    <CapabilitiesReason></CapabilitiesReason>
    <DeclaredTransforms></DeclaredTransforms>
    <Description>{{ description }}</Description>
    <Parameters></Parameters>
  </ValidateTemplateResult>
</ValidateTemplateResponse>"""

CREATE_STACK_RESPONSE_TEMPLATE = """<CreateStackResponse>
  <CreateStackResult>

@@ -246,6 +542,87 @@ UPDATE_STACK_RESPONSE_TEMPLATE = """<UpdateStackResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
</UpdateStackResponse>
"""

CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """<CreateStackResponse>
  <CreateChangeSetResult>
    <Id>{{change_set_id}}</Id>
    <StackId>{{ stack_id }}</StackId>
  </CreateChangeSetResult>
  <ResponseMetadata>
    <RequestId>{{ request_id }}</RequestId>
  </ResponseMetadata>
</CreateStackResponse>
"""

DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """<DeleteChangeSetResponse>
  <DeleteChangeSetResult>
  </DeleteChangeSetResult>
  <ResponseMetadata>
    <RequestId>3d3200a1-810e-3023-6cc3-example</RequestId>
  </ResponseMetadata>
</DeleteChangeSetResponse>
"""

DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """<DescribeChangeSetResponse>
  <DescribeChangeSetResult>
    <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
    <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
    <StackId>{{ change_set.stack_id }}</StackId>
    <StackName>{{ change_set.stack_name }}</StackName>
    <Description>{{ change_set.description }}</Description>
    <Parameters>
      {% for param_name, param_value in change_set.stack_parameters.items() %}
      <member>
        <ParameterKey>{{ param_name }}</ParameterKey>
        <ParameterValue>{{ param_value }}</ParameterValue>
      </member>
      {% endfor %}
    </Parameters>
    <CreationTime>2011-05-23T15:47:44Z</CreationTime>
    <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
    <Status>{{ change_set.status }}</Status>
    <StatusReason>{{ change_set.status_reason }}</StatusReason>
    {% if change_set.notification_arns %}
    <NotificationARNs>
      {% for notification_arn in change_set.notification_arns %}
      <member>{{ notification_arn }}</member>
      {% endfor %}
    </NotificationARNs>
    {% else %}
    <NotificationARNs/>
    {% endif %}
    {% if change_set.role_arn %}
    <RoleARN>{{ change_set.role_arn }}</RoleARN>
    {% endif %}
    {% if change_set.changes %}
    <Changes>
      {% for change in change_set.changes %}
      <member>
        <Type>Resource</Type>
        <ResourceChange>
          <Action>{{ change.action }}</Action>
          <LogicalResourceId>{{ change.logical_resource_id }}</LogicalResourceId>
          <ResourceType>{{ change.resource_type }}</ResourceType>
        </ResourceChange>
      </member>
      {% endfor %}
    </Changes>
    {% endif %}
    {% if next_token %}
    <NextToken>{{ next_token }}</NextToken>
    {% endif %}
  </DescribeChangeSetResult>
</DescribeChangeSetResponse>"""

EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """<ExecuteChangeSetResponse>
  <ExecuteChangeSetResult>
    <ExecuteChangeSetResult/>
  </ExecuteChangeSetResult>
  <ResponseMetadata>
    <RequestId>{{ request_id }}</RequestId>
  </ResponseMetadata>
</ExecuteChangeSetResponse>
"""

DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
  <DescribeStacksResult>
    <Stacks>

@@ -277,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
          {% for param_name, param_value in stack.stack_parameters.items() %}
          <member>
            <ParameterKey>{{ param_name }}</ParameterKey>
            <ParameterValue>{{ param_value }}</ParameterValue>
            {% if param_name in stack.resource_map.no_echo_parameter_keys %}
            <ParameterValue>****</ParameterValue>
            {% else %}
            <ParameterValue>{{ param_value }}</ParameterValue>
            {% endif %}
          </member>
          {% endfor %}
        </Parameters>

@@ -361,6 +742,27 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """<DescribeStackEventsResponse xmlns="http://c
</DescribeStackEventsResponse>"""


LIST_CHANGE_SETS_RESPONSE = """<ListChangeSetsResponse>
  <ListChangeSetsResult>
    <Summaries>
      {% for change_set in change_sets %}
      <member>
        <StackId>{{ change_set.stack_id }}</StackId>
        <StackName>{{ change_set.stack_name }}</StackName>
        <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
        <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
        <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
        <Status>{{ change_set.status }}</Status>
        <StatusReason>{{ change_set.status_reason }}</StatusReason>
        <CreationTime>2011-05-23T15:47:44Z</CreationTime>
        <Description>{{ change_set.description }}</Description>
      </member>
      {% endfor %}
    </Summaries>
  </ListChangeSetsResult>
</ListChangeSetsResponse>"""


LIST_STACKS_RESPONSE = """<ListStacksResponse>
  <ListStacksResult>
    <StackSummaries>

@@ -435,3 +837,236 @@ LIST_EXPORTS_RESPONSE = """<ListExportsResponse xmlns="http://cloudformation.ama
    <RequestId>5ccc7dcd-744c-11e5-be70-example</RequestId>
  </ResponseMetadata>
</ListExportsResponse>"""

CREATE_STACK_SET_RESPONSE_TEMPLATE = """<CreateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <CreateStackSetResult>
    <StackSetId>{{ stackset.stackset_id }}</StackSetId>
  </CreateStackSetResult>
  <ResponseMetadata>
    <RequestId>f457258c-391d-41d1-861f-example</RequestId>
  </ResponseMetadata>
</CreateStackSetResponse>
"""

DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """<DescribeStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <DescribeStackSetResult>
    <StackSet>
      <Capabilities/>
      <StackSetARN>{{ stackset.arn }}</StackSetARN>
      <ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
      <AdministrationRoleARN>{{ stackset.admin_role }}</AdministrationRoleARN>
      <StackSetId>{{ stackset.id }}</StackSetId>
      <TemplateBody>{{ stackset.template }}</TemplateBody>
      <StackSetName>{{ stackset.name }}</StackSetName>
      <Parameters>
        {% for param_name, param_value in stackset.parameters.items() %}
        <member>
          <ParameterKey>{{ param_name }}</ParameterKey>
          <ParameterValue>{{ param_value }}</ParameterValue>
        </member>
        {% endfor %}
      </Parameters>
      <Tags>
        {% for tag_key, tag_value in stackset.tags.items() %}
        <member>
          <Key>{{ tag_key }}</Key>
          <Value>{{ tag_value }}</Value>
        </member>
        {% endfor %}
      </Tags>
      <Status>{{ stackset.status }}</Status>
    </StackSet>
  </DescribeStackSetResult>
  <ResponseMetadata>
    <RequestId>d8b64e11-5332-46e1-9603-example</RequestId>
  </ResponseMetadata>
</DescribeStackSetResponse>"""

DELETE_STACK_SET_RESPONSE_TEMPLATE = """<DeleteStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <DeleteStackSetResult/>
  <ResponseMetadata>
    <RequestId>c35ec2d0-d69f-4c4d-9bd7-example</RequestId>
  </ResponseMetadata>
</DeleteStackSetResponse>"""

CREATE_STACK_INSTANCES_TEMPLATE = """<CreateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <CreateStackInstancesResult>
    <OperationId>1459ad6d-63cc-4c96-a73e-example</OperationId>
  </CreateStackInstancesResult>
  <ResponseMetadata>
    <RequestId>6b29f7e3-69be-4d32-b374-example</RequestId>
  </ResponseMetadata>
</CreateStackInstancesResponse>
"""

LIST_STACK_INSTANCES_TEMPLATE = """<ListStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <ListStackInstancesResult>
    <Summaries>
      {% for instance in stackset.stack_instances %}
      <member>
        <StackId>{{ instance.StackId }}</StackId>
        <StackSetId>{{ instance.StackSetId }}</StackSetId>
        <Region>{{ instance.Region }}</Region>
        <Account>{{ instance.Account }}</Account>
        <Status>{{ instance.Status }}</Status>
      </member>
      {% endfor %}
    </Summaries>
  </ListStackInstancesResult>
  <ResponseMetadata>
    <RequestId>83c27e73-b498-410f-993c-example</RequestId>
  </ResponseMetadata>
</ListStackInstancesResponse>
"""

DELETE_STACK_INSTANCES_TEMPLATE = """<DeleteStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <DeleteStackInstancesResult>
    <OperationId>{{ operation.OperationId }}</OperationId>
  </DeleteStackInstancesResult>
  <ResponseMetadata>
    <RequestId>e5325090-66f6-4ecd-a531-example</RequestId>
  </ResponseMetadata>
</DeleteStackInstancesResponse>
"""

DESCRIBE_STACK_INSTANCE_TEMPLATE = """<DescribeStackInstanceResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <DescribeStackInstanceResult>
    <StackInstance>
      <StackId>{{ instance.StackId }}</StackId>
      <StackSetId>{{ instance.StackSetId }}</StackSetId>
      {% if instance.ParameterOverrides %}
      <ParameterOverrides>
        {% for override in instance.ParameterOverrides %}
        {% if override['ParameterKey'] or override['ParameterValue'] %}
        <member>
          <ParameterKey>{{ override.ParameterKey }}</ParameterKey>
          <UsePreviousValue>false</UsePreviousValue>
          <ParameterValue>{{ override.ParameterValue }}</ParameterValue>
        </member>
        {% endif %}
        {% endfor %}
      </ParameterOverrides>
      {% else %}
      <ParameterOverrides/>
      {% endif %}
      <Region>{{ instance.Region }}</Region>
      <Account>{{ instance.Account }}</Account>
      <Status>{{ instance.Status }}</Status>
    </StackInstance>
  </DescribeStackInstanceResult>
  <ResponseMetadata>
    <RequestId>c6c7be10-0343-4319-8a25-example</RequestId>
  </ResponseMetadata>
</DescribeStackInstanceResponse>
"""

LIST_STACK_SETS_TEMPLATE = """<ListStackSetsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <ListStackSetsResult>
    <Summaries>
      {% for key, value in stacksets.items() %}
      <member>
        <StackSetName>{{ value.name }}</StackSetName>
        <StackSetId>{{ value.id }}</StackSetId>
        <Status>{{ value.status }}</Status>
      </member>
      {% endfor %}
    </Summaries>
  </ListStackSetsResult>
  <ResponseMetadata>
    <RequestId>4dcacb73-841e-4ed8-b335-example</RequestId>
  </ResponseMetadata>
</ListStackSetsResponse>
"""

UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """<UpdateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <UpdateStackInstancesResult>
    <OperationId>{{ operation }}</OperationId>
  </UpdateStackInstancesResult>
  <ResponseMetadata>
    <RequestId>bdbf8e94-19b6-4ce4-af85-example</RequestId>
  </ResponseMetadata>
</UpdateStackInstancesResponse>
"""

UPDATE_STACK_SET_RESPONSE_TEMPLATE = """<UpdateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <UpdateStackSetResult>
    <OperationId>{{ operation.OperationId }}</OperationId>
  </UpdateStackSetResult>
  <ResponseMetadata>
    <RequestId>adac907b-17e3-43e6-a254-example</RequestId>
  </ResponseMetadata>
</UpdateStackSetResponse>
"""

LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """<ListStackSetOperationsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <ListStackSetOperationsResult>
    <Summaries>
      {% for operation in stackset.operations %}
      <member>
        <CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
        <OperationId>{{ operation.OperationId }}</OperationId>
        <Action>{{ operation.Action }}</Action>
        <EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
        <Status>{{ operation.Status }}</Status>
      </member>
      {% endfor %}
    </Summaries>
  </ListStackSetOperationsResult>
  <ResponseMetadata>
    <RequestId>65b9d9be-08bb-4a43-9a21-example</RequestId>
  </ResponseMetadata>
</ListStackSetOperationsResponse>
"""

STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """<StopStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <StopStackSetOperationResult/>
  <ResponseMetadata>
    <RequestId>2188554a-07c6-4396-b2c5-example</RequestId>
  </ResponseMetadata>
</StopStackSetOperationResponse>
"""

DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """<DescribeStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <DescribeStackSetOperationResult>
    <StackSetOperation>
      <ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
      <AdministrationRoleARN>arn:aws:iam::123456789012:role/{{ stackset.admin_role }}</AdministrationRoleARN>
      <StackSetId>{{ stackset.id }}</StackSetId>
      <CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
      <OperationId>{{ operation.OperationId }}</OperationId>
      <Action>{{ operation.Action }}</Action>
      <OperationPreferences>
        <RegionOrder/>
      </OperationPreferences>
      <EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
      <Status>{{ operation.Status }}</Status>
    </StackSetOperation>
  </DescribeStackSetOperationResult>
  <ResponseMetadata>
    <RequestId>2edc27b6-9ce2-486a-a192-example</RequestId>
  </ResponseMetadata>
</DescribeStackSetOperationResponse>
"""

LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """<ListStackSetOperationResultsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
  <ListStackSetOperationResultsResult>
    <Summaries>
      {% for instance in operation.Instances %}
|
||||
{% for account, region in instance.items() %}
|
||||
<member>
|
||||
<AccountGateResult>
|
||||
<StatusReason>Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate</StatusReason>
|
||||
<Status>SKIPPED</Status>
|
||||
</AccountGateResult>
|
||||
<Region>{{ region }}</Region>
|
||||
<Account>{{ account }}</Account>
|
||||
<Status>{{ operation.Status }}</Status>
|
||||
</member>
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
</Summaries>
|
||||
</ListStackSetOperationResultsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>ac05a9ce-5f98-4197-a29b-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListStackSetOperationResultsResponse>
|
||||
"""

@ -3,16 +3,34 @@ import uuid

import six
import random
import yaml
import os
import string

from cfnlint import decode, core


def generate_stack_id(stack_name):
def generate_stack_id(stack_name, region="us-east-1", account="123456789"):
    random_id = uuid.uuid4()
    return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id)
    return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id)


def generate_changeset_id(changeset_name, region_name):
    random_id = uuid.uuid4()
    return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id)


def generate_stackset_id(stackset_name):
    random_id = uuid.uuid4()
    return '{}:{}'.format(stackset_name, random_id)


def generate_stackset_arn(stackset_id, region_name):
    return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id)


def random_suffix():
    size = 12
    chars = list(range(10)) + ['A-Z']
    chars = list(range(10)) + list(string.ascii_uppercase)
    return ''.join(six.text_type(random.choice(chars)) for x in range(size))
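
For illustration, what the ID helpers above produce; the UUID suffixes vary per call:

generate_stack_id("teststack", region="us-west-2", account="123456789")
# 'arn:aws:cloudformation:us-west-2:123456789:stack/teststack/<uuid>'

generate_stackset_arn(generate_stackset_id("testset"), "us-west-2")
# 'arn:aws:cloudformation:us-west-2:123456789012:stackset/testset:<uuid>'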

@ -33,3 +51,33 @@ def yaml_tag_constructor(loader, tag, node):
        key = 'Fn::{}'.format(tag[1:])

    return {key: _f(loader, tag, node)}


def validate_template_cfn_lint(template):
    # Save the template to a temporary file -- cfn-lint requires a file
    filename = "file.tmp"
    with open(filename, "w") as file:
        file.write(template)
    abs_filename = os.path.abspath(filename)

    # decode handles both yaml and json
    template, matches = decode.decode(abs_filename, False)

    # Set cfn-lint to info
    core.configure_logging(None)

    # Initialize the ruleset to be applied (no overrules, no excludes)
    rules = core.get_rules([], [], [])

    # Use us-east-1 region (spec file) for validation
    regions = ['us-east-1']

    # Process all the rules and gather the errors
    matches = core.run_checks(
        abs_filename,
        template,
        rules,
        regions)

    return matches
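
A minimal usage sketch for the linter hook, assuming the cfn-lint package is installed; the template and the quoted rule output are illustrative:

BAD_TEMPLATE = """\
Resources:
  Bucket:
    Type: AWS::S3::Bucket
    Properties:
      BucketNam: my-bucket
"""

# Each match is a cfn-lint rule violation, e.g. an E3002 invalid-property error
# for the misspelled BucketNam key above.
for match in validate_template_cfn_lint(BAD_TEMPLATE):
    print(match)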

@ -1,15 +1,17 @@
import json

import json
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import RESTError
import boto.ec2.cloudwatch
import datetime

from datetime import datetime, timedelta
from dateutil.tz import tzutc
from .utils import make_arn_for_dashboard


DEFAULT_ACCOUNT_ID = 123456789012

_EMPTY_LIST = tuple()


class Dimension(object):

@ -18,6 +20,34 @@ class Dimension(object):
        self.value = value


def daterange(start, stop, step=timedelta(days=1), inclusive=False):
    """
    This method will iterate from `start` to `stop` datetimes with a timedelta step of `step`
    (supports iteration forwards or backwards in time)

    :param start: start datetime
    :param stop: end datetime
    :param step: step size as a timedelta
    :param inclusive: if True, the last item returned will be the step closest to `stop` (or `stop` itself if there is no remainder).
    """

    # inclusive=False to behave like range by default
    total_step_secs = step.total_seconds()
    assert total_step_secs != 0

    if total_step_secs > 0:
        while start < stop:
            yield start
            start = start + step
    else:
        while stop < start:
            yield start
            start = start + step

    if inclusive and start == stop:
        yield start
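
A quick sketch of the generator in use (forward iteration shown; a negative `step` walks backwards):

from datetime import datetime, timedelta

start = datetime(2018, 1, 1, 0, 0)
for ts in daterange(start, start + timedelta(hours=3), step=timedelta(hours=1)):
    print(ts)
# 2018-01-01 00:00:00
# 2018-01-01 01:00:00
# 2018-01-01 02:00:00  -- stop itself is excluded unless inclusive=True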


class FakeAlarm(BaseModel):

    def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods,

@ -38,35 +68,36 @@ class FakeAlarm(BaseModel):
        self.ok_actions = ok_actions
        self.insufficient_data_actions = insufficient_data_actions
        self.unit = unit
        self.configuration_updated_timestamp = datetime.datetime.utcnow()
        self.configuration_updated_timestamp = datetime.utcnow()

        self.history = []

        self.state_reason = ''
        self.state_reason_data = '{}'
        self.state = 'OK'
        self.state_updated_timestamp = datetime.datetime.utcnow()
        self.state_value = 'OK'
        self.state_updated_timestamp = datetime.utcnow()

    def update_state(self, reason, reason_data, state_value):
        # History type, which decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action
        self.history.append(
            ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp)
            ('StateUpdate', self.state_reason, self.state_reason_data, self.state_value, self.state_updated_timestamp)
        )

        self.state_reason = reason
        self.state_reason_data = reason_data
        self.state = state_value
        self.state_updated_timestamp = datetime.datetime.utcnow()
        self.state_value = state_value
        self.state_updated_timestamp = datetime.utcnow()


class MetricDatum(BaseModel):

    def __init__(self, namespace, name, value, dimensions):
    def __init__(self, namespace, name, value, dimensions, timestamp):
        self.namespace = namespace
        self.name = name
        self.value = value
        self.dimensions = [Dimension(dimension['name'], dimension[
            'value']) for dimension in dimensions]
        self.timestamp = timestamp or datetime.utcnow().replace(tzinfo=tzutc())
        self.dimensions = [Dimension(dimension['Name'], dimension[
            'Value']) for dimension in dimensions]


class Dashboard(BaseModel):

@ -75,7 +106,7 @@ class Dashboard(BaseModel):
        self.arn = make_arn_for_dashboard(DEFAULT_ACCOUNT_ID, name)
        self.name = name
        self.body = body
        self.last_modified = datetime.datetime.now()
        self.last_modified = datetime.now()

    @property
    def last_modified_iso(self):

@ -92,6 +123,53 @@ class Dashboard(BaseModel):
        return '<CloudWatchDashboard {0}>'.format(self.name)


class Statistics:
    def __init__(self, stats, dt):
        self.timestamp = iso_8601_datetime_with_milliseconds(dt)
        self.values = []
        self.stats = stats

    @property
    def sample_count(self):
        if 'SampleCount' not in self.stats:
            return None

        return len(self.values)

    @property
    def unit(self):
        return None

    @property
    def sum(self):
        if 'Sum' not in self.stats:
            return None

        return sum(self.values)

    @property
    def minimum(self):
        if 'Minimum' not in self.stats:
            return None

        return min(self.values)

    @property
    def maximum(self):
        if 'Maximum' not in self.stats:
            return None

        return max(self.values)

    @property
    def average(self):
        if 'Average' not in self.stats:
            return None

        # when moto requires Python 3.4+ we can switch to the statistics module
        return sum(self.values) / len(self.values)
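
Each `Statistics` instance is one datapoint bucket, and a property reports a value only when its statistic was requested; for example:

from datetime import datetime

s = Statistics(['SampleCount', 'Sum'], datetime(2018, 1, 1))
s.values.extend([1.5, 2.5])
print(s.sample_count, s.sum)  # 2 4.0
print(s.minimum)              # None -- 'Minimum' was not requested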


class CloudWatchBackend(BaseBackend):

    def __init__(self):

@ -143,16 +221,46 @@ class CloudWatchBackend(BaseBackend):
        ]

    def get_alarms_by_state_value(self, target_state):
        return filter(lambda alarm: alarm.state == target_state, self.alarms.values())
        return filter(lambda alarm: alarm.state_value == target_state, self.alarms.values())

    def delete_alarms(self, alarm_names):
        for alarm_name in alarm_names:
            self.alarms.pop(alarm_name, None)

    def put_metric_data(self, namespace, metric_data):
        for name, value, dimensions in metric_data:
        for metric_member in metric_data:
            # Preserve "datetime" for get_metric_statistics comparisons
            timestamp = metric_member.get('Timestamp')
            if timestamp is not None and type(timestamp) != datetime:
                timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
                timestamp = timestamp.replace(tzinfo=tzutc())
            self.metric_data.append(MetricDatum(
                namespace, name, value, dimensions))
                namespace, metric_member['MetricName'], float(metric_member.get('Value', 0)), metric_member.get('Dimensions.member', _EMPTY_LIST), timestamp))

    def get_metric_statistics(self, namespace, metric_name, start_time, end_time, period, stats):
        period_delta = timedelta(seconds=period)
        filtered_data = [md for md in self.metric_data if
                         md.namespace == namespace and md.name == metric_name and start_time <= md.timestamp <= end_time]

        # sort earliest to latest
        filtered_data = sorted(filtered_data, key=lambda x: x.timestamp)
        if not filtered_data:
            return []

        idx = 0
        data = list()
        for dt in daterange(filtered_data[0].timestamp, filtered_data[-1].timestamp + period_delta, period_delta):
            s = Statistics(stats, dt)
            while idx < len(filtered_data) and filtered_data[idx].timestamp < (dt + period_delta):
                s.values.append(filtered_data[idx].value)
                idx += 1

            if not s.values:
                continue

            data.append(s)

        return data
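
A sketch of the bucketing this produces: three datapoints 30 seconds apart summarised with a 60-second period (assumes, as in moto, that the backend's `__init__` sets up an empty `metric_data` list):

from datetime import datetime, timedelta

backend = CloudWatchBackend()  # assumption: __init__ creates backend.metric_data = []
base = datetime(2018, 1, 1).replace(tzinfo=tzutc())
for offset in (0, 30, 60):
    backend.metric_data.append(
        MetricDatum('tester', 'metric', 1.5, [], base + timedelta(seconds=offset)))

stats = backend.get_metric_statistics(
    'tester', 'metric', base, base + timedelta(hours=1), 60, ['SampleCount', 'Sum'])
# First bucket holds the 0s and 30s points, the second holds the 60s point
print([(s.sample_count, s.sum) for s in stats])  # [(2, 3.0), (1, 1.5)]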

    def get_all_metrics(self):
        return self.metric_data

@ -2,6 +2,7 @@ import json
from moto.core.utils import amzn_request_id
from moto.core.responses import BaseResponse
from .models import cloudwatch_backends
from dateutil.parser import parse as dtparse


class CloudWatchResponse(BaseResponse):

@ -75,35 +76,36 @@ class CloudWatchResponse(BaseResponse):
    @amzn_request_id
    def put_metric_data(self):
        namespace = self._get_param('Namespace')
        metric_data = []
        metric_index = 1
        while True:
            try:
                metric_name = self.querystring[
                    'MetricData.member.{0}.MetricName'.format(metric_index)][0]
            except KeyError:
                break
            value = self.querystring.get(
                'MetricData.member.{0}.Value'.format(metric_index), [None])[0]
            dimensions = []
            dimension_index = 1
            while True:
                try:
                    dimension_name = self.querystring[
                        'MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0]
                except KeyError:
                    break
                dimension_value = self.querystring[
                    'MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0]
                dimensions.append(
                    {'name': dimension_name, 'value': dimension_value})
                dimension_index += 1
            metric_data.append([metric_name, value, dimensions])
            metric_index += 1
        metric_data = self._get_multi_param('MetricData.member')

        self.cloudwatch_backend.put_metric_data(namespace, metric_data)
        template = self.response_template(PUT_METRIC_DATA_TEMPLATE)
        return template.render()

    @amzn_request_id
    def get_metric_statistics(self):
        namespace = self._get_param('Namespace')
        metric_name = self._get_param('MetricName')
        start_time = dtparse(self._get_param('StartTime'))
        end_time = dtparse(self._get_param('EndTime'))
        period = int(self._get_param('Period'))
        statistics = self._get_multi_param("Statistics.member")

        # Unsupported Parameters (To Be Implemented)
        unit = self._get_param('Unit')
        extended_statistics = self._get_param('ExtendedStatistics')
        dimensions = self._get_param('Dimensions')
        if unit or extended_statistics or dimensions:
            raise NotImplementedError()

        # TODO: this should instead throw InvalidParameterCombination
        if not statistics:
            raise NotImplementedError("Must specify either Statistics or ExtendedStatistics")

        datapoints = self.cloudwatch_backend.get_metric_statistics(namespace, metric_name, start_time, end_time, period, statistics)
        template = self.response_template(GET_METRIC_STATISTICS_TEMPLATE)
        return template.render(label=metric_name, datapoints=datapoints)
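
A hedged end-to-end sketch of these two handlers through boto3 and the `mock_cloudwatch` decorator (namespace and values are illustrative):

import boto3
from datetime import datetime, timedelta
from moto import mock_cloudwatch

@mock_cloudwatch
def test_statistics():
    client = boto3.client('cloudwatch', region_name='us-east-1')
    now = datetime.utcnow()
    client.put_metric_data(
        Namespace='tester',
        MetricData=[{'MetricName': 'metric', 'Value': 1.5, 'Timestamp': now}])
    result = client.get_metric_statistics(
        Namespace='tester', MetricName='metric',
        StartTime=now - timedelta(minutes=5), EndTime=now + timedelta(minutes=5),
        Period=60, Statistics=['SampleCount', 'Sum'])
    assert result['Datapoints'][0]['Sum'] == 1.5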

    @amzn_request_id
    def list_metrics(self):
        metrics = self.cloudwatch_backend.get_all_metrics()

@ -150,10 +152,6 @@ class CloudWatchResponse(BaseResponse):
        template = self.response_template(GET_DASHBOARD_TEMPLATE)
        return template.render(dashboard=dashboard)

    @amzn_request_id
    def get_metric_statistics(self):
        raise NotImplementedError()

    @amzn_request_id
    def list_dashboards(self):
        prefix = self._get_param('DashboardNamePrefix', '')

@ -266,6 +264,50 @@ PUT_METRIC_DATA_TEMPLATE = """<PutMetricDataResponse xmlns="http://monitoring.am
   </ResponseMetadata>
</PutMetricDataResponse>"""

GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
   <ResponseMetadata>
      <RequestId>
         {{ request_id }}
      </RequestId>
   </ResponseMetadata>

   <GetMetricStatisticsResult>
      <Label>{{ label }}</Label>
      <Datapoints>
         {% for datapoint in datapoints %}
         <member>
            {% if datapoint.sum is not none %}
            <Sum>{{ datapoint.sum }}</Sum>
            {% endif %}

            {% if datapoint.average is not none %}
            <Average>{{ datapoint.average }}</Average>
            {% endif %}

            {% if datapoint.maximum is not none %}
            <Maximum>{{ datapoint.maximum }}</Maximum>
            {% endif %}

            {% if datapoint.minimum is not none %}
            <Minimum>{{ datapoint.minimum }}</Minimum>
            {% endif %}

            {% if datapoint.sample_count is not none %}
            <SampleCount>{{ datapoint.sample_count }}</SampleCount>
            {% endif %}

            {% if datapoint.extended_statistics is not none %}
            <ExtendedStatistics>{{ datapoint.extended_statistics }}</ExtendedStatistics>
            {% endif %}

            <Timestamp>{{ datapoint.timestamp }}</Timestamp>
            <Unit>{{ datapoint.unit }}</Unit>
         </member>
         {% endfor %}
      </Datapoints>
   </GetMetricStatisticsResult>
</GetMetricStatisticsResponse>"""

LIST_METRICS_TEMPLATE = """<ListMetricsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
    <ListMetricsResult>
        <Metrics>
7
moto/cognitoidentity/__init__.py
Normal file
@ -0,0 +1,7 @@
from __future__ import unicode_literals
from .models import cognitoidentity_backends
from ..core.models import base_decorator, deprecated_base_decorator

cognitoidentity_backend = cognitoidentity_backends['us-east-1']
mock_cognitoidentity = base_decorator(cognitoidentity_backends)
mock_cognitoidentity_deprecated = deprecated_base_decorator(cognitoidentity_backends)
110
moto/cognitoidentity/models.py
Normal file
@ -0,0 +1,110 @@
from __future__ import unicode_literals

import datetime
import json

import boto.cognito.identity

from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds

from .utils import get_random_identity_id


class CognitoIdentity(BaseModel):

    def __init__(self, region, identity_pool_name, **kwargs):
        self.identity_pool_name = identity_pool_name
        self.allow_unauthenticated_identities = kwargs.get('allow_unauthenticated_identities', '')
        self.supported_login_providers = kwargs.get('supported_login_providers', {})
        self.developer_provider_name = kwargs.get('developer_provider_name', '')
        self.open_id_connect_provider_arns = kwargs.get('open_id_connect_provider_arns', [])
        self.cognito_identity_providers = kwargs.get('cognito_identity_providers', [])
        self.saml_provider_arns = kwargs.get('saml_provider_arns', [])

        self.identity_pool_id = get_random_identity_id(region)
        self.creation_time = datetime.datetime.utcnow()


class CognitoIdentityBackend(BaseBackend):

    def __init__(self, region):
        super(CognitoIdentityBackend, self).__init__()
        self.region = region
        self.identity_pools = OrderedDict()

    def reset(self):
        region = self.region
        self.__dict__ = {}
        self.__init__(region)

    def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities,
                             supported_login_providers, developer_provider_name, open_id_connect_provider_arns,
                             cognito_identity_providers, saml_provider_arns):

        new_identity = CognitoIdentity(self.region, identity_pool_name,
                                       allow_unauthenticated_identities=allow_unauthenticated_identities,
                                       supported_login_providers=supported_login_providers,
                                       developer_provider_name=developer_provider_name,
                                       open_id_connect_provider_arns=open_id_connect_provider_arns,
                                       cognito_identity_providers=cognito_identity_providers,
                                       saml_provider_arns=saml_provider_arns)
        self.identity_pools[new_identity.identity_pool_id] = new_identity

        response = json.dumps({
            'IdentityPoolId': new_identity.identity_pool_id,
            'IdentityPoolName': new_identity.identity_pool_name,
            'AllowUnauthenticatedIdentities': new_identity.allow_unauthenticated_identities,
            'SupportedLoginProviders': new_identity.supported_login_providers,
            'DeveloperProviderName': new_identity.developer_provider_name,
            'OpenIdConnectProviderARNs': new_identity.open_id_connect_provider_arns,
            'CognitoIdentityProviders': new_identity.cognito_identity_providers,
            'SamlProviderARNs': new_identity.saml_provider_arns
        })

        return response

    def get_id(self):
        identity_id = {'IdentityId': get_random_identity_id(self.region)}
        return json.dumps(identity_id)

    def get_credentials_for_identity(self, identity_id):
        duration = 90
        now = datetime.datetime.utcnow()
        expiration = now + datetime.timedelta(seconds=duration)
        expiration_str = str(iso_8601_datetime_with_milliseconds(expiration))
        response = json.dumps(
            {
                "Credentials":
                {
                    "AccessKeyId": "TESTACCESSKEY12345",
                    "Expiration": expiration_str,
                    "SecretKey": "ABCSECRETKEY",
                    "SessionToken": "ABC12345"
                },
                "IdentityId": identity_id
            })
        return response

    def get_open_id_token_for_developer_identity(self, identity_id):
        response = json.dumps(
            {
                "IdentityId": identity_id,
                "Token": get_random_identity_id(self.region)
            })
        return response

    def get_open_id_token(self, identity_id):
        response = json.dumps(
            {
                "IdentityId": identity_id,
                "Token": get_random_identity_id(self.region)
            }
        )
        return response


cognitoidentity_backends = {}
for region in boto.cognito.identity.regions():
    cognitoidentity_backends[region.name] = CognitoIdentityBackend(region.name)
42
moto/cognitoidentity/responses.py
Normal file
@ -0,0 +1,42 @@
from __future__ import unicode_literals

from moto.core.responses import BaseResponse

from .models import cognitoidentity_backends
from .utils import get_random_identity_id


class CognitoIdentityResponse(BaseResponse):

    def create_identity_pool(self):
        identity_pool_name = self._get_param('IdentityPoolName')
        allow_unauthenticated_identities = self._get_param('AllowUnauthenticatedIdentities')
        supported_login_providers = self._get_param('SupportedLoginProviders')
        developer_provider_name = self._get_param('DeveloperProviderName')
        open_id_connect_provider_arns = self._get_param('OpenIdConnectProviderARNs')
        cognito_identity_providers = self._get_param('CognitoIdentityProviders')
        saml_provider_arns = self._get_param('SamlProviderARNs')
        return cognitoidentity_backends[self.region].create_identity_pool(
            identity_pool_name=identity_pool_name,
            allow_unauthenticated_identities=allow_unauthenticated_identities,
            supported_login_providers=supported_login_providers,
            developer_provider_name=developer_provider_name,
            open_id_connect_provider_arns=open_id_connect_provider_arns,
            cognito_identity_providers=cognito_identity_providers,
            saml_provider_arns=saml_provider_arns)

    def get_id(self):
        return cognitoidentity_backends[self.region].get_id()

    def get_credentials_for_identity(self):
        return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId'))

    def get_open_id_token_for_developer_identity(self):
        return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(
            self._get_param('IdentityId') or get_random_identity_id(self.region)
        )

    def get_open_id_token(self):
        return cognitoidentity_backends[self.region].get_open_id_token(
            self._get_param("IdentityId") or get_random_identity_id(self.region)
        )
10
moto/cognitoidentity/urls.py
Normal file
@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import CognitoIdentityResponse

url_bases = [
    "https?://cognito-identity.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': CognitoIdentityResponse.dispatch,
}
5
moto/cognitoidentity/utils.py
Normal file
@ -0,0 +1,5 @@
from moto.core.utils import get_random_hex


def get_random_identity_id(region):
    return "{0}:{1}".format(region, get_random_hex(length=19))
6
moto/cognitoidp/__init__.py
Normal file
@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import cognitoidp_backends
from ..core.models import base_decorator, deprecated_base_decorator

mock_cognitoidp = base_decorator(cognitoidp_backends)
mock_cognitoidp_deprecated = deprecated_base_decorator(cognitoidp_backends)
44
moto/cognitoidp/exceptions.py
Normal file
@ -0,0 +1,44 @@
from __future__ import unicode_literals

import json
from werkzeug.exceptions import BadRequest


class ResourceNotFoundError(BadRequest):

    def __init__(self, message):
        super(ResourceNotFoundError, self).__init__()
        self.description = json.dumps({
            "message": message,
            '__type': 'ResourceNotFoundException',
        })


class UserNotFoundError(BadRequest):

    def __init__(self, message):
        super(UserNotFoundError, self).__init__()
        self.description = json.dumps({
            "message": message,
            '__type': 'UserNotFoundException',
        })


class GroupExistsException(BadRequest):

    def __init__(self, message):
        super(GroupExistsException, self).__init__()
        self.description = json.dumps({
            "message": message,
            '__type': 'GroupExistsException',
        })


class NotAuthorizedError(BadRequest):

    def __init__(self, message):
        super(NotAuthorizedError, self).__init__()
        self.description = json.dumps({
            "message": message,
            '__type': 'NotAuthorizedException',
        })
746
moto/cognitoidp/models.py
Normal file
@ -0,0 +1,746 @@
from __future__ import unicode_literals

import datetime
import functools
import hashlib
import itertools
import json
import os
import time
import uuid

import boto.cognito.identity
from jose import jws

from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from .exceptions import GroupExistsException, NotAuthorizedError, ResourceNotFoundError, UserNotFoundError

UserStatus = {
    "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD",
    "CONFIRMED": "CONFIRMED",
}


def paginate(limit, start_arg="next_token", limit_arg="max_results"):
    """Returns a limited result list and an offset into the list of remaining items

    Takes the `next_token` and `max_results` kwargs given to a function and handles
    the slicing of the results. The kwarg `next_token` is the offset into the
    list to begin slicing from. `max_results` is the size of the result required.

    If `max_results` is not supplied then the `limit` parameter is used as a
    default.

    :param limit_arg: the name of the argument in the decorated function that
    controls the amount of items returned
    :param start_arg: the name of the argument in the decorated function that
    provides the starting offset
    :param limit: a default maximum of items to return
    :return: a tuple containing a list of items, and the offset into the list
    """
    default_start = 0

    def outer_wrapper(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
            lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
            stop = start + lim
            result = func(*args, **kwargs)
            limited_results = list(itertools.islice(result, start, stop))
            next_token = stop if stop < len(result) else None
            return limited_results, next_token
        return wrapper
    return outer_wrapper
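
A small sketch of the decorator's slicing behaviour; the decorated function and its data are made up for illustration:

@paginate(2)
def list_widgets(max_results=None, next_token=None):
    return ["a", "b", "c", "d", "e"]

print(list_widgets())              # (['a', 'b'], 2)
print(list_widgets(next_token=2))  # (['c', 'd'], 4)
print(list_widgets(next_token=4))  # (['e'], None) -- no further page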


class CognitoIdpUserPool(BaseModel):

    def __init__(self, region, name, extended_config):
        self.region = region
        self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex))
        self.name = name
        self.status = None
        self.extended_config = extended_config or {}
        self.creation_date = datetime.datetime.utcnow()
        self.last_modified_date = datetime.datetime.utcnow()

        self.clients = OrderedDict()
        self.identity_providers = OrderedDict()
        self.groups = OrderedDict()
        self.users = OrderedDict()
        self.refresh_tokens = {}
        self.access_tokens = {}
        self.id_tokens = {}

        with open(os.path.join(os.path.dirname(__file__), "resources/jwks-private.json")) as f:
            self.json_web_key = json.loads(f.read())

    def _base_json(self):
        return {
            "Id": self.id,
            "Name": self.name,
            "Status": self.status,
            "CreationDate": time.mktime(self.creation_date.timetuple()),
            "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
        }

    def to_json(self, extended=False):
        user_pool_json = self._base_json()
        if extended:
            user_pool_json.update(self.extended_config)
        else:
            user_pool_json["LambdaConfig"] = self.extended_config.get("LambdaConfig") or {}

        return user_pool_json

    def create_jwt(self, client_id, username, expires_in=60 * 60, extra_data={}):
        now = int(time.time())
        payload = {
            "iss": "https://cognito-idp.{}.amazonaws.com/{}".format(self.region, self.id),
            "sub": self.users[username].id,
            "aud": client_id,
            "token_use": "id",
            "auth_time": now,
            "exp": now + expires_in,
        }
        payload.update(extra_data)

        return jws.sign(payload, self.json_web_key, algorithm='RS256'), expires_in
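
The tokens are plain RS256 JWTs signed with the bundled dummy key, so they verify against the public JWKS; a sketch with python-jose (`pool`, `client_id` and `username` are assumed to exist):

import json
from jose import jws

token, _ = pool.create_jwt(client_id, username)  # pool: a CognitoIdpUserPool

with open("moto/cognitoidp/resources/jwks-public.json") as f:  # bundled dummy key
    public_key = json.load(f)["keys"][0]

claims = json.loads(jws.verify(token, public_key, algorithms=['RS256']))
assert claims["aud"] == client_id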

    def create_id_token(self, client_id, username):
        id_token, expires_in = self.create_jwt(client_id, username)
        self.id_tokens[id_token] = (client_id, username)
        return id_token, expires_in

    def create_refresh_token(self, client_id, username):
        refresh_token = str(uuid.uuid4())
        self.refresh_tokens[refresh_token] = (client_id, username)
        return refresh_token

    def create_access_token(self, client_id, username):
        extra_data = self.get_user_extra_data_by_client_id(
            client_id, username
        )
        access_token, expires_in = self.create_jwt(client_id, username,
                                                   extra_data=extra_data)
        self.access_tokens[access_token] = (client_id, username)
        return access_token, expires_in

    def create_tokens_from_refresh_token(self, refresh_token):
        client_id, username = self.refresh_tokens.get(refresh_token)
        if not username:
            raise NotAuthorizedError(refresh_token)

        access_token, expires_in = self.create_access_token(client_id, username)
        id_token, _ = self.create_id_token(client_id, username)
        return access_token, id_token, expires_in

    def get_user_extra_data_by_client_id(self, client_id, username):
        extra_data = {}
        current_client = self.clients.get(client_id, None)
        if current_client:
            for readable_field in current_client.get_readable_fields():
                attribute = list(filter(
                    lambda f: f['Name'] == readable_field,
                    self.users.get(username).attributes
                ))
                if len(attribute) > 0:
                    extra_data.update({
                        attribute[0]['Name']: attribute[0]['Value']
                    })
        return extra_data


class CognitoIdpUserPoolDomain(BaseModel):

    def __init__(self, user_pool_id, domain, custom_domain_config=None):
        self.user_pool_id = user_pool_id
        self.domain = domain
        self.custom_domain_config = custom_domain_config or {}

    def _distribution_name(self):
        if self.custom_domain_config and \
                'CertificateArn' in self.custom_domain_config:
            hash = hashlib.md5(
                self.custom_domain_config['CertificateArn'].encode('utf-8')
            ).hexdigest()
            return "{hash}.cloudfront.net".format(hash=hash[:16])
        return None

    def to_json(self, extended=True):
        distribution = self._distribution_name()
        if extended:
            return {
                "UserPoolId": self.user_pool_id,
                "AWSAccountId": str(uuid.uuid4()),
                "CloudFrontDistribution": distribution,
                "Domain": self.domain,
                "S3Bucket": None,
                "Status": "ACTIVE",
                "Version": None,
            }
        elif distribution:
            return {
                "CloudFrontDomain": distribution,
            }
        return None


class CognitoIdpUserPoolClient(BaseModel):

    def __init__(self, user_pool_id, extended_config):
        self.user_pool_id = user_pool_id
        self.id = str(uuid.uuid4())
        self.secret = str(uuid.uuid4())
        self.extended_config = extended_config or {}

    def _base_json(self):
        return {
            "ClientId": self.id,
            "ClientName": self.extended_config.get("ClientName"),
            "UserPoolId": self.user_pool_id,
        }

    def to_json(self, extended=False):
        user_pool_client_json = self._base_json()
        if extended:
            user_pool_client_json.update(self.extended_config)

        return user_pool_client_json

    def get_readable_fields(self):
        return self.extended_config.get('ReadAttributes', [])


class CognitoIdpIdentityProvider(BaseModel):

    def __init__(self, name, extended_config):
        self.name = name
        self.extended_config = extended_config or {}
        self.creation_date = datetime.datetime.utcnow()
        self.last_modified_date = datetime.datetime.utcnow()

    def _base_json(self):
        return {
            "ProviderName": self.name,
            "ProviderType": self.extended_config.get("ProviderType"),
            "CreationDate": time.mktime(self.creation_date.timetuple()),
            "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
        }

    def to_json(self, extended=False):
        identity_provider_json = self._base_json()
        if extended:
            identity_provider_json.update(self.extended_config)

        return identity_provider_json


class CognitoIdpGroup(BaseModel):

    def __init__(self, user_pool_id, group_name, description, role_arn, precedence):
        self.user_pool_id = user_pool_id
        self.group_name = group_name
        self.description = description or ""
        self.role_arn = role_arn
        self.precedence = precedence
        self.last_modified_date = datetime.datetime.now()
        self.creation_date = self.last_modified_date

        # Users who are members of this group.
        # Note that these links are bidirectional.
        self.users = set()

    def to_json(self):
        return {
            "GroupName": self.group_name,
            "UserPoolId": self.user_pool_id,
            "Description": self.description,
            "RoleArn": self.role_arn,
            "Precedence": self.precedence,
            "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
            "CreationDate": time.mktime(self.creation_date.timetuple()),
        }


class CognitoIdpUser(BaseModel):

    def __init__(self, user_pool_id, username, password, status, attributes):
        self.id = str(uuid.uuid4())
        self.user_pool_id = user_pool_id
        self.username = username
        self.password = password
        self.status = status
        self.enabled = True
        self.attributes = attributes
        self.create_date = datetime.datetime.utcnow()
        self.last_modified_date = datetime.datetime.utcnow()

        # Groups this user is a member of.
        # Note that these links are bidirectional.
        self.groups = set()

    def _base_json(self):
        return {
            "UserPoolId": self.user_pool_id,
            "Username": self.username,
            "UserStatus": self.status,
            "UserCreateDate": time.mktime(self.create_date.timetuple()),
            "UserLastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
        }

    # list_users brings back "Attributes" while admin_get_user brings back "UserAttributes".
    def to_json(self, extended=False, attributes_key="Attributes"):
        user_json = self._base_json()
        if extended:
            user_json.update(
                {
                    "Enabled": self.enabled,
                    attributes_key: self.attributes,
                    "MFAOptions": []
                }
            )

        return user_json

    def update_attributes(self, new_attributes):

        def flatten_attrs(attrs):
            return {attr['Name']: attr['Value'] for attr in attrs}

        def expand_attrs(attrs):
            return [{'Name': k, 'Value': v} for k, v in attrs.items()]

        flat_attributes = flatten_attrs(self.attributes)
        flat_attributes.update(flatten_attrs(new_attributes))
        self.attributes = expand_attrs(flat_attributes)


class CognitoIdpBackend(BaseBackend):

    def __init__(self, region):
        super(CognitoIdpBackend, self).__init__()
        self.region = region
        self.user_pools = OrderedDict()
        self.user_pool_domains = OrderedDict()
        self.sessions = {}

    def reset(self):
        region = self.region
        self.__dict__ = {}
        self.__init__(region)

    # User pool
    def create_user_pool(self, name, extended_config):
        user_pool = CognitoIdpUserPool(self.region, name, extended_config)
        self.user_pools[user_pool.id] = user_pool
        return user_pool

    @paginate(60)
    def list_user_pools(self, max_results=None, next_token=None):
        return self.user_pools.values()

    def describe_user_pool(self, user_pool_id):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool

    def delete_user_pool(self, user_pool_id):
        if user_pool_id not in self.user_pools:
            raise ResourceNotFoundError(user_pool_id)

        del self.user_pools[user_pool_id]

    # User pool domain
    def create_user_pool_domain(self, user_pool_id, domain, custom_domain_config=None):
        if user_pool_id not in self.user_pools:
            raise ResourceNotFoundError(user_pool_id)

        user_pool_domain = CognitoIdpUserPoolDomain(
            user_pool_id, domain, custom_domain_config=custom_domain_config
        )
        self.user_pool_domains[domain] = user_pool_domain
        return user_pool_domain

    def describe_user_pool_domain(self, domain):
        if domain not in self.user_pool_domains:
            return None

        return self.user_pool_domains[domain]

    def delete_user_pool_domain(self, domain):
        if domain not in self.user_pool_domains:
            raise ResourceNotFoundError(domain)

        del self.user_pool_domains[domain]

    def update_user_pool_domain(self, domain, custom_domain_config):
        if domain not in self.user_pool_domains:
            raise ResourceNotFoundError(domain)

        user_pool_domain = self.user_pool_domains[domain]
        user_pool_domain.custom_domain_config = custom_domain_config
        return user_pool_domain

    # User pool client
    def create_user_pool_client(self, user_pool_id, extended_config):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        user_pool_client = CognitoIdpUserPoolClient(user_pool_id, extended_config)
        user_pool.clients[user_pool_client.id] = user_pool_client
        return user_pool_client

    @paginate(60)
    def list_user_pool_clients(self, user_pool_id, max_results=None, next_token=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool.clients.values()

    def describe_user_pool_client(self, user_pool_id, client_id):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        client = user_pool.clients.get(client_id)
        if not client:
            raise ResourceNotFoundError(client_id)

        return client

    def update_user_pool_client(self, user_pool_id, client_id, extended_config):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        client = user_pool.clients.get(client_id)
        if not client:
            raise ResourceNotFoundError(client_id)

        client.extended_config.update(extended_config)
        return client

    def delete_user_pool_client(self, user_pool_id, client_id):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if client_id not in user_pool.clients:
            raise ResourceNotFoundError(client_id)

        del user_pool.clients[client_id]

    # Identity provider
    def create_identity_provider(self, user_pool_id, name, extended_config):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        identity_provider = CognitoIdpIdentityProvider(name, extended_config)
        user_pool.identity_providers[name] = identity_provider
        return identity_provider

    @paginate(60)
    def list_identity_providers(self, user_pool_id, max_results=None, next_token=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool.identity_providers.values()

    def describe_identity_provider(self, user_pool_id, name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        identity_provider = user_pool.identity_providers.get(name)
        if not identity_provider:
            raise ResourceNotFoundError(name)

        return identity_provider

    def update_identity_provider(self, user_pool_id, name, extended_config):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        identity_provider = user_pool.identity_providers.get(name)
        if not identity_provider:
            raise ResourceNotFoundError(name)

        identity_provider.extended_config.update(extended_config)

        return identity_provider

    def delete_identity_provider(self, user_pool_id, name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if name not in user_pool.identity_providers:
            raise ResourceNotFoundError(name)

        del user_pool.identity_providers[name]

    # Group
    def create_group(self, user_pool_id, group_name, description, role_arn, precedence):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        group = CognitoIdpGroup(user_pool_id, group_name, description, role_arn, precedence)
        if group.group_name in user_pool.groups:
            raise GroupExistsException("A group with the name already exists")
        user_pool.groups[group.group_name] = group

        return group

    def get_group(self, user_pool_id, group_name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if group_name not in user_pool.groups:
            raise ResourceNotFoundError(group_name)

        return user_pool.groups[group_name]

    def list_groups(self, user_pool_id):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool.groups.values()

    def delete_group(self, user_pool_id, group_name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if group_name not in user_pool.groups:
            raise ResourceNotFoundError(group_name)

        group = user_pool.groups[group_name]
        for user in group.users:
            user.groups.remove(group)

        del user_pool.groups[group_name]

    def admin_add_user_to_group(self, user_pool_id, group_name, username):
        group = self.get_group(user_pool_id, group_name)
        user = self.admin_get_user(user_pool_id, username)

        group.users.add(user)
        user.groups.add(group)

    def list_users_in_group(self, user_pool_id, group_name):
        group = self.get_group(user_pool_id, group_name)
        return list(group.users)

    def admin_list_groups_for_user(self, user_pool_id, username):
        user = self.admin_get_user(user_pool_id, username)
        return list(user.groups)

    def admin_remove_user_from_group(self, user_pool_id, group_name, username):
        group = self.get_group(user_pool_id, group_name)
        user = self.admin_get_user(user_pool_id, username)

        group.users.discard(user)
        user.groups.discard(group)

    # User
    def admin_create_user(self, user_pool_id, username, temporary_password, attributes):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        user = CognitoIdpUser(user_pool_id, username, temporary_password, UserStatus["FORCE_CHANGE_PASSWORD"], attributes)
        user_pool.users[user.username] = user
        return user

    def admin_get_user(self, user_pool_id, username):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if username not in user_pool.users:
            raise UserNotFoundError(username)

        return user_pool.users[username]

    @paginate(60, "pagination_token", "limit")
    def list_users(self, user_pool_id, pagination_token=None, limit=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool.users.values()

    def admin_disable_user(self, user_pool_id, username):
        user = self.admin_get_user(user_pool_id, username)
        user.enabled = False

    def admin_enable_user(self, user_pool_id, username):
        user = self.admin_get_user(user_pool_id, username)
        user.enabled = True

    def admin_delete_user(self, user_pool_id, username):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if username not in user_pool.users:
            raise UserNotFoundError(username)

        user = user_pool.users[username]
        for group in user.groups:
            group.users.remove(user)

        del user_pool.users[username]

    def _log_user_in(self, user_pool, client, username):
        refresh_token = user_pool.create_refresh_token(client.id, username)
        access_token, id_token, expires_in = user_pool.create_tokens_from_refresh_token(refresh_token)

        return {
            "AuthenticationResult": {
                "IdToken": id_token,
                "AccessToken": access_token,
                "RefreshToken": refresh_token,
                "ExpiresIn": expires_in,
            }
        }

    def admin_initiate_auth(self, user_pool_id, client_id, auth_flow, auth_parameters):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        client = user_pool.clients.get(client_id)
        if not client:
            raise ResourceNotFoundError(client_id)

        if auth_flow == "ADMIN_NO_SRP_AUTH":
            username = auth_parameters.get("USERNAME")
            password = auth_parameters.get("PASSWORD")
            user = user_pool.users.get(username)
            if not user:
                raise UserNotFoundError(username)

            if user.password != password:
                raise NotAuthorizedError(username)

            if user.status == UserStatus["FORCE_CHANGE_PASSWORD"]:
                session = str(uuid.uuid4())
                self.sessions[session] = user_pool

                return {
                    "ChallengeName": "NEW_PASSWORD_REQUIRED",
                    "ChallengeParameters": {},
                    "Session": session,
                }

            return self._log_user_in(user_pool, client, username)
        elif auth_flow == "REFRESH_TOKEN":
            refresh_token = auth_parameters.get("REFRESH_TOKEN")
            # create_tokens_from_refresh_token returns (access_token, id_token, expires_in)
            access_token, id_token, expires_in = user_pool.create_tokens_from_refresh_token(refresh_token)

            return {
                "AuthenticationResult": {
                    "IdToken": id_token,
                    "AccessToken": access_token,
                    "ExpiresIn": expires_in,
                }
            }
        else:
            return {}
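
A hedged sketch of the ADMIN_NO_SRP_AUTH flow through boto3 (names and the temporary password are illustrative):

import boto3
from moto import mock_cognitoidp

@mock_cognitoidp
def test_admin_auth():
    conn = boto3.client('cognito-idp', 'us-west-2')
    pool_id = conn.create_user_pool(PoolName='pool')['UserPool']['Id']
    client_id = conn.create_user_pool_client(
        UserPoolId=pool_id, ClientName='client')['UserPoolClient']['ClientId']
    conn.admin_create_user(
        UserPoolId=pool_id, Username='user', TemporaryPassword='temp')

    # First login against a FORCE_CHANGE_PASSWORD user returns a challenge
    result = conn.admin_initiate_auth(
        UserPoolId=pool_id, ClientId=client_id, AuthFlow='ADMIN_NO_SRP_AUTH',
        AuthParameters={'USERNAME': 'user', 'PASSWORD': 'temp'})
    assert result['ChallengeName'] == 'NEW_PASSWORD_REQUIRED'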

    def respond_to_auth_challenge(self, session, client_id, challenge_name, challenge_responses):
        user_pool = self.sessions.get(session)
        if not user_pool:
            raise ResourceNotFoundError(session)

        client = user_pool.clients.get(client_id)
        if not client:
            raise ResourceNotFoundError(client_id)

        if challenge_name == "NEW_PASSWORD_REQUIRED":
            username = challenge_responses.get("USERNAME")
            new_password = challenge_responses.get("NEW_PASSWORD")
            user = user_pool.users.get(username)
            if not user:
                raise UserNotFoundError(username)

            user.password = new_password
            user.status = UserStatus["CONFIRMED"]
            del self.sessions[session]

            return self._log_user_in(user_pool, client, username)
        else:
            return {}

    def confirm_forgot_password(self, client_id, username, password):
        for user_pool in self.user_pools.values():
            if client_id in user_pool.clients and username in user_pool.users:
                user_pool.users[username].password = password
                break
        else:
            raise ResourceNotFoundError(client_id)

    def change_password(self, access_token, previous_password, proposed_password):
        for user_pool in self.user_pools.values():
            if access_token in user_pool.access_tokens:
                _, username = user_pool.access_tokens[access_token]
                user = user_pool.users.get(username)
                if not user:
                    raise UserNotFoundError(username)

                if user.password != previous_password:
                    raise NotAuthorizedError(username)

                user.password = proposed_password
                if user.status == UserStatus["FORCE_CHANGE_PASSWORD"]:
                    user.status = UserStatus["CONFIRMED"]

                break
        else:
            raise NotAuthorizedError(access_token)

    def admin_update_user_attributes(self, user_pool_id, username, attributes):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if username not in user_pool.users:
            raise UserNotFoundError(username)

        user = user_pool.users[username]
        user.update_attributes(attributes)


cognitoidp_backends = {}
for region in boto.cognito.identity.regions():
    cognitoidp_backends[region.name] = CognitoIdpBackend(region.name)


# Hack to help moto-server process requests on localhost, where the region isn't
# specified in the host header. Some endpoints (change password, confirm forgot
# password) have no authorization header from which to extract the region.
def find_region_by_value(key, value):
    for region in cognitoidp_backends:
        backend = cognitoidp_backends[region]
        for user_pool in backend.user_pools.values():
            if key == "client_id" and value in user_pool.clients:
                return region

            if key == "access_token" and value in user_pool.access_tokens:
                return region

    return list(cognitoidp_backends.keys())[0]
9
moto/cognitoidp/resources/jwks-private.json
Normal file
@ -0,0 +1,9 @@
{
  "alg": "RS256",
  "d": "DrrLT2qMERN0Id-bNglOe6SVkUNF3MTIzrH-TVkMZhsHk8kyqiqt-8JbLQMh2gOgTIjpu93b2_UREGA0BGdWs34hv0v7Gx8uIngCY6e6XO8LDemOo-2VHZHl5Ew-lrRYhwq12c_c4mfavAdMzXHODrpXSnqLnbFK88S-3fu6Da4czc4Svo4v8MkGZk_fcTml3Y1jIFHxbbTWka37j4NLpAzdfvX--J086m-LbZ8CJL_lGMKbAKsWURMmzCFL9ZFH9JdzX79KeDOH0GrzGwS_cOsZHsCamF_CWrtG4asPt-SHyn_k0X4JJJgAWVA674VCqorMAPDVYIzKJOUMImmsEQ",
  "e": "AQAB",
  "kid": "dummy",
  "kty": "RSA",
  "n": "j1pT3xKbswmMySvCefmiD3mfDaRFpZ9Y3Jl4fF0hMaCRVAt_e0yR7BeueDfqmj_NhVSO0WB5ao5e8V-9RFQOtK8SrqKl3i01-CyWYPICwybaGKhbJJR0S_6cZ8n5kscF1MjpIlsJcCzm-yKgTc3Mxk6KtrLoNgRvMwGLeHUXPkhS9YHfDKRe864iMFOK4df69brIYEICG2VLduh0hXYa0i-J3drwm7vxNdX7pVpCDu34qJtYoWq6CXt3Tzfi3YfWp8cFjGNbaDa3WnCd2IXpp0TFsFS-cEsw5rJjSl5OllJGeZKBtLeyVTy9PYwnk7MW43WSYeYstbk9NluX4H8Iuw",
  "use": "sig"
}
12
moto/cognitoidp/resources/jwks-public.json
Normal file
@ -0,0 +1,12 @@
{
  "keys": [
    {
      "alg": "RS256",
      "e": "AQAB",
      "kid": "dummy",
      "kty": "RSA",
      "n": "j1pT3xKbswmMySvCefmiD3mfDaRFpZ9Y3Jl4fF0hMaCRVAt_e0yR7BeueDfqmj_NhVSO0WB5ao5e8V-9RFQOtK8SrqKl3i01-CyWYPICwybaGKhbJJR0S_6cZ8n5kscF1MjpIlsJcCzm-yKgTc3Mxk6KtrLoNgRvMwGLeHUXPkhS9YHfDKRe864iMFOK4df69brIYEICG2VLduh0hXYa0i-J3drwm7vxNdX7pVpCDu34qJtYoWq6CXt3Tzfi3YfWp8cFjGNbaDa3WnCd2IXpp0TFsFS-cEsw5rJjSl5OllJGeZKBtLeyVTy9PYwnk7MW43WSYeYstbk9NluX4H8Iuw",
      "use": "sig"
    }
  ]
}
387
moto/cognitoidp/responses.py
Normal file
387
moto/cognitoidp/responses.py
Normal file
@ -0,0 +1,387 @@
from __future__ import unicode_literals

import json
import os

from moto.core.responses import BaseResponse
from .models import cognitoidp_backends, find_region_by_value


class CognitoIdpResponse(BaseResponse):

    @property
    def parameters(self):
        return json.loads(self.body)

    # User pool
    def create_user_pool(self):
        name = self.parameters.pop("PoolName")
        user_pool = cognitoidp_backends[self.region].create_user_pool(name, self.parameters)
        return json.dumps({
            "UserPool": user_pool.to_json(extended=True)
        })

    def list_user_pools(self):
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        user_pools, next_token = cognitoidp_backends[self.region].list_user_pools(
            max_results=max_results, next_token=next_token
        )
        response = {
            "UserPools": [user_pool.to_json() for user_pool in user_pools],
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_user_pool(self):
        user_pool_id = self._get_param("UserPoolId")
        user_pool = cognitoidp_backends[self.region].describe_user_pool(user_pool_id)
        return json.dumps({
            "UserPool": user_pool.to_json(extended=True)
        })

    def delete_user_pool(self):
        user_pool_id = self._get_param("UserPoolId")
        cognitoidp_backends[self.region].delete_user_pool(user_pool_id)
        return ""

    # User pool domain
    def create_user_pool_domain(self):
        domain = self._get_param("Domain")
        user_pool_id = self._get_param("UserPoolId")
        custom_domain_config = self._get_param("CustomDomainConfig")
        user_pool_domain = cognitoidp_backends[self.region].create_user_pool_domain(
            user_pool_id, domain, custom_domain_config
        )
        domain_description = user_pool_domain.to_json(extended=False)
        if domain_description:
            return json.dumps(domain_description)
        return ""

    def describe_user_pool_domain(self):
        domain = self._get_param("Domain")
        user_pool_domain = cognitoidp_backends[self.region].describe_user_pool_domain(domain)
        domain_description = {}
        if user_pool_domain:
            domain_description = user_pool_domain.to_json()

        return json.dumps({
            "DomainDescription": domain_description
        })

    def delete_user_pool_domain(self):
        domain = self._get_param("Domain")
        cognitoidp_backends[self.region].delete_user_pool_domain(domain)
        return ""

    def update_user_pool_domain(self):
        domain = self._get_param("Domain")
        custom_domain_config = self._get_param("CustomDomainConfig")
        user_pool_domain = cognitoidp_backends[self.region].update_user_pool_domain(
            domain, custom_domain_config
        )
        domain_description = user_pool_domain.to_json(extended=False)
        if domain_description:
            return json.dumps(domain_description)
        return ""

    # User pool client
    def create_user_pool_client(self):
        user_pool_id = self.parameters.pop("UserPoolId")
        user_pool_client = cognitoidp_backends[self.region].create_user_pool_client(user_pool_id, self.parameters)
        return json.dumps({
            "UserPoolClient": user_pool_client.to_json(extended=True)
        })

    def list_user_pool_clients(self):
        user_pool_id = self._get_param("UserPoolId")
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        user_pool_clients, next_token = cognitoidp_backends[self.region].list_user_pool_clients(
            user_pool_id, max_results=max_results, next_token=next_token)
        response = {
            "UserPoolClients": [user_pool_client.to_json() for user_pool_client in user_pool_clients]
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_user_pool_client(self):
        user_pool_id = self._get_param("UserPoolId")
        client_id = self._get_param("ClientId")
        user_pool_client = cognitoidp_backends[self.region].describe_user_pool_client(user_pool_id, client_id)
        return json.dumps({
            "UserPoolClient": user_pool_client.to_json(extended=True)
        })

    def update_user_pool_client(self):
        user_pool_id = self.parameters.pop("UserPoolId")
        client_id = self.parameters.pop("ClientId")
        user_pool_client = cognitoidp_backends[self.region].update_user_pool_client(user_pool_id, client_id, self.parameters)
        return json.dumps({
            "UserPoolClient": user_pool_client.to_json(extended=True)
        })

    def delete_user_pool_client(self):
        user_pool_id = self._get_param("UserPoolId")
        client_id = self._get_param("ClientId")
        cognitoidp_backends[self.region].delete_user_pool_client(user_pool_id, client_id)
        return ""

    # Identity provider
    def create_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self.parameters.pop("ProviderName")
        identity_provider = cognitoidp_backends[self.region].create_identity_provider(user_pool_id, name, self.parameters)
        return json.dumps({
            "IdentityProvider": identity_provider.to_json(extended=True)
        })

    def list_identity_providers(self):
        user_pool_id = self._get_param("UserPoolId")
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        identity_providers, next_token = cognitoidp_backends[self.region].list_identity_providers(
            user_pool_id, max_results=max_results, next_token=next_token
        )
        response = {
            "Providers": [identity_provider.to_json() for identity_provider in identity_providers]
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self._get_param("ProviderName")
        identity_provider = cognitoidp_backends[self.region].describe_identity_provider(user_pool_id, name)
        return json.dumps({
            "IdentityProvider": identity_provider.to_json(extended=True)
        })

    def update_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self._get_param("ProviderName")
        identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters)
        return json.dumps({
            "IdentityProvider": identity_provider.to_json(extended=True)
        })

    def delete_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self._get_param("ProviderName")
        cognitoidp_backends[self.region].delete_identity_provider(user_pool_id, name)
        return ""

    # Group
    def create_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        description = self._get_param("Description")
        role_arn = self._get_param("RoleArn")
        precedence = self._get_param("Precedence")

        group = cognitoidp_backends[self.region].create_group(
            user_pool_id,
            group_name,
            description,
            role_arn,
            precedence,
        )

        return json.dumps({
            "Group": group.to_json(),
        })

    def get_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        group = cognitoidp_backends[self.region].get_group(user_pool_id, group_name)
        return json.dumps({
            "Group": group.to_json(),
        })

    def list_groups(self):
        user_pool_id = self._get_param("UserPoolId")
        groups = cognitoidp_backends[self.region].list_groups(user_pool_id)
        return json.dumps({
            "Groups": [group.to_json() for group in groups],
        })

    def delete_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        cognitoidp_backends[self.region].delete_group(user_pool_id, group_name)
        return ""

    def admin_add_user_to_group(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        group_name = self._get_param("GroupName")

        cognitoidp_backends[self.region].admin_add_user_to_group(
            user_pool_id,
            group_name,
            username,
        )

        return ""

    def list_users_in_group(self):
        user_pool_id = self._get_param("UserPoolId")
        group_name = self._get_param("GroupName")
        users = cognitoidp_backends[self.region].list_users_in_group(user_pool_id, group_name)
        return json.dumps({
            "Users": [user.to_json(extended=True) for user in users],
        })

    def admin_list_groups_for_user(self):
        username = self._get_param("Username")
        user_pool_id = self._get_param("UserPoolId")
        groups = cognitoidp_backends[self.region].admin_list_groups_for_user(user_pool_id, username)
        return json.dumps({
            "Groups": [group.to_json() for group in groups],
        })

    def admin_remove_user_from_group(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        group_name = self._get_param("GroupName")

        cognitoidp_backends[self.region].admin_remove_user_from_group(
            user_pool_id,
            group_name,
            username,
        )

        return ""

    # User
    def admin_create_user(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        temporary_password = self._get_param("TemporaryPassword")
        user = cognitoidp_backends[self.region].admin_create_user(
            user_pool_id,
            username,
            temporary_password,
            self._get_param("UserAttributes", [])
        )

        return json.dumps({
            "User": user.to_json(extended=True)
        })

    def admin_get_user(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        user = cognitoidp_backends[self.region].admin_get_user(user_pool_id, username)
        return json.dumps(
            user.to_json(extended=True, attributes_key="UserAttributes")
        )

    def list_users(self):
        user_pool_id = self._get_param("UserPoolId")
        limit = self._get_param("Limit")
        token = self._get_param("PaginationToken")
        users, token = cognitoidp_backends[self.region].list_users(user_pool_id,
                                                                   limit=limit,
                                                                   pagination_token=token)
        response = {"Users": [user.to_json(extended=True) for user in users]}
        if token:
            response["PaginationToken"] = str(token)
        return json.dumps(response)

    def admin_disable_user(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        cognitoidp_backends[self.region].admin_disable_user(user_pool_id, username)
        return ""

    def admin_enable_user(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        cognitoidp_backends[self.region].admin_enable_user(user_pool_id, username)
        return ""

    def admin_delete_user(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        cognitoidp_backends[self.region].admin_delete_user(user_pool_id, username)
        return ""

    def admin_initiate_auth(self):
        user_pool_id = self._get_param("UserPoolId")
        client_id = self._get_param("ClientId")
        auth_flow = self._get_param("AuthFlow")
        auth_parameters = self._get_param("AuthParameters")

        auth_result = cognitoidp_backends[self.region].admin_initiate_auth(
            user_pool_id,
            client_id,
            auth_flow,
            auth_parameters,
        )

        return json.dumps(auth_result)

    def respond_to_auth_challenge(self):
        session = self._get_param("Session")
        client_id = self._get_param("ClientId")
        challenge_name = self._get_param("ChallengeName")
        challenge_responses = self._get_param("ChallengeResponses")
        auth_result = cognitoidp_backends[self.region].respond_to_auth_challenge(
            session,
            client_id,
            challenge_name,
            challenge_responses,
        )

        return json.dumps(auth_result)

    def forgot_password(self):
        return json.dumps({
            "CodeDeliveryDetails": {
                "DeliveryMedium": "EMAIL",
                "Destination": "...",
            }
        })

    # This endpoint receives no authorization header, so if moto-server is listening
    # on localhost (doesn't get a region in the host header), it doesn't know what
    # region's backend should handle the traffic, and we use `find_region_by_value` to
    # solve that problem.
    def confirm_forgot_password(self):
        client_id = self._get_param("ClientId")
        username = self._get_param("Username")
        password = self._get_param("Password")
        region = find_region_by_value("client_id", client_id)
        cognitoidp_backends[region].confirm_forgot_password(client_id, username, password)
        return ""

    # Ditto the comment on confirm_forgot_password.
    def change_password(self):
        access_token = self._get_param("AccessToken")
        previous_password = self._get_param("PreviousPassword")
        proposed_password = self._get_param("ProposedPassword")
        region = find_region_by_value("access_token", access_token)
        cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password)
        return ""

    def admin_update_user_attributes(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        attributes = self._get_param("UserAttributes")
        cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes)
        return ""


class CognitoIdpJsonWebKeyResponse(BaseResponse):

    def __init__(self):
        with open(os.path.join(os.path.dirname(__file__), "resources/jwks-public.json")) as f:
            self.json_web_key = f.read()

    def serve_json_web_key(self, request, full_url, headers):
        return 200, {"Content-Type": "application/json"}, self.json_web_key
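The group handlers above pair up in the obvious way. A minimal boto3 sketch, again assuming the mock_cognitoidp export and using hypothetical pool, group, and user names:

import boto3
from moto import mock_cognitoidp


@mock_cognitoidp
def exercise_groups():
    conn = boto3.client("cognito-idp", "us-west-2")
    pool_id = conn.create_user_pool(PoolName="example_pool")["UserPool"]["Id"]
    conn.create_group(GroupName="admins", UserPoolId=pool_id, Description="example")
    conn.admin_create_user(UserPoolId=pool_id, Username="alice", TemporaryPassword="P@ssw0rd!")
    conn.admin_add_user_to_group(UserPoolId=pool_id, Username="alice", GroupName="admins")
    users = conn.list_users_in_group(UserPoolId=pool_id, GroupName="admins")["Users"]
    assert users[0]["Username"] == "alice"


exercise_groups()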
11
moto/cognitoidp/urls.py
Normal file
11
moto/cognitoidp/urls.py
Normal file
@ -0,0 +1,11 @@
from __future__ import unicode_literals
from .responses import CognitoIdpResponse, CognitoIdpJsonWebKeyResponse

url_bases = [
    "https?://cognito-idp.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': CognitoIdpResponse.dispatch,
    '{0}/<user_pool_id>/.well-known/jwks.json$': CognitoIdpJsonWebKeyResponse().serve_json_web_key,
}
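For reference, each path template is formatted with each url_base before registration, so the patterns expand as in this small illustration:

url_base = "https?://cognito-idp.(.+).amazonaws.com"
print('{0}/$'.format(url_base))
# https?://cognito-idp.(.+).amazonaws.com/$
print('{0}/<user_pool_id>/.well-known/jwks.json$'.format(url_base))
# https?://cognito-idp.(.+).amazonaws.com/<user_pool_id>/.well-known/jwks.json$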
4
moto/config/__init__.py
Normal file
4
moto/config/__init__.py
Normal file
@ -0,0 +1,4 @@
from .models import config_backends
from ..core.models import base_decorator

mock_config = base_decorator(config_backends)
149
moto/config/exceptions.py
Normal file
149
moto/config/exceptions.py
Normal file
@ -0,0 +1,149 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class NameTooLongException(JsonRESTError):
    code = 400

    def __init__(self, name, location):
        message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \
                  ' constraint: Member must have length less than or equal to 256'.format(name=name, location=location)
        super(NameTooLongException, self).__init__("ValidationException", message)


class InvalidConfigurationRecorderNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException",
                                                                        message)


class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \
                  'configuration recorders: 1 is reached.'.format(name=name)
        super(MaxNumberOfConfigurationRecordersExceededException, self).__init__(
            "MaxNumberOfConfigurationRecordersExceededException", message)


class InvalidRecordingGroupException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The recording group provided is not valid'
        super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message)


class InvalidResourceTypeException(JsonRESTError):
    code = 400

    def __init__(self, bad_list, good_list):
        message = '{num} validation error detected: Value \'{bad_list}\' at ' \
                  '\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \
                  'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format(
                      num=len(bad_list), bad_list=bad_list, good_list=good_list)
        # For PY2:
        message = str(message)

        super(InvalidResourceTypeException, self).__init__("ValidationException", message)


class NoSuchConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name)
        super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message)


class InvalidDeliveryChannelNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException",
                                                                  message)


class NoSuchBucketException(JsonRESTError):
    """We are *only* validating that there is a value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'Cannot find a S3 bucket with an empty bucket name.'
        super(NoSuchBucketException, self).__init__("NoSuchBucketException", message)


class InvalidS3KeyPrefixException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.'
        super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message)


class InvalidSNSTopicARNException(JsonRESTError):
    """We are *only* validating that there is a value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'The sns topic arn \'\' is not valid.'
        super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message)


class InvalidDeliveryFrequency(JsonRESTError):
    code = 400

    def __init__(self, value, good_list):
        message = '1 validation error detected: Value \'{value}\' at ' \
                  '\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \
                  'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list)
        super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message)


class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \
                  'delivery channels: 1 is reached.'.format(name=name)
        super(MaxNumberOfDeliveryChannelsExceededException, self).__init__(
            "MaxNumberOfDeliveryChannelsExceededException", message)


class NoSuchDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name)
        super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message)


class NoAvailableConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Configuration recorder is not available to put delivery channel.'
        super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException",
                                                                        message)


class NoAvailableDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Delivery channel is not available to start configuration recorder.'
        super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message)


class LastDeliveryChannelDeleteFailedException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to delete last specified delivery channel with name \'{name}\', ' \
                  'because there is a running configuration recorder.'.format(name=name)
        super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message)
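These errors surface through botocore as ClientError on the client side. A minimal sketch of the NameTooLongException path, assuming mock_config is exported as defined in __init__.py above (the role ARN is a placeholder):

import boto3
from botocore.exceptions import ClientError
from moto.config import mock_config


@mock_config
def test_name_too_long():
    client = boto3.client('config', region_name='us-east-1')
    try:
        client.put_configuration_recorder(ConfigurationRecorder={
            'name': 'x' * 257,
            'roleARN': 'arn:aws:iam::123456789012:role/config-role',
        })
    except ClientError as e:
        assert e.response['Error']['Code'] == 'ValidationException'


test_name_too_long()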
335
moto/config/models.py
Normal file
335
moto/config/models.py
Normal file
@ -0,0 +1,335 @@
import json
import time
import pkg_resources

from datetime import datetime

from boto3 import Session

from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \
    InvalidConfigurationRecorderNameException, NameTooLongException, \
    MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \
    NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \
    InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \
    InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
    NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException

from moto.core import BaseBackend, BaseModel

DEFAULT_ACCOUNT_ID = 123456789012


def datetime2int(date):
    return int(time.mktime(date.timetuple()))


def snake_to_camels(original):
    parts = original.split('_')

    camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:])
    camel_cased = camel_cased.replace('Arn', 'ARN')  # Config uses 'ARN' instead of 'Arn'

    return camel_cased


class ConfigEmptyDictable(BaseModel):
    """Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON."""

    def to_dict(self):
        data = {}
        for item, value in self.__dict__.items():
            if value is not None:
                if isinstance(value, ConfigEmptyDictable):
                    data[snake_to_camels(item)] = value.to_dict()
                else:
                    data[snake_to_camels(item)] = value

        return data


class ConfigRecorderStatus(ConfigEmptyDictable):

    def __init__(self, name):
        self.name = name

        self.recording = False
        self.last_start_time = None
        self.last_stop_time = None
        self.last_status = None
        self.last_error_code = None
        self.last_error_message = None
        self.last_status_change_time = None

    def start(self):
        self.recording = True
        self.last_status = 'PENDING'
        self.last_start_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())

    def stop(self):
        self.recording = False
        self.last_stop_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())


class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):

    def __init__(self, delivery_frequency):
        self.delivery_frequency = delivery_frequency


class ConfigDeliveryChannel(ConfigEmptyDictable):

    def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None):
        self.name = name
        self.s3_bucket_name = s3_bucket_name
        self.s3_key_prefix = prefix
        self.sns_topic_arn = sns_arn
        self.config_snapshot_delivery_properties = snapshot_properties


class RecordingGroup(ConfigEmptyDictable):

    def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None):
        self.all_supported = all_supported
        self.include_global_resource_types = include_global_resource_types
        self.resource_types = resource_types


class ConfigRecorder(ConfigEmptyDictable):

    def __init__(self, role_arn, recording_group, name='default', status=None):
        self.name = name
        self.role_arn = role_arn
        self.recording_group = recording_group

        if not status:
            self.status = ConfigRecorderStatus(name)
        else:
            self.status = status


class ConfigBackend(BaseBackend):

    def __init__(self):
        self.recorders = {}
        self.delivery_channels = {}

    @staticmethod
    def _validate_resource_types(resource_list):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that each entry exists in the supported list:
        bad_list = []
        for resource in resource_list:
            # For PY2:
            r_str = str(resource)

            if r_str not in config_schema['shapes']['ResourceType']['enum']:
                bad_list.append(r_str)

        if bad_list:
            raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum'])

    @staticmethod
    def _validate_delivery_snapshot_properties(properties):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that the deliveryFrequency is set to an acceptable value:
        if properties.get('deliveryFrequency', None) not in \
                config_schema['shapes']['MaximumExecutionFrequency']['enum']:
            raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None),
                                           config_schema['shapes']['MaximumExecutionFrequency']['enum'])

    def put_configuration_recorder(self, config_recorder):
        # Validate the name:
        if not config_recorder.get('name'):
            raise InvalidConfigurationRecorderNameException(config_recorder.get('name'))
        if len(config_recorder.get('name')) > 256:
            raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name')

        # We're going to assume that the passed in Role ARN is correct.

        # Config currently only allows 1 configuration recorder for an account:
        if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']):
            raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name'])

        # Is this updating an existing one?
        recorder_status = None
        if self.recorders.get(config_recorder['name']):
            recorder_status = self.recorders[config_recorder['name']].status

        # Validate the Recording Group:
        if config_recorder.get('recordingGroup') is None:
            recording_group = RecordingGroup()
        else:
            rg = config_recorder['recordingGroup']

            # If an empty dict is passed in, then bad:
            if not rg:
                raise InvalidRecordingGroupException()

            # Can't have both the resource types specified and the other flags as True.
            if rg.get('resourceTypes') and (
                    rg.get('allSupported', False) or
                    rg.get('includeGlobalResourceTypes', False)):
                raise InvalidRecordingGroupException()

            # Must supply resourceTypes if 'allSupported' is not supplied:
            if not rg.get('allSupported') and not rg.get('resourceTypes'):
                raise InvalidRecordingGroupException()

            # Validate that the list provided is correct:
            self._validate_resource_types(rg.get('resourceTypes', []))

            recording_group = RecordingGroup(
                all_supported=rg.get('allSupported', True),
                include_global_resource_types=rg.get('includeGlobalResourceTypes', False),
                resource_types=rg.get('resourceTypes', [])
            )

        self.recorders[config_recorder['name']] = \
            ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'],
                           status=recorder_status)

    def describe_configuration_recorders(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.to_dict())

        return recorders

    def describe_configuration_recorder_status(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].status.to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.status.to_dict())

        return recorders

    def put_delivery_channel(self, delivery_channel):
        # Must have a configuration recorder:
        if not self.recorders:
            raise NoAvailableConfigurationRecorderException()

        # Validate the name:
        if not delivery_channel.get('name'):
            raise InvalidDeliveryChannelNameException(delivery_channel.get('name'))
        if len(delivery_channel.get('name')) > 256:
            raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name')

        # We are going to assume that the bucket exists -- but will verify if the bucket provided is blank:
        if not delivery_channel.get('s3BucketName'):
            raise NoSuchBucketException()

        # We are going to assume that the bucket has the correct policy attached to it. We are only going to verify
        # if the prefix provided is not an empty string:
        if delivery_channel.get('s3KeyPrefix', None) == '':
            raise InvalidS3KeyPrefixException()

        # Ditto for SNS -- Only going to assume that the ARN provided is not an empty string:
        if delivery_channel.get('snsTopicARN', None) == '':
            raise InvalidSNSTopicARNException()

        # Config currently only allows 1 delivery channel for an account:
        if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']):
            raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name'])

        if not delivery_channel.get('configSnapshotDeliveryProperties'):
            dp = None

        else:
            # Validate the config snapshot delivery properties:
            self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties'])

            dp = ConfigDeliverySnapshotProperties(
                delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency'])

        self.delivery_channels[delivery_channel['name']] = \
            ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'],
                                  prefix=delivery_channel.get('s3KeyPrefix', None),
                                  sns_arn=delivery_channel.get('snsTopicARN', None),
                                  snapshot_properties=dp)

    def describe_delivery_channels(self, channel_names):
        channels = []

        if channel_names:
            for cn in channel_names:
                if not self.delivery_channels.get(cn):
                    raise NoSuchDeliveryChannelException(cn)

                # Format the delivery channel:
                channels.append(self.delivery_channels[cn].to_dict())

        else:
            for channel in self.delivery_channels.values():
                channels.append(channel.to_dict())

        return channels

    def start_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Must have a delivery channel available as well:
        if not self.delivery_channels:
            raise NoAvailableDeliveryChannelException()

        # Start recording:
        self.recorders[recorder_name].status.start()

    def stop_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Stop recording:
        self.recorders[recorder_name].status.stop()

    def delete_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        del self.recorders[recorder_name]

    def delete_delivery_channel(self, channel_name):
        if not self.delivery_channels.get(channel_name):
            raise NoSuchDeliveryChannelException(channel_name)

        # Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
        for recorder in self.recorders.values():
            if recorder.status.recording:
                raise LastDeliveryChannelDeleteFailedException(channel_name)

        del self.delivery_channels[channel_name]


config_backends = {}
boto3_session = Session()
for region in boto3_session.get_available_regions('config'):
    config_backends[region] = ConfigBackend()
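A sketch of the happy-path lifecycle these models support, again assuming the mock_config export (bucket and role names are placeholders):

import boto3
from moto.config import mock_config


@mock_config
def recorder_lifecycle():
    client = boto3.client('config', region_name='us-west-2')
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'default',
        'roleARN': 'arn:aws:iam::123456789012:role/config-role',
        'recordingGroup': {'allSupported': True, 'includeGlobalResourceTypes': True},
    })
    # A delivery channel must exist before the recorder can start.
    client.put_delivery_channel(DeliveryChannel={
        'name': 'default',
        's3BucketName': 'my-config-bucket',
    })
    client.start_configuration_recorder(ConfigurationRecorderName='default')
    status = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'][0]
    assert status['recording'] is True


recorder_lifecycle()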
53
moto/config/responses.py
Normal file
53
moto/config/responses.py
Normal file
@ -0,0 +1,53 @@
import json
from moto.core.responses import BaseResponse
from .models import config_backends


class ConfigResponse(BaseResponse):

    @property
    def config_backend(self):
        return config_backends[self.region]

    def put_configuration_recorder(self):
        self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
        return ""

    def describe_configuration_recorders(self):
        recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecorders': recorders}
        return json.dumps(schema)

    def describe_configuration_recorder_status(self):
        recorder_statuses = self.config_backend.describe_configuration_recorder_status(
            self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecordersStatus': recorder_statuses}
        return json.dumps(schema)

    def put_delivery_channel(self):
        self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
        return ""

    def describe_delivery_channels(self):
        delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
        schema = {'DeliveryChannels': delivery_channels}
        return json.dumps(schema)

    def describe_delivery_channel_status(self):
        raise NotImplementedError()

    def delete_delivery_channel(self):
        self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
        return ""

    def delete_configuration_recorder(self):
        self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def start_configuration_recorder(self):
        self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def stop_configuration_recorder(self):
        self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""
10
moto/config/urls.py
Normal file
10
moto/config/urls.py
Normal file
@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import ConfigResponse

url_bases = [
    "https?://config.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': ConfigResponse.dispatch,
}
@ -2,14 +2,19 @@
from __future__ import unicode_literals
from __future__ import absolute_import

from collections import defaultdict
import functools
import inspect
import os
import re
import six
from io import BytesIO
from collections import defaultdict
from botocore.handlers import BUILTIN_HANDLERS
from botocore.awsrequest import AWSResponse

import mock
from moto import settings
from moto.packages.responses import responses
import responses
from moto.packages.httpretty import HTTPretty
from .utils import (
    convert_httpretty_response,
@ -33,6 +38,10 @@ class BaseMockAWS(object):
        self.backends_for_urls.update(self.backends)
        self.backends_for_urls.update(default_backends)

        # "Mock" the AWS credentials as they can't be mocked in Botocore currently
        FAKE_KEYS = {"AWS_ACCESS_KEY_ID": "foobar_key", "AWS_SECRET_ACCESS_KEY": "foobar_secret"}
        self.env_variables_mocks = mock.patch.dict(os.environ, FAKE_KEYS)

        if self.__class__.nested_count == 0:
            self.reset()
@ -43,11 +52,14 @@ class BaseMockAWS(object):

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()

    def start(self, reset=True):
        self.env_variables_mocks.start()

        self.__class__.nested_count += 1
        if reset:
            for backend in self.backends.values():
@ -56,6 +68,7 @@ class BaseMockAWS(object):
        self.enable_patching()

    def stop(self):
        self.env_variables_mocks.stop()
        self.__class__.nested_count -= 1

        if self.__class__.nested_count < 0:
@ -89,6 +102,17 @@ class BaseMockAWS(object):
            if inspect.ismethod(attr_value) and attr_value.__self__ is klass:
                continue

            # Check if this is a staticmethod. If so, skip patching
            for cls in inspect.getmro(klass):
                if attr_value.__name__ not in cls.__dict__:
                    continue
                bound_attr_value = cls.__dict__[attr_value.__name__]
                if not isinstance(bound_attr_value, staticmethod):
                    break
            else:
                # It is a staticmethod, skip patching
                continue

            try:
                setattr(klass, attr, self(attr_value, reset=False))
            except TypeError:
@ -124,34 +148,209 @@ RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD,
                     responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT]


class ResponsesMockAWS(BaseMockAWS):
class CallbackResponse(responses.CallbackResponse):
    '''
    Need to subclass so we can change a couple things
    '''
    def get_response(self, request):
        '''
        Need to override this so we can pass decode_content=False
        '''
        headers = self.get_headers()

        result = self.callback(request)
        if isinstance(result, Exception):
            raise result

        status, r_headers, body = result
        body = responses._handle_body(body)
        headers.update(r_headers)

        return responses.HTTPResponse(
            status=status,
            reason=six.moves.http_client.responses.get(status),
            body=body,
            headers=headers,
            preload_content=False,
            # Need to not decode_content to mimic requests
            decode_content=False,
        )

    def _url_matches(self, url, other, match_querystring=False):
        '''
        Need to override this so we can fix querystrings breaking regex matching
        '''
        if not match_querystring:
            other = other.split('?', 1)[0]

        if responses._is_string(url):
            if responses._has_unicode(url):
                url = responses._clean_unicode(url)
                if not isinstance(other, six.text_type):
                    other = other.encode('ascii').decode('utf8')
            return self._url_matches_strict(url, other)
        elif isinstance(url, responses.Pattern) and url.match(other):
            return True
        else:
            return False


botocore_mock = responses.RequestsMock(assert_all_requests_are_fired=False, target='botocore.vendored.requests.adapters.HTTPAdapter.send')
responses_mock = responses._default_mock


class ResponsesMockAWS(BaseMockAWS):
    def reset(self):
        responses.reset()
        botocore_mock.reset()
        responses_mock.reset()

    def enable_patching(self):
        responses.start()
        if not hasattr(botocore_mock, '_patcher') or not hasattr(botocore_mock._patcher, 'target'):
            # Check for unactivated patcher
            botocore_mock.start()

        if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'):
            responses_mock.start()

        for method in RESPONSES_METHODS:
            for backend in self.backends_for_urls.values():
                for key, value in backend.urls.items():
                    responses.add_callback(
                        method=method,
                        url=re.compile(key),
                        callback=convert_flask_to_responses_response(value),
                    responses_mock.add(
                        CallbackResponse(
                            method=method,
                            url=re.compile(key),
                            callback=convert_flask_to_responses_response(value),
                            stream=True,
                            match_querystring=False,
                        )
                    )
                    botocore_mock.add(
                        CallbackResponse(
                            method=method,
                            url=re.compile(key),
                            callback=convert_flask_to_responses_response(value),
                            stream=True,
                            match_querystring=False,
                        )
                    )

        for pattern in responses.mock._urls:
            pattern['stream'] = True

    def disable_patching(self):
        try:
            responses.stop()
        except AttributeError:
            botocore_mock.stop()
        except RuntimeError:
            pass

        try:
            responses_mock.stop()
        except RuntimeError:
            pass
        responses.reset()


MockAWS = ResponsesMockAWS
BOTOCORE_HTTP_METHODS = [
    'GET', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT'
]


class MockRawResponse(BytesIO):
    def __init__(self, input):
        if isinstance(input, six.text_type):
            input = input.encode('utf-8')
        super(MockRawResponse, self).__init__(input)

    def stream(self, **kwargs):
        contents = self.read()
        while contents:
            yield contents
            contents = self.read()


class BotocoreStubber(object):
    def __init__(self):
        self.enabled = False
        self.methods = defaultdict(list)

    def reset(self):
        self.methods.clear()

    def register_response(self, method, pattern, response):
        matchers = self.methods[method]
        matchers.append((pattern, response))

    def __call__(self, event_name, request, **kwargs):
        if not self.enabled:
            return None

        response = None
        response_callback = None
        found_index = None
        matchers = self.methods.get(request.method)

        base_url = request.url.split('?', 1)[0]
        for i, (pattern, callback) in enumerate(matchers):
            if pattern.match(base_url):
                if found_index is None:
                    found_index = i
                    response_callback = callback
                else:
                    matchers.pop(found_index)
                    break

        if response_callback is not None:
            for header, value in request.headers.items():
                if isinstance(value, six.binary_type):
                    request.headers[header] = value.decode('utf-8')
            status, headers, body = response_callback(request, request.url, request.headers)
            body = MockRawResponse(body)
            response = AWSResponse(request.url, status, headers, body)

        return response


botocore_stubber = BotocoreStubber()
BUILTIN_HANDLERS.append(('before-send', botocore_stubber))


class BotocoreEventMockAWS(BaseMockAWS):
    def reset(self):
        botocore_stubber.reset()
        responses_mock.reset()

    def enable_patching(self):
        botocore_stubber.enabled = True
        for method in BOTOCORE_HTTP_METHODS:
            for backend in self.backends_for_urls.values():
                for key, value in backend.urls.items():
                    pattern = re.compile(key)
                    botocore_stubber.register_response(method, pattern, value)

        if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'):
            responses_mock.start()

        for method in RESPONSES_METHODS:
            # for backend in default_backends.values():
            for backend in self.backends_for_urls.values():
                for key, value in backend.urls.items():
                    responses_mock.add(
                        CallbackResponse(
                            method=method,
                            url=re.compile(key),
                            callback=convert_flask_to_responses_response(value),
                            stream=True,
                            match_querystring=False,
                        )
                    )

    def disable_patching(self):
        botocore_stubber.enabled = False
        self.reset()

        try:
            responses_mock.stop()
        except RuntimeError:
            pass


MockAWS = BotocoreEventMockAWS


class ServerModeMockAWS(BaseMockAWS):
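The BotocoreStubber above relies on botocore's 'before-send' event: if the handler returns an AWSResponse, botocore skips the real HTTP call, while returning None lets the request through to the network. A toy version of that contract, not the full moto wiring:

import re
from collections import defaultdict


class TinyStubber(object):
    def __init__(self):
        self.routes = defaultdict(list)  # method -> [(pattern, callback)]

    def add(self, method, pattern, callback):
        self.routes[method].append((re.compile(pattern), callback))

    def __call__(self, event_name, request, **kwargs):
        # Called by botocore for every outgoing request when registered
        # via BUILTIN_HANDLERS.append(('before-send', stubber)).
        for pattern, callback in self.routes[request.method]:
            if pattern.match(request.url.split('?', 1)[0]):
                return callback(request)  # an AWSResponse short-circuits the send
        return None  # fall through to a real HTTP request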
@ -270,10 +469,14 @@ class BaseModel(object):

class BaseBackend(object):

    def reset(self):
    def _reset_model_refs(self):
        # Remove all references to the models stored
        for service, models in model_data.items():
            for model_name, model in models.items():
                model.instances = []

    def reset(self):
        self._reset_model_refs()
        self.__dict__ = {}
        self.__init__()
@ -5,6 +5,7 @@ import datetime
import json
import logging
import re
import io

import pytz
from moto.core.exceptions import DryRunClientError
@ -106,7 +107,9 @@ class BaseResponse(_TemplateEnvironmentMixin):

    default_region = 'us-east-1'
    # to extract region, use [^.]
    region_regex = r'\.([^.]+?)\.amazonaws\.com'
    region_regex = re.compile(r'\.(?P<region>[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com')
    param_list_regex = re.compile(r'(.*)\.(\d+)\.')
    access_key_regex = re.compile(r'AWS.*(?P<access_key>(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9]))[:/]')
    aws_service_spec = None

    @classmethod
@ -149,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin):
            for key, value in flat.items():
                querystring[key] = [value]
        elif self.body:
            querystring.update(parse_qs(raw_body, keep_blank_values=True))
            try:
                querystring.update(parse_qs(raw_body, keep_blank_values=True))
            except UnicodeEncodeError:
                pass  # ignore encoding errors, as the body may not contain a legitimate querystring
        if not querystring:
            querystring.update(headers)

        querystring = _decode_dict(querystring)
        try:
            querystring = _decode_dict(querystring)
        except UnicodeDecodeError:
            pass  # ignore decoding errors, as the body may not contain a legitimate querystring

        self.uri = full_url
        self.path = urlparse(full_url).path
        self.querystring = querystring
@ -167,7 +177,7 @@ class BaseResponse(_TemplateEnvironmentMixin):
        self.response_headers = {"server": "amazon.com"}

    def get_region_from_url(self, request, full_url):
        match = re.search(self.region_regex, full_url)
        match = self.region_regex.search(full_url)
        if match:
            region = match.group(1)
        elif 'Authorization' in request.headers and 'AWS4' in request.headers['Authorization']:
@ -177,6 +187,21 @@ class BaseResponse(_TemplateEnvironmentMixin):
            region = self.default_region
        return region

    def get_current_user(self):
        """
        Returns the access key id used in this request as the current user id
        """
        if 'Authorization' in self.headers:
            match = self.access_key_regex.search(self.headers['Authorization'])
            if match:
                return match.group(1)

        if self.querystring.get('AWSAccessKeyId'):
            return self.querystring.get('AWSAccessKeyId')
        else:
            # Should we raise an unauthorized exception instead?
            return '111122223333'

    def _dispatch(self, request, full_url, headers):
        self.setup_class(request, full_url, headers)
        return self.call_action()
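The access_key_regex added here pulls the access key id out of a SigV4 Authorization header, for example:

import re

access_key_regex = re.compile(r'AWS.*(?P<access_key>(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9]))[:/]')
auth = ('AWS4-HMAC-SHA256 '
        'Credential=AKIAIOSFODNN7EXAMPLE/20150830/us-east-1/iam/aws4_request, '
        'SignedHeaders=host;x-amz-date, Signature=abc123')
match = access_key_regex.search(auth)
assert match.group('access_key') == 'AKIAIOSFODNN7EXAMPLE'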
@ -271,6 +296,9 @@ class BaseResponse(_TemplateEnvironmentMixin):
            headers['status'] = str(headers['status'])
        return status, headers, body

        if not action:
            return 404, headers, ''

        raise NotImplementedError(
            "The {0} action has not been implemented".format(action))
@ -311,6 +339,45 @@ class BaseResponse(_TemplateEnvironmentMixin):
                return False
        return if_none

    def _get_multi_param_helper(self, param_prefix):
        value_dict = dict()
        tracked_prefixes = set()  # prefixes which have already been processed

        def is_tracked(name_param):
            for prefix_loop in tracked_prefixes:
                if name_param.startswith(prefix_loop):
                    return True
            return False

        for name, value in self.querystring.items():
            if is_tracked(name) or not name.startswith(param_prefix):
                continue

            if len(name) > len(param_prefix) and \
                    not name[len(param_prefix):].startswith('.'):
                continue

            match = self.param_list_regex.search(name[len(param_prefix):]) if len(name) > len(param_prefix) else None
            if match:
                prefix = param_prefix + match.group(1)
                value = self._get_multi_param(prefix)
                tracked_prefixes.add(prefix)
                name = prefix
                value_dict[name] = value
            else:
                value_dict[name] = value[0]

        if not value_dict:
            return None

        if len(value_dict) > 1:
            # strip off period prefix
            value_dict = {name[len(param_prefix) + 1:]: value for name, value in value_dict.items()}
        else:
            value_dict = list(value_dict.values())[0]

        return value_dict

    def _get_multi_param(self, param_prefix):
        """
        Given a querystring of ?LaunchConfigurationNames.member.1=my-test-1&LaunchConfigurationNames.member.2=my-test-2
@ -323,12 +390,13 @@ class BaseResponse(_TemplateEnvironmentMixin):
        values = []
        index = 1
        while True:
            try:
                values.append(self.querystring[prefix + str(index)][0])
            except KeyError:
            value_dict = self._get_multi_param_helper(prefix + str(index))
            if not value_dict:
                break
            else:
                index += 1

            values.append(value_dict)
            index += 1

        return values

    def _get_dict_param(self, param_prefix):
@ -432,6 +500,54 @@ class BaseResponse(_TemplateEnvironmentMixin):

        return results

    def _get_object_map(self, prefix, name='Name', value='Value'):
        """
        Given a query dict like
        {
            Prefix.1.Name: [u'event'],
            Prefix.1.Value.StringValue: [u'order_cancelled'],
            Prefix.1.Value.DataType: [u'String'],
            Prefix.2.Name: [u'store'],
            Prefix.2.Value.StringValue: [u'example_corp'],
            Prefix.2.Value.DataType: [u'String'],
        }

        returns
        {
            'event': {
                'DataType': 'String',
                'StringValue': 'order_cancelled'
            },
            'store': {
                'DataType': 'String',
                'StringValue': 'example_corp'
            }
        }
        """
        object_map = {}
        index = 1
        while True:
            # Loop through looking for keys representing object name
            name_key = '{0}.{1}.{2}'.format(prefix, index, name)
            obj_name = self.querystring.get(name_key)
            if not obj_name:
                # Found all keys
                break

            obj = {}
            value_key_prefix = '{0}.{1}.{2}.'.format(
                prefix, index, value)
            for k, v in self.querystring.items():
                if k.startswith(value_key_prefix):
                    _, value_key = k.split(value_key_prefix, 1)
                    obj[value_key] = v[0]

            object_map[obj_name[0]] = obj

            index += 1

        return object_map

    @property
    def request_json(self):
        return 'JSON' in self.querystring.get('ContentType', [])
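A rough illustration of what _get_object_map unpacks, using a hand-built querystring (the attribute names mirror SNS/SQS message attributes):

from moto.core.responses import BaseResponse

resp = BaseResponse()
resp.querystring = {
    'MessageAttributes.entry.1.Name': ['event'],
    'MessageAttributes.entry.1.Value.StringValue': ['order_cancelled'],
    'MessageAttributes.entry.1.Value.DataType': ['String'],
}
print(resp._get_object_map('MessageAttributes.entry'))
# {'event': {'StringValue': 'order_cancelled', 'DataType': 'String'}}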
@ -514,7 +630,7 @@ class AWSServiceSpec(object):

    def __init__(self, path):
        self.path = resource_filename('botocore', path)
        with open(self.path) as f:
        with io.open(self.path, 'r', encoding='utf-8') as f:
            spec = json.load(f)
        self.metadata = spec['metadata']
        self.operations = spec['operations']
@ -609,6 +725,8 @@ def to_str(value, spec):
        return str(value)
    elif vtype == 'float':
        return str(value)
    elif vtype == 'double':
        return str(value)
    elif vtype == 'timestamp':
        return datetime.datetime.utcfromtimestamp(
            value).replace(tzinfo=pytz.utc).isoformat()
@ -628,6 +746,8 @@ def from_str(value, spec):
        return int(value)
    elif vtype == 'float':
        return float(value)
    elif vtype == 'double':
        return float(value)
    elif vtype == 'timestamp':
        return value
    elif vtype == 'string':
@ -8,6 +8,7 @@ import random
import re
import six
import string
from six.moves.urllib.parse import urlparse


REQUEST_ID_LONG = string.digits + string.ascii_uppercase
@ -18,6 +19,8 @@ def camelcase_to_underscores(argument):
    python underscore variable like the_new_attribute'''
    result = ''
    prev_char_title = True
    if not argument:
        return argument
    for index, char in enumerate(argument):
        try:
            next_char_title = argument[index + 1].istitle()
@ -277,10 +280,20 @@ def amzn_request_id(f):

        # Update request ID in XML
        try:
            body = body.replace('{{ requestid }}', request_id)
            body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
        except Exception:  # Will just ignore if it cant work on bytes (which are str's on python2)
            pass

        return status, headers, body

    return _wrapper


def path_url(url):
    parsed_url = urlparse(url)
    path = parsed_url.path
    if not path:
        path = '/'
    if parsed_url.query:
        path = path + '?' + parsed_url.query
    return path
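The new substitution rewrites whatever request id an XML body already carries, not just the '{{ requestid }}' placeholder, for example:

import re

body = '<ErrorResponse><RequestId>old-id</RequestId></ErrorResponse>'
body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', 'new-request-id', body)
# '<ErrorResponse><RequestId>new-request-id</RequestId></ErrorResponse>'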
@ -62,13 +62,13 @@ class DynamoHandler(BaseResponse):
        name = body['TableName']

        key_schema = body['KeySchema']
        hash_hey = key_schema['HashKeyElement']
        hash_key_attr = hash_hey['AttributeName']
        hash_key_type = hash_hey['AttributeType']
        hash_key = key_schema['HashKeyElement']
        hash_key_attr = hash_key['AttributeName']
        hash_key_type = hash_key['AttributeType']

        range_hey = key_schema.get('RangeKeyElement', {})
        range_key_attr = range_hey.get('AttributeName')
        range_key_type = range_hey.get('AttributeType')
        range_key = key_schema.get('RangeKeyElement', {})
        range_key_attr = range_key.get('AttributeName')
        range_key_type = range_key.get('AttributeType')

        throughput = body["ProvisionedThroughput"]
        read_units = throughput["ReadCapacityUnits"]
File diff suppressed because it is too large
Load Diff
2
moto/dynamodb2/exceptions.py
Normal file
2
moto/dynamodb2/exceptions.py
Normal file
@ -0,0 +1,2 @@
|
||||
class InvalidIndexNameError(ValueError):
|
||||
pass
|
||||
@ -5,13 +5,18 @@ import datetime
|
||||
import decimal
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
import six
|
||||
|
||||
import boto3
|
||||
from moto.compat import OrderedDict
|
||||
from moto.core import BaseBackend, BaseModel
|
||||
from moto.core.utils import unix_time
|
||||
from moto.core.exceptions import JsonRESTError
|
||||
from .comparisons import get_comparison_func, get_filter_expression, Op
|
||||
from .comparisons import get_comparison_func
|
||||
from .comparisons import get_filter_expression
|
||||
from .comparisons import get_expected
|
||||
from .exceptions import InvalidIndexNameError
|
||||
|
||||
|
||||
class DynamoJsonEncoder(json.JSONEncoder):
|
||||
@ -65,9 +70,35 @@ class DynamoType(object):
|
||||
return int(self.value)
|
||||
except ValueError:
|
||||
return float(self.value)
|
||||
elif self.is_set():
|
||||
sub_type = self.type[0]
|
||||
return set([DynamoType({sub_type: v}).cast_value
|
||||
for v in self.value])
|
||||
elif self.is_list():
|
||||
return [DynamoType(v).cast_value for v in self.value]
|
||||
elif self.is_map():
|
||||
return dict([
|
||||
(k, DynamoType(v).cast_value)
|
||||
for k, v in self.value.items()])
|
||||
else:
|
||||
return self.value
|
||||
|
||||
def child_attr(self, key):
|
||||
"""
|
||||
Get Map or List children by key. str for Map, int for List.
|
||||
|
||||
Returns DynamoType or None.
|
||||
"""
|
||||
if isinstance(key, six.string_types) and self.is_map() and key in self.value:
|
||||
return DynamoType(self.value[key])
|
||||
|
||||
if isinstance(key, int) and self.is_list():
|
||||
idx = key
|
||||
if idx >= 0 and idx < len(self.value):
|
||||
return DynamoType(self.value[idx])
|
||||
|
||||
return None
|
||||
|
||||
def to_json(self):
|
||||
return {self.type: self.value}
|
||||
|
||||
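`child_attr` always hands back either a wrapped `DynamoType` or `None`, so nested lookups can be chained without isinstance checks at each step. A small illustration, assuming the `DynamoType` class above (the attribute names are made up):

    m = DynamoType({'M': {'spam': {'S': 'eggs'},
                          'lst': {'L': [{'N': '1'}, {'N': '2'}]}}})
    assert m.child_attr('spam').value == 'eggs'            # str key into a Map
    assert m.child_attr('lst').child_attr(0).value == '1'  # int index into a List
    assert m.child_attr('missing') is None                 # absent key -> None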
@ -85,6 +116,12 @@ class DynamoType(object):
    def is_set(self):
        return self.type == 'SS' or self.type == 'NS' or self.type == 'BS'

    def is_list(self):
        return self.type == 'L'

    def is_map(self):
        return self.type == 'M'

    def same_type(self, other):
        return self.type == other.type

@ -135,7 +172,9 @@ class Item(BaseModel):
        assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression)
        for action, valstr in zip(parts[:-1:2], parts[1::2]):
            action = action.upper()
            values = valstr.split(',')

            # "Should" retain arguments inside (...)
            values = re.split(r',(?![^(]*\))', valstr)
            for value in values:
                # A Real value
                value = value.lstrip(":").rstrip(",").strip()
@ -145,13 +184,58 @@ class Item(BaseModel):
                if action == "REMOVE":
                    self.attrs.pop(value, None)
                elif action == 'SET':
                    key, value = value.split("=")
                    key, value = value.split("=", 1)
                    key = key.strip()
                    value = value.strip()
                    if value in expression_attribute_values:
                        self.attrs[key] = DynamoType(expression_attribute_values[value])

                    # If not exists, changes value to a default if needed, else it's the same as it was
                    if value.startswith('if_not_exists'):
                        # Function signature
                        match = re.match(r'.*if_not_exists\s*\((?P<path>.+),\s*(?P<default>.+)\).*', value)
                        if not match:
                            raise TypeError

                        path, value = match.groups()

                        # If it already exists, get its value so we don't overwrite it
                        if path in self.attrs:
                            value = self.attrs[path]

                    if type(value) != DynamoType:
                        if value in expression_attribute_values:
                            value = DynamoType(expression_attribute_values[value])
                        else:
                            value = DynamoType({"S": value})

                    if '.' not in key:
                        self.attrs[key] = value
                    else:
                        self.attrs[key] = DynamoType({"S": value})
                        # Handle nested dict updates
                        key_parts = key.split('.')
                        attr = key_parts.pop(0)
                        if attr not in self.attrs:
                            raise ValueError

                        last_val = self.attrs[attr].value
                        for key_part in key_parts:
                            # Hack but it'll do, traverses into a dict
                            last_val_type = list(last_val.keys())
                            if last_val_type and last_val_type[0] == 'M':
                                last_val = last_val['M']

                            if key_part not in last_val:
                                last_val[key_part] = {'M': {}}

                            last_val = last_val[key_part]

                        # We have a reference to a nested object but we can't just assign to it
                        current_type = list(last_val.keys())[0]
                        if current_type == value.type:
                            last_val[current_type] = value.value
                        else:
                            last_val[value.type] = value.value
                            del last_val[current_type]

                elif action == 'ADD':
                    key, value = value.split(" ", 1)
                    key = key.strip()
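The `if_not_exists` branch leans entirely on the regex shown above; what it captures is worth seeing once. A standalone check (the placeholder names are illustrative):

    import re

    value = 'if_not_exists(Pages, :default)'
    match = re.match(r'.*if_not_exists\s*\((?P<path>.+),\s*(?P<default>.+)\).*', value)
    path, default = match.groups()
    assert path == 'Pages'        # attribute path that is consulted first
    assert default == ':default'  # fallback used only when the path is absent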
@ -218,9 +302,9 @@ class Item(BaseModel):
                self.attrs[attribute_name] = DynamoType({"SS": new_value})
            elif isinstance(new_value, dict):
                self.attrs[attribute_name] = DynamoType({"M": new_value})
            elif update_action['Value'].keys() == ['N']:
            elif set(update_action['Value'].keys()) == set(['N']):
                self.attrs[attribute_name] = DynamoType({"N": new_value})
            elif update_action['Value'].keys() == ['NULL']:
            elif set(update_action['Value'].keys()) == set(['NULL']):
                if attribute_name in self.attrs:
                    del self.attrs[attribute_name]
            else:
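The move from `update_action['Value'].keys() == ['N']` to set comparison is a Python 3 compatibility fix: there `dict.keys()` returns a view object, and a view never compares equal to a list, so the old branches silently stopped matching. A two-line illustration:

    d = {'N': '5'}
    print(d.keys() == ['N'])            # True on Python 2, False on Python 3
    print(set(d.keys()) == set(['N']))  # True on both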
@ -243,11 +327,97 @@ class Item(BaseModel):
                # TODO: implement other data types
                raise NotImplementedError(
                    'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
        elif action == 'DELETE':
            if set(update_action['Value'].keys()) == set(['SS']):
                existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
                new_set = set(existing.value).difference(set(new_value))
                self.attrs[attribute_name] = DynamoType({
                    "SS": list(new_set)
                })
            else:
                raise NotImplementedError(
                    'DELETE not supported for %s' % ', '.join(update_action['Value'].keys()))
        else:
            raise NotImplementedError(
                '%s action not supported for update_with_attribute_updates' % action)


class StreamRecord(BaseModel):
    def __init__(self, table, stream_type, event_name, old, new, seq):
        old_a = old.to_json()['Attributes'] if old is not None else {}
        new_a = new.to_json()['Attributes'] if new is not None else {}

        rec = old if old is not None else new
        keys = {table.hash_key_attr: rec.hash_key.to_json()}
        if table.range_key_attr is not None:
            keys[table.range_key_attr] = rec.range_key.to_json()

        self.record = {
            'eventID': uuid.uuid4().hex,
            'eventName': event_name,
            'eventSource': 'aws:dynamodb',
            'eventVersion': '1.0',
            'awsRegion': 'us-east-1',
            'dynamodb': {
                'StreamViewType': stream_type,
                'ApproximateCreationDateTime': datetime.datetime.utcnow().isoformat(),
                'SequenceNumber': seq,
                'SizeBytes': 1,
                'Keys': keys
            }
        }

        if stream_type in ('NEW_IMAGE', 'NEW_AND_OLD_IMAGES'):
            self.record['dynamodb']['NewImage'] = new_a
        if stream_type in ('OLD_IMAGE', 'NEW_AND_OLD_IMAGES'):
            self.record['dynamodb']['OldImage'] = old_a

        # This is a substantial overestimate but it's the easiest to do now
        self.record['dynamodb']['SizeBytes'] = len(
            json.dumps(self.record['dynamodb']))

    def to_json(self):
        return self.record


class StreamShard(BaseModel):
    def __init__(self, table):
        self.table = table
        self.id = 'shardId-00000001541626099285-f35f62ef'
        self.starting_sequence_number = 1100000000017454423009
        self.items = []
        self.created_on = datetime.datetime.utcnow()

    def to_json(self):
        return {
            'ShardId': self.id,
            'SequenceNumberRange': {
                'StartingSequenceNumber': str(self.starting_sequence_number)
            }
        }

    def add(self, old, new):
        t = self.table.stream_specification['StreamViewType']
        if old is None:
            event_name = 'INSERT'
        elif new is None:
            event_name = 'DELETE'
        else:
            event_name = 'MODIFY'
        seq = len(self.items) + self.starting_sequence_number
        self.items.append(
            StreamRecord(self.table, t, event_name, old, new, seq))

    def get(self, start, quantity):
        start -= self.starting_sequence_number
        assert start >= 0
        end = start + quantity
        return [i.to_json() for i in self.items[start:end]]
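A shard's sequence numbers are simply `starting_sequence_number` plus the record's position in `items`; `get` inverts that to slice the list. The arithmetic, shown with stand-in values (not moto's tests):

    starting_sequence_number = 1100000000017454423009
    items = ['rec0', 'rec1', 'rec2']  # stand-ins for StreamRecord objects

    # add() assigns the next record this absolute sequence number:
    next_seq = len(items) + starting_sequence_number

    # get(start, quantity) re-bases an absolute number into a list index:
    start = starting_sequence_number + 2  # sequence number of 'rec2'
    index = start - starting_sequence_number
    assert items[index:index + 1] == ['rec2']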
class Table(BaseModel):

    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None):
    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None, streams=None):
        self.name = table_name
        self.attr = attr
        self.schema = schema
@ -278,10 +448,41 @@ class Table(BaseModel):
            'TimeToLiveStatus': 'DISABLED'  # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
            # 'AttributeName': 'string'  # Can contain this
        }
        self.set_stream_specification(streams)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        params = {}

        if 'KeySchema' in properties:
            params['schema'] = properties['KeySchema']
        if 'AttributeDefinitions' in properties:
            params['attr'] = properties['AttributeDefinitions']
        if 'GlobalSecondaryIndexes' in properties:
            params['global_indexes'] = properties['GlobalSecondaryIndexes']
        if 'ProvisionedThroughput' in properties:
            params['throughput'] = properties['ProvisionedThroughput']
        if 'LocalSecondaryIndexes' in properties:
            params['indexes'] = properties['LocalSecondaryIndexes']

        table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params)
        return table

    def _generate_arn(self, name):
        return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name

    def set_stream_specification(self, streams):
        self.stream_specification = streams
        if streams and (streams.get('StreamEnabled') or streams.get('StreamViewType')):
            self.stream_specification['StreamEnabled'] = True
            self.latest_stream_label = datetime.datetime.utcnow().isoformat()
            self.stream_shard = StreamShard(self)
        else:
            self.stream_specification = {'StreamEnabled': False}
            self.latest_stream_label = None
            self.stream_shard = None
    def describe(self, base_key='TableDescription'):
        results = {
            base_key: {
@ -298,6 +499,11 @@ class Table(BaseModel):
                'LocalSecondaryIndexes': [index for index in self.indexes],
            }
        }
        if self.stream_specification and self.stream_specification['StreamEnabled']:
            results[base_key]['StreamSpecification'] = self.stream_specification
            if self.latest_stream_label:
                results[base_key]['LatestStreamLabel'] = self.latest_stream_label
                results[base_key]['LatestStreamArn'] = self.table_arn + '/stream/' + self.latest_stream_label
        return results

    def __len__(self):
@ -331,57 +537,48 @@ class Table(BaseModel):
            keys.append(range_key)
        return keys

    def put_item(self, item_attrs, expected=None, overwrite=False):
    def put_item(self, item_attrs, expected=None, condition_expression=None,
                 expression_attribute_names=None,
                 expression_attribute_values=None, overwrite=False):
        hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
        if self.has_range_key:
            range_value = DynamoType(item_attrs.get(self.range_key_attr))
        else:
            range_value = None

        if expected is None:
            expected = {}
            lookup_range_value = range_value
        else:
            expected_range_value = expected.get(
                self.range_key_attr, {}).get("Value")
            if(expected_range_value is None):
                lookup_range_value = range_value
            else:
                lookup_range_value = DynamoType(expected_range_value)
        current = self.get_item(hash_value, lookup_range_value)

        item = Item(hash_value, self.hash_key_type, range_value,
                    self.range_key_type, item_attrs)

        if not overwrite:
            if expected is None:
                expected = {}
                lookup_range_value = range_value
            else:
                expected_range_value = expected.get(
                    self.range_key_attr, {}).get("Value")
                if(expected_range_value is None):
                    lookup_range_value = range_value
                else:
                    lookup_range_value = DynamoType(expected_range_value)
            if not get_expected(expected).expr(current):
                raise ValueError('The conditional request failed')
            condition_op = get_filter_expression(
                condition_expression,
                expression_attribute_names,
                expression_attribute_values)
            if not condition_op.expr(current):
                raise ValueError('The conditional request failed')

            current = self.get_item(hash_value, lookup_range_value)

            if current is None:
                current_attr = {}
            elif hasattr(current, 'attrs'):
                current_attr = current.attrs
            else:
                current_attr = current

            for key, val in expected.items():
                if 'Exists' in val and val['Exists'] is False:
                    if key in current_attr:
                        raise ValueError("The conditional request failed")
                elif key not in current_attr:
                    raise ValueError("The conditional request failed")
                elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
                    raise ValueError("The conditional request failed")
                elif 'ComparisonOperator' in val:
                    comparison_func = get_comparison_func(
                        val['ComparisonOperator'])
                    dynamo_types = [DynamoType(ele) for ele in val[
                        "AttributeValueList"]]
                    for t in dynamo_types:
                        if not comparison_func(current_attr[key].value, t.value):
                            raise ValueError('The conditional request failed')
        if range_value:
            self.items[hash_value][range_value] = item
        else:
            self.items[hash_value] = item

        if self.stream_shard is not None:
            self.stream_shard.add(current, item)

        return item

    def __nonzero__(self):
@ -412,9 +609,14 @@ class Table(BaseModel):
    def delete_item(self, hash_key, range_key):
        try:
            if range_key:
                return self.items[hash_key].pop(range_key)
                item = self.items[hash_key].pop(range_key)
            else:
                return self.items.pop(hash_key)
                item = self.items.pop(hash_key)

            if self.stream_shard is not None:
                self.stream_shard.add(item, None)

            return item
        except KeyError:
            return None

@ -422,8 +624,9 @@ class Table(BaseModel):
              exclusive_start_key, scan_index_forward, projection_expression,
              index_name=None, filter_expression=None, **filter_kwargs):
        results = []

        if index_name:
            all_indexes = (self.global_indexes or []) + (self.indexes or [])
            all_indexes = self.all_indexes()
            indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
            if index_name not in indexes_by_name:
                raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
@ -438,24 +641,28 @@ class Table(BaseModel):
                raise ValueError('Missing Hash Key. KeySchema: %s' %
                                 index['KeySchema'])

            possible_results = []
            for item in self.all_items():
                if not isinstance(item, Item):
                    continue
                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
                if item_hash_key and item_hash_key == hash_key:
                    possible_results.append(item)
        else:
            possible_results = [item for item in list(self.all_items()) if isinstance(
                item, Item) and item.hash_key == hash_key]

        if index_name:
            try:
                index_range_key = [key for key in index[
                    'KeySchema'] if key['KeyType'] == 'RANGE'][0]
            except IndexError:
                index_range_key = None

            possible_results = []
            for item in self.all_items():
                if not isinstance(item, Item):
                    continue
                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
                if index_range_key is None:
                    if item_hash_key and item_hash_key == hash_key:
                        possible_results.append(item)
                else:
                    item_range_key = item.attrs.get(index_range_key['AttributeName'])
                    if item_hash_key and item_hash_key == hash_key and item_range_key:
                        possible_results.append(item)
        else:
            possible_results = [item for item in list(self.all_items()) if isinstance(
                item, Item) and item.hash_key == hash_key]

        if range_comparison:
            if index_name and not index_range_key:
                raise ValueError(
@ -491,14 +698,6 @@ class Table(BaseModel):
        else:
            results.sort(key=lambda item: item.range_key)

        if projection_expression:
            expressions = [x.strip() for x in projection_expression.split(',')]
            results = copy.deepcopy(results)
            for result in results:
                for attr in list(result.attrs):
                    if attr not in expressions:
                        result.attrs.pop(attr)

        if scan_index_forward is False:
            results.reverse()

@ -507,6 +706,14 @@ class Table(BaseModel):
        if filter_expression is not None:
            results = [item for item in results if filter_expression.expr(item)]

        if projection_expression:
            expressions = [x.strip() for x in projection_expression.split(',')]
            results = copy.deepcopy(results)
            for result in results:
                for attr in list(result.attrs):
                    if attr not in expressions:
                        result.attrs.pop(attr)

        results, last_evaluated_key = self._trim_results(results, limit,
                                                         exclusive_start_key)
        return results, scanned_count, last_evaluated_key
@ -519,11 +726,39 @@ class Table(BaseModel):
            else:
                yield hash_set

    def scan(self, filters, limit, exclusive_start_key, filter_expression=None):
    def all_indexes(self):
        return (self.global_indexes or []) + (self.indexes or [])

    def has_idx_items(self, index_name):

        all_indexes = self.all_indexes()
        indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
        idx = indexes_by_name[index_name]
        idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']])

        for hash_set in self.items.values():
            if self.range_key_attr:
                for item in hash_set.values():
                    if idx_col_set.issubset(set(item.attrs)):
                        yield item
            else:
                if idx_col_set.issubset(set(hash_set.attrs)):
                    yield hash_set

    def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None, projection_expression=None):
        results = []
        scanned_count = 0
        all_indexes = self.all_indexes()
        indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)

        for item in self.all_items():
        if index_name:
            if index_name not in indexes_by_name:
                raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name)
            items = self.has_idx_items(index_name)
        else:
            items = self.all_items()

        for item in items:
            scanned_count += 1
            passes_all_conditions = True
            for attribute_name, (comparison_operator, comparison_objs) in filters.items():
@ -549,11 +784,19 @@ class Table(BaseModel):
            if passes_all_conditions:
                results.append(item)

        if projection_expression:
            expressions = [x.strip() for x in projection_expression.split(',')]
            results = copy.deepcopy(results)
            for result in results:
                for attr in list(result.attrs):
                    if attr not in expressions:
                        result.attrs.pop(attr)

        results, last_evaluated_key = self._trim_results(results, limit,
                                                         exclusive_start_key)
                                                         exclusive_start_key, index_name)
        return results, scanned_count, last_evaluated_key

    def _trim_results(self, results, limit, exclusive_start_key):
    def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None):
        if exclusive_start_key is not None:
            hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
            range_key = exclusive_start_key.get(self.range_key_attr)
@ -573,6 +816,14 @@ class Table(BaseModel):
            if results[-1].range_key is not None:
                last_evaluated_key[self.range_key_attr] = results[-1].range_key

            if scanned_index:
                all_indexes = self.all_indexes()
                indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
                idx = indexes_by_name[scanned_index]
                idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
                for col in idx_col_list:
                    last_evaluated_key[col] = results[-1].attrs[col]

        return results, last_evaluated_key
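The new `scanned_index` branch matters for pagination: when a scan ran against an index, `LastEvaluatedKey` must include the index's key attributes in addition to the table keys, or a follow-up scan passing it as `ExclusiveStartKey` could not resume on that index. Illustrative shape of what the loop copies (index and attribute names are made up):

    idx = {'IndexName': 'test_gsi',
           'KeySchema': [{'AttributeName': 'gsi_hash', 'KeyType': 'HASH'}]}
    idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
    last_item_attrs = {'id': {'S': '1'}, 'gsi_hash': {'S': 'h'}}

    last_evaluated_key = {'id': last_item_attrs['id']}  # table key, as before
    for col in idx_col_list:
        last_evaluated_key[col] = last_item_attrs[col]  # plus the index keys
    assert 'gsi_hash' in last_evaluated_key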
    def lookup(self, *args, **kwargs):
@ -630,6 +881,13 @@ class DynamoDBBackend(BaseBackend):
        table.throughput = throughput
        return table

    def update_table_streams(self, name, stream_specification):
        table = self.tables[name]
        if (stream_specification.get('StreamEnabled') or stream_specification.get('StreamViewType')) and table.latest_stream_label:
            raise ValueError('Table already has stream enabled')
        table.set_stream_specification(stream_specification)
        return table

    def update_table_global_indexes(self, name, global_index_updates):
        table = self.tables[name]
        gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
@ -660,14 +918,20 @@ class DynamoDBBackend(BaseBackend):

            gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create

        table.global_indexes = gsis_by_name.values()
        # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other
        # parts of the codebase
        table.global_indexes = list(gsis_by_name.values())
        return table

    def put_item(self, table_name, item_attrs, expected=None, overwrite=False):
    def put_item(self, table_name, item_attrs, expected=None,
                 condition_expression=None, expression_attribute_names=None,
                 expression_attribute_values=None, overwrite=False):
        table = self.tables.get(table_name)
        if not table:
            return None
        return table.put_item(item_attrs, expected, overwrite)
        return table.put_item(item_attrs, expected, condition_expression,
                              expression_attribute_names,
                              expression_attribute_values, overwrite)

    def get_table_keys_name(self, table_name, keys):
        """
@ -723,15 +987,12 @@ class DynamoDBBackend(BaseBackend):
        range_values = [DynamoType(range_value)
                        for range_value in range_value_dicts]

        if filter_expression is not None:
            filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
        else:
            filter_expression = Op(None, None)  # Will always eval to true
        filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)

        return table.query(hash_key, range_comparison, range_values, limit,
                           exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)

    def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values):
    def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name, projection_expression):
        table = self.tables.get(table_name)
        if not table:
            return None, None, None
@ -741,15 +1002,14 @@ class DynamoDBBackend(BaseBackend):
            dynamo_types = [DynamoType(value) for value in comparison_values]
            scan_filters[key] = (comparison_operator, dynamo_types)

        if filter_expression is not None:
            filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
        else:
            filter_expression = Op(None, None)  # Will always eval to true
        filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)

        return table.scan(scan_filters, limit, exclusive_start_key, filter_expression)
        projection_expression = ','.join([expr_names.get(attr, attr) for attr in projection_expression.replace(' ', '').split(',')])

        return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name, projection_expression)

    def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names,
                    expression_attribute_values, expected=None):
                    expression_attribute_values, expected=None, condition_expression=None):
        table = self.get_table(table_name)

        if all([table.hash_key_attr in key, table.range_key_attr in key]):
@ -768,32 +1028,17 @@ class DynamoDBBackend(BaseBackend):

        item = table.get_item(hash_value, range_value)

        if item is None:
            item_attr = {}
        elif hasattr(item, 'attrs'):
            item_attr = item.attrs
        else:
            item_attr = item

        if not expected:
            expected = {}

        for key, val in expected.items():
            if 'Exists' in val and val['Exists'] is False:
                if key in item_attr:
                    raise ValueError("The conditional request failed")
            elif key not in item_attr:
                raise ValueError("The conditional request failed")
            elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
                raise ValueError("The conditional request failed")
            elif 'ComparisonOperator' in val:
                comparison_func = get_comparison_func(
                    val['ComparisonOperator'])
                dynamo_types = [DynamoType(ele) for ele in val[
                    "AttributeValueList"]]
                for t in dynamo_types:
                    if not comparison_func(item_attr[key].value, t.value):
                        raise ValueError('The conditional request failed')
        if not get_expected(expected).expr(item):
            raise ValueError('The conditional request failed')
        condition_op = get_filter_expression(
            condition_expression,
            expression_attribute_names,
            expression_attribute_values)
        if not condition_op.expr(item):
            raise ValueError('The conditional request failed')

        # Update does not fail on new items, so create one
        if item is None:
@ -5,9 +5,33 @@ import re

from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .exceptions import InvalidIndexNameError
from .models import dynamodb_backends, dynamo_json_dump


def has_empty_keys_or_values(_dict):
    if _dict == "":
        return True
    if not isinstance(_dict, dict):
        return False
    return any(
        key == '' or value == '' or
        has_empty_keys_or_values(value)
        for key, value in _dict.items()
    )


def get_empty_str_error():
    er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
    return (400,
            {'server': 'amazon.com'},
            dynamo_json_dump({'__type': er,
                              'message': ('One or more parameter values were '
                                          'invalid: An AttributeValue may not '
                                          'contain an empty string')}
                             ))

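`has_empty_keys_or_values` recurses through nested attribute maps, so an empty string buried several levels down is rejected the same way as a top-level one. Assuming the helper above:

    assert has_empty_keys_or_values('')                           # bare empty string
    assert has_empty_keys_or_values({'S': ''})                    # empty value
    assert has_empty_keys_or_values({'M': {'inner': {'S': ''}}})  # nested empty value
    assert not has_empty_keys_or_values({'S': 'ok'})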
class DynamoHandler(BaseResponse):

    def get_endpoint_name(self, headers):
@ -72,8 +96,16 @@ class DynamoHandler(BaseResponse):
        body = self.body
        # get the table name
        table_name = body['TableName']
        # get the throughput
        throughput = body["ProvisionedThroughput"]
        # check billing mode and get the throughput
        if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST":
            if "ProvisionedThroughput" in body.keys():
                er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
                return self.error(er,
                                  'ProvisionedThroughput cannot be specified \
                                  when BillingMode is PAY_PER_REQUEST')
            throughput = None
        else:  # Provisioned (default billing mode)
            throughput = body.get("ProvisionedThroughput")
        # getting the schema
        key_schema = body['KeySchema']
        # getting attribute definition
@ -81,13 +113,16 @@ class DynamoHandler(BaseResponse):
        # getting the indexes
        global_indexes = body.get("GlobalSecondaryIndexes", [])
        local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
        # get the stream specification
        streams = body.get("StreamSpecification")

        table = self.dynamodb_backend.create_table(table_name,
                                                   schema=key_schema,
                                                   throughput=throughput,
                                                   attr=attr,
                                                   global_indexes=global_indexes,
                                                   indexes=local_secondary_indexes)
                                                   indexes=local_secondary_indexes,
                                                   streams=streams)
        if table is not None:
            return dynamo_json_dump(table.describe())
        else:
@ -140,12 +175,20 @@ class DynamoHandler(BaseResponse):

    def update_table(self):
        name = self.body['TableName']
        table = self.dynamodb_backend.get_table(name)
        if 'GlobalSecondaryIndexUpdates' in self.body:
            table = self.dynamodb_backend.update_table_global_indexes(
                name, self.body['GlobalSecondaryIndexUpdates'])
        if 'ProvisionedThroughput' in self.body:
            throughput = self.body["ProvisionedThroughput"]
            table = self.dynamodb_backend.update_table_throughput(name, throughput)
        if 'StreamSpecification' in self.body:
            try:
                table = self.dynamodb_backend.update_table_streams(name, self.body['StreamSpecification'])
            except ValueError:
                er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException'
                return self.error(er, 'Cannot enable stream')

        return dynamo_json_dump(table.describe())

    def describe_table(self):
@ -160,17 +203,14 @@ class DynamoHandler(BaseResponse):
    def put_item(self):
        name = self.body['TableName']
        item = self.body['Item']
        return_values = self.body.get('ReturnValues', 'NONE')

        res = re.search('\"\"', json.dumps(item))
        if res:
        if return_values not in ('ALL_OLD', 'NONE'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return (400,
                    {'server': 'amazon.com'},
                    dynamo_json_dump({'__type': er,
                                      'message': ('One or more parameter values were '
                                                  'invalid: An AttributeValue may not '
                                                  'contain an empty string')}
                                     ))
            return self.error(er, 'Return values set to invalid value')

        if has_empty_keys_or_values(item):
            return get_empty_str_error()

        overwrite = 'Expected' not in self.body
        if not overwrite:
@ -178,31 +218,27 @@ class DynamoHandler(BaseResponse):
        else:
            expected = None

        if return_values == 'ALL_OLD':
            existing_item = self.dynamodb_backend.get_item(name, item)
            if existing_item:
                existing_attributes = existing_item.to_json()['Attributes']
            else:
                existing_attributes = {}

        # Attempt to parse simple ConditionExpressions into an Expected
        # expression
        if not expected:
            condition_expression = self.body.get('ConditionExpression')
            if condition_expression and 'OR' not in condition_expression:
                cond_items = [c.strip()
                              for c in condition_expression.split('AND')]
        condition_expression = self.body.get('ConditionExpression')
        expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
        expression_attribute_values = self.body.get('ExpressionAttributeValues', {})

                if cond_items:
                    expected = {}
                    overwrite = False
                    exists_re = re.compile('^attribute_exists\((.*)\)$')
                    not_exists_re = re.compile(
                        '^attribute_not_exists\((.*)\)$')

                    for cond in cond_items:
                        exists_m = exists_re.match(cond)
                        not_exists_m = not_exists_re.match(cond)
                        if exists_m:
                            expected[exists_m.group(1)] = {'Exists': True}
                        elif not_exists_m:
                            expected[not_exists_m.group(1)] = {'Exists': False}
        if condition_expression:
            overwrite = False

        try:
            result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
            result = self.dynamodb_backend.put_item(
                name, item, expected, condition_expression,
                expression_attribute_names, expression_attribute_values,
                overwrite)
        except ValueError:
            er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
            return self.error(er, 'A condition specified in the operation could not be evaluated.')
@ -213,6 +249,10 @@ class DynamoHandler(BaseResponse):
                'TableName': name,
                'CapacityUnits': 1
            }
            if return_values == 'ALL_OLD':
                item_dict['Attributes'] = existing_attributes
            else:
                item_dict.pop('Attributes', None)
            return dynamo_json_dump(item_dict)
        else:
            er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
@ -370,7 +410,7 @@ class DynamoHandler(BaseResponse):
                range_values = [value_alias_map[
                    range_key_expression_components[2]]]
        else:
            hash_key_expression = key_condition_expression
            hash_key_expression = key_condition_expression.strip('()')
            range_comparison = None
            range_values = []

@ -457,9 +497,10 @@ class DynamoHandler(BaseResponse):
        filter_expression = self.body.get('FilterExpression')
        expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
        expression_attribute_names = self.body.get('ExpressionAttributeNames', {})

        projection_expression = self.body.get('ProjectionExpression', '')
        exclusive_start_key = self.body.get('ExclusiveStartKey')
        limit = self.body.get("Limit")
        index_name = self.body.get('IndexName')

        try:
            items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
@ -467,7 +508,12 @@ class DynamoHandler(BaseResponse):
                                                                                  exclusive_start_key,
                                                                                  filter_expression,
                                                                                  expression_attribute_names,
                                                                                  expression_attribute_values)
                                                                                  expression_attribute_values,
                                                                                  index_name,
                                                                                  projection_expression)
        except InvalidIndexNameError as err:
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, str(err))
        except ValueError as err:
            er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
            return self.error(er, 'Bad Filter Expression: {0}'.format(err))
@ -497,7 +543,11 @@ class DynamoHandler(BaseResponse):
    def delete_item(self):
        name = self.body['TableName']
        keys = self.body['Key']
        return_values = self.body.get('ReturnValues', '')
        return_values = self.body.get('ReturnValues', 'NONE')
        if return_values not in ('ALL_OLD', 'NONE'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, 'Return values set to invalid value')

        table = self.dynamodb_backend.get_table(name)
        if not table:
            er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
@ -514,13 +564,26 @@ class DynamoHandler(BaseResponse):
    def update_item(self):
        name = self.body['TableName']
        key = self.body['Key']
        update_expression = self.body.get('UpdateExpression')
        return_values = self.body.get('ReturnValues', 'NONE')
        update_expression = self.body.get('UpdateExpression', '').strip()
        attribute_updates = self.body.get('AttributeUpdates')
        expression_attribute_names = self.body.get(
            'ExpressionAttributeNames', {})
        expression_attribute_values = self.body.get(
            'ExpressionAttributeValues', {})
        existing_item = self.dynamodb_backend.get_item(name, key)
        if existing_item:
            existing_attributes = existing_item.to_json()['Attributes']
        else:
            existing_attributes = {}

        if return_values not in ('NONE', 'ALL_OLD', 'ALL_NEW', 'UPDATED_OLD',
                                 'UPDATED_NEW'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, 'Return values set to invalid value')

        if has_empty_keys_or_values(expression_attribute_values):
            return get_empty_str_error()

        if 'Expected' in self.body:
            expected = self.body['Expected']
@ -529,25 +592,9 @@ class DynamoHandler(BaseResponse):

        # Attempt to parse simple ConditionExpressions into an Expected
        # expression
        if not expected:
            condition_expression = self.body.get('ConditionExpression')
            if condition_expression and 'OR' not in condition_expression:
                cond_items = [c.strip()
                              for c in condition_expression.split('AND')]

                if cond_items:
                    expected = {}
                    exists_re = re.compile('^attribute_exists\((.*)\)$')
                    not_exists_re = re.compile(
                        '^attribute_not_exists\((.*)\)$')

                    for cond in cond_items:
                        exists_m = exists_re.match(cond)
                        not_exists_m = not_exists_re.match(cond)
                        if exists_m:
                            expected[exists_m.group(1)] = {'Exists': True}
                        elif not_exists_m:
                            expected[not_exists_m.group(1)] = {'Exists': False}
        condition_expression = self.body.get('ConditionExpression')
        expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
        expression_attribute_values = self.body.get('ExpressionAttributeValues', {})

        # Support spaces between operators in an update expression
        # E.g. `a = b + c` -> `a=b+c`
@ -558,7 +605,7 @@ class DynamoHandler(BaseResponse):
        try:
            item = self.dynamodb_backend.update_item(
                name, key, update_expression, attribute_updates, expression_attribute_names,
                expression_attribute_values, expected
                expression_attribute_values, expected, condition_expression
            )
        except ValueError:
            er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
@ -572,8 +619,26 @@ class DynamoHandler(BaseResponse):
            'TableName': name,
            'CapacityUnits': 0.5
        }
        if not existing_item:
        unchanged_attributes = {
            k for k in existing_attributes.keys()
            if existing_attributes[k] == item_dict['Attributes'].get(k)
        }
        changed_attributes = set(existing_attributes.keys()).union(item_dict['Attributes'].keys()).difference(unchanged_attributes)

        if return_values == 'NONE':
            item_dict['Attributes'] = {}
        elif return_values == 'ALL_OLD':
            item_dict['Attributes'] = existing_attributes
        elif return_values == 'UPDATED_OLD':
            item_dict['Attributes'] = {
                k: v for k, v in existing_attributes.items()
                if k in changed_attributes
            }
        elif return_values == 'UPDATED_NEW':
            item_dict['Attributes'] = {
                k: v for k, v in item_dict['Attributes'].items()
                if k in changed_attributes
            }

        return dynamo_json_dump(item_dict)
6
moto/dynamodbstreams/__init__.py
Normal file
6
moto/dynamodbstreams/__init__.py
Normal file
@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import dynamodbstreams_backends
from ..core.models import base_decorator

dynamodbstreams_backend = dynamodbstreams_backends['us-east-1']
mock_dynamodbstreams = base_decorator(dynamodbstreams_backends)
129
moto/dynamodbstreams/models.py
Normal file
129
moto/dynamodbstreams/models.py
Normal file
@ -0,0 +1,129 @@
from __future__ import unicode_literals

import os
import json
import boto3
import base64

from moto.core import BaseBackend, BaseModel
from moto.dynamodb2.models import dynamodb_backends


class ShardIterator(BaseModel):
    def __init__(self, streams_backend, stream_shard, shard_iterator_type, sequence_number=None):
        self.id = base64.b64encode(os.urandom(472)).decode('utf-8')
        self.streams_backend = streams_backend
        self.stream_shard = stream_shard
        self.shard_iterator_type = shard_iterator_type
        if shard_iterator_type == 'TRIM_HORIZON':
            self.sequence_number = stream_shard.starting_sequence_number
        elif shard_iterator_type == 'LATEST':
            self.sequence_number = stream_shard.starting_sequence_number + len(stream_shard.items)
        elif shard_iterator_type == 'AT_SEQUENCE_NUMBER':
            self.sequence_number = sequence_number
        elif shard_iterator_type == 'AFTER_SEQUENCE_NUMBER':
            self.sequence_number = sequence_number + 1

    @property
    def arn(self):
        return '{}/stream/{}|1|{}'.format(
            self.stream_shard.table.table_arn,
            self.stream_shard.table.latest_stream_label,
            self.id)

    def to_json(self):
        return {
            'ShardIterator': self.arn
        }

    def get(self, limit=1000):
        items = self.stream_shard.get(self.sequence_number, limit)
        try:
            last_sequence_number = max(i['dynamodb']['SequenceNumber'] for i in items)
            new_shard_iterator = ShardIterator(self.streams_backend,
                                               self.stream_shard,
                                               'AFTER_SEQUENCE_NUMBER',
                                               last_sequence_number)
        except ValueError:
            new_shard_iterator = ShardIterator(self.streams_backend,
                                               self.stream_shard,
                                               'AT_SEQUENCE_NUMBER',
                                               self.sequence_number)

        self.streams_backend.shard_iterators[new_shard_iterator.arn] = new_shard_iterator
        return {
            'NextShardIterator': new_shard_iterator.arn,
            'Records': items
        }


class DynamoDBStreamsBackend(BaseBackend):
    def __init__(self, region):
        self.region = region
        self.shard_iterators = {}

    def reset(self):
        region = self.region
        self.__dict__ = {}
        self.__init__(region)

    @property
    def dynamodb(self):
        return dynamodb_backends[self.region]

    def _get_table_from_arn(self, arn):
        table_name = arn.split(':', 6)[5].split('/')[1]
        return self.dynamodb.get_table(table_name)

    def describe_stream(self, arn):
        table = self._get_table_from_arn(arn)
        resp = {'StreamDescription': {
            'StreamArn': arn,
            'StreamLabel': table.latest_stream_label,
            'StreamStatus': ('ENABLED' if table.latest_stream_label
                             else 'DISABLED'),
            'StreamViewType': table.stream_specification['StreamViewType'],
            'CreationRequestDateTime': table.stream_shard.created_on.isoformat(),
            'TableName': table.name,
            'KeySchema': table.schema,
            'Shards': ([table.stream_shard.to_json()] if table.stream_shard
                       else [])
        }}

        return json.dumps(resp)

    def list_streams(self, table_name=None):
        streams = []
        for table in self.dynamodb.tables.values():
            if table_name is not None and table.name != table_name:
                continue
            if table.latest_stream_label:
                d = table.describe(base_key='Table')
                streams.append({
                    'StreamArn': d['Table']['LatestStreamArn'],
                    'TableName': d['Table']['TableName'],
                    'StreamLabel': d['Table']['LatestStreamLabel']
                })

        return json.dumps({'Streams': streams})

    def get_shard_iterator(self, arn, shard_id, shard_iterator_type, sequence_number=None):
        table = self._get_table_from_arn(arn)
        assert table.stream_shard.id == shard_id

        shard_iterator = ShardIterator(self, table.stream_shard,
                                       shard_iterator_type,
                                       sequence_number)
        self.shard_iterators[shard_iterator.arn] = shard_iterator

        return json.dumps(shard_iterator.to_json())

    def get_records(self, iterator_arn, limit):
        shard_iterator = self.shard_iterators[iterator_arn]
        return json.dumps(shard_iterator.get(limit))


available_regions = boto3.session.Session().get_available_regions(
    'dynamodbstreams')
dynamodbstreams_backends = {region: DynamoDBStreamsBackend(region=region)
                            for region in available_regions}
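End to end, the backend supports the same call sequence a real DynamoDB Streams client makes: list or describe the stream, fetch a shard iterator, then page through records. A hedged sketch using the new decorator (table and key names are illustrative, and it assumes `mock_dynamodbstreams` is exported from the package root like the other mocks):

    import boto3
    from moto import mock_dynamodb2, mock_dynamodbstreams

    @mock_dynamodb2
    @mock_dynamodbstreams
    def demo():
        ddb = boto3.client('dynamodb', region_name='us-east-1')
        ddb.create_table(
            TableName='test-streams',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id',
                                   'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1,
                                   'WriteCapacityUnits': 1},
            StreamSpecification={'StreamEnabled': True,
                                 'StreamViewType': 'NEW_AND_OLD_IMAGES'})
        ddb.put_item(TableName='test-streams', Item={'id': {'S': 'row1'}})

        streams = boto3.client('dynamodbstreams', region_name='us-east-1')
        arn = streams.list_streams(
            TableName='test-streams')['Streams'][0]['StreamArn']
        desc = streams.describe_stream(StreamArn=arn)['StreamDescription']
        it = streams.get_shard_iterator(
            StreamArn=arn, ShardId=desc['Shards'][0]['ShardId'],
            ShardIteratorType='TRIM_HORIZON')['ShardIterator']
        records = streams.get_records(ShardIterator=it)['Records']
        assert records[0]['eventName'] == 'INSERT'

    demo()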
34
moto/dynamodbstreams/responses.py
Normal file
34
moto/dynamodbstreams/responses.py
Normal file
@ -0,0 +1,34 @@
from __future__ import unicode_literals

from moto.core.responses import BaseResponse

from .models import dynamodbstreams_backends


class DynamoDBStreamsHandler(BaseResponse):

    @property
    def backend(self):
        return dynamodbstreams_backends[self.region]

    def describe_stream(self):
        arn = self._get_param('StreamArn')
        return self.backend.describe_stream(arn)

    def list_streams(self):
        table_name = self._get_param('TableName')
        return self.backend.list_streams(table_name)

    def get_shard_iterator(self):
        arn = self._get_param('StreamArn')
        shard_id = self._get_param('ShardId')
        shard_iterator_type = self._get_param('ShardIteratorType')
        return self.backend.get_shard_iterator(arn, shard_id,
                                               shard_iterator_type)

    def get_records(self):
        arn = self._get_param('ShardIterator')
        limit = self._get_param('Limit')
        if limit is None:
            limit = 1000
        return self.backend.get_records(arn, limit)
10
moto/dynamodbstreams/urls.py
Normal file
10
moto/dynamodbstreams/urls.py
Normal file
@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import DynamoDBStreamsHandler

url_bases = [
    "https?://streams.dynamodb.(.+).amazonaws.com"
]

url_paths = {
    "{0}/$": DynamoDBStreamsHandler.dispatch,
}
@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError):
                .format(key))


class InvalidKeyPairFormatError(EC2ClientError):

    def __init__(self):
        super(InvalidKeyPairFormatError, self).__init__(
            "InvalidKeyPair.Format",
            "Key is not in valid OpenSSH public key format")


class InvalidVPCIdError(EC2ClientError):

    def __init__(self, vpc_id):
@ -289,6 +297,15 @@ class InvalidAssociationIdError(EC2ClientError):
                .format(association_id))


class InvalidVpcCidrBlockAssociationIdError(EC2ClientError):

    def __init__(self, association_id):
        super(InvalidVpcCidrBlockAssociationIdError, self).__init__(
            "InvalidVpcCidrBlockAssociationIdError.NotFound",
            "The vpc CIDR block association ID '{0}' does not exist"
            .format(association_id))


class InvalidVPCPeeringConnectionIdError(EC2ClientError):

    def __init__(self, vpc_peering_connection_id):
@ -324,6 +341,15 @@ class InvalidParameterValueErrorTagNull(EC2ClientError):
            "Tag value cannot be null. Use empty string instead.")


class InvalidParameterValueErrorUnknownAttribute(EC2ClientError):

    def __init__(self, parameter_value):
        super(InvalidParameterValueErrorUnknownAttribute, self).__init__(
            "InvalidParameterValue",
            "Value ({0}) for parameter attribute is invalid. Unknown attribute."
            .format(parameter_value))


class InvalidInternetGatewayIdError(EC2ClientError):

    def __init__(self, internet_gateway_id):
@ -401,3 +427,108 @@ class FilterNotImplementedError(MotoNotImplementedError):
        super(FilterNotImplementedError, self).__init__(
            "The filter '{0}' for {1}".format(
                filter_name, method_name))


class CidrLimitExceeded(EC2ClientError):

    def __init__(self, vpc_id, max_cidr_limit):
        super(CidrLimitExceeded, self).__init__(
            "CidrLimitExceeded",
            "This network '{0}' has met its maximum number of allowed CIDRs: {1}".format(vpc_id, max_cidr_limit)
        )


class OperationNotPermitted(EC2ClientError):

    def __init__(self, association_id):
        super(OperationNotPermitted, self).__init__(
            "OperationNotPermitted",
            "The vpc CIDR block with association ID {} may not be disassociated. "
            "It is the primary IPv4 CIDR block of the VPC".format(association_id)
        )


class InvalidAvailabilityZoneError(EC2ClientError):

    def __init__(self, availability_zone_value, valid_availability_zones):
        super(InvalidAvailabilityZoneError, self).__init__(
            "InvalidParameterValue",
            "Value ({0}) for parameter availabilityZone is invalid. "
            "Subnets can currently only be created in the following availability zones: {1}.".format(availability_zone_value, valid_availability_zones)
        )


class NetworkAclEntryAlreadyExistsError(EC2ClientError):

    def __init__(self, rule_number):
        super(NetworkAclEntryAlreadyExistsError, self).__init__(
            "NetworkAclEntryAlreadyExists",
            "The network acl entry identified by {} already exists.".format(rule_number)
        )


class InvalidSubnetRangeError(EC2ClientError):

    def __init__(self, cidr_block):
        super(InvalidSubnetRangeError, self).__init__(
            "InvalidSubnet.Range",
            "The CIDR '{}' is invalid.".format(cidr_block)
        )


class InvalidCIDRBlockParameterError(EC2ClientError):

    def __init__(self, cidr_block):
        super(InvalidCIDRBlockParameterError, self).__init__(
            "InvalidParameterValue",
            "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
        )


class InvalidDestinationCIDRBlockParameterError(EC2ClientError):

    def __init__(self, cidr_block):
        super(InvalidDestinationCIDRBlockParameterError, self).__init__(
            "InvalidParameterValue",
            "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block)
        )


class InvalidSubnetConflictError(EC2ClientError):

    def __init__(self, cidr_block):
        super(InvalidSubnetConflictError, self).__init__(
            "InvalidSubnet.Conflict",
            "The CIDR '{}' conflicts with another subnet".format(cidr_block)
        )


class InvalidVPCRangeError(EC2ClientError):

    def __init__(self, cidr_block):
        super(InvalidVPCRangeError, self).__init__(
            "InvalidVpc.Range",
            "The CIDR '{}' is invalid.".format(cidr_block)
        )


# accept exception
class OperationNotPermitted2(EC2ClientError):
    def __init__(self, client_region, pcx_id, acceptor_region):
        super(OperationNotPermitted2, self).__init__(
            "OperationNotPermitted",
            "Incorrect region ({0}) specified for this request. "
            "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region)
        )


# reject exception
class OperationNotPermitted3(EC2ClientError):
    def __init__(self, client_region, pcx_id, acceptor_region):
        super(OperationNotPermitted3, self).__init__(
            "OperationNotPermitted",
            "Incorrect region ({0}) specified for this request. "
            "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region,
                                                                                           pcx_id,
                                                                                           acceptor_region)
        )

635
moto/ec2/models.py
Executable file → Normal file
635
moto/ec2/models.py
Executable file → Normal file
File diff suppressed because it is too large
Load Diff
AMI fixture data (the file header for this diff was lost in extraction; 34 hunks, each adding one line to an AMI entry — the added lines themselves did not survive extraction, only surrounding context). Representative hunk:

@@ -4,6 +4,7 @@
     "state": "available",
     "public": true,
     "owner_id": "801119661308",
     "image_location": "amazon/getting-started",
     "sriov": "simple",
     "root_device_type": "ebs",
     "root_device_name": "/dev/sda1",

[The remaining 33 hunks repeat this context pattern, varying only in "owner_id" (801119661308, 099720109477, 013907871322, 137112412989, 898082745236, 309956199498), "root_device_name" (/dev/sda1, /dev/xvda, /dev/sda), and one hunk that also shows "ami_id": "ami-56ec3e2f".]
@@ -11,7 +11,7 @@ class AmisResponse(BaseResponse):
         instance_id = self._get_param('InstanceId')
         if self.is_not_dryrun('CreateImage'):
             image = self.ec2_backend.create_image(
-                instance_id, name, description)
+                instance_id, name, description, context=self)
             template = self.response_template(CREATE_IMAGE_RESPONSE)
             return template.render(image=image)

@@ -39,7 +39,8 @@ class AmisResponse(BaseResponse):
         owners = self._get_multi_param('Owner')
         exec_users = self._get_multi_param('ExecutableBy')
         images = self.ec2_backend.describe_images(
-            ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners)
+            ami_ids=ami_ids, filters=filters, exec_users=exec_users,
+            owners=owners, context=self)
         template = self.response_template(DESCRIBE_IMAGES_RESPONSE)
         return template.render(images=images)
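Threading the request context into create_image and describe_images lets the backend resolve owner-relative queries against the calling account. A minimal sketch of the behaviour this enables, assuming moto's standard mock decorator (the AMI id is illustrative):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_self_owned_images():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        reservation = ec2.run_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)
        instance_id = reservation['Instances'][0]['InstanceId']
        ec2.create_image(InstanceId=instance_id, Name='my-image')
        # Owners=['self'] resolves against the account taken from context=self
        images = ec2.describe_images(Owners=['self'])['Images']
        assert any(image['Name'] == 'my-image' for image in images)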
@@ -112,12 +113,12 @@ DESCRIBE_IMAGES_RESPONSE = """<DescribeImagesResponse xmlns="http://ec2.amazonaw
             <rootDeviceName>{{ image.root_device_name }}</rootDeviceName>
             <blockDeviceMapping>
               <item>
-                <deviceName>/dev/sda1</deviceName>
+                <deviceName>{{ image.root_device_name }}</deviceName>
                 <ebs>
                   <snapshotId>{{ image.ebs_snapshot.id }}</snapshotId>
                   <volumeSize>15</volumeSize>
                   <deleteOnTermination>false</deleteOnTermination>
-                  <volumeType>standard</volumeType>
+                  <volumeType>{{ image.root_device_type }}</volumeType>
                 </ebs>
               </item>
             </blockDeviceMapping>
@@ -10,7 +10,8 @@ class AvailabilityZonesAndRegions(BaseResponse):
         return template.render(zones=zones)

     def describe_regions(self):
-        regions = self.ec2_backend.describe_regions()
+        region_names = self._get_multi_param('RegionName')
+        regions = self.ec2_backend.describe_regions(region_names)
         template = self.response_template(DESCRIBE_REGIONS_RESPONSE)
         return template.render(regions=regions)
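describe_regions now honours the RegionName filter. A quick sketch, assuming moto's mock decorator:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_region_filter():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        regions = ec2.describe_regions(RegionNames=['us-east-1', 'eu-west-1'])['Regions']
        assert {r['RegionName'] for r in regions} == {'us-east-1', 'eu-west-1'}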
@@ -16,15 +16,23 @@ class ElasticBlockStore(BaseResponse):
         return template.render(attachment=attachment)

     def copy_snapshot(self):
+        source_snapshot_id = self._get_param('SourceSnapshotId')
+        source_region = self._get_param('SourceRegion')
+        description = self._get_param('Description')
         if self.is_not_dryrun('CopySnapshot'):
-            raise NotImplementedError(
-                'ElasticBlockStore.copy_snapshot is not yet implemented')
+            snapshot = self.ec2_backend.copy_snapshot(
+                source_snapshot_id, source_region, description)
+            template = self.response_template(COPY_SNAPSHOT_RESPONSE)
+            return template.render(snapshot=snapshot)

     def create_snapshot(self):
         volume_id = self._get_param('VolumeId')
         description = self._get_param('Description')
+        tags = self._parse_tag_specification("TagSpecification")
+        snapshot_tags = tags.get('snapshot', {})
         if self.is_not_dryrun('CreateSnapshot'):
             snapshot = self.ec2_backend.create_snapshot(volume_id, description)
+            snapshot.add_tags(snapshot_tags)
             template = self.response_template(CREATE_SNAPSHOT_RESPONSE)
             return template.render(snapshot=snapshot)

@@ -32,10 +40,13 @@ class ElasticBlockStore(BaseResponse):
         size = self._get_param('Size')
         zone = self._get_param('AvailabilityZone')
         snapshot_id = self._get_param('SnapshotId')
+        tags = self._parse_tag_specification("TagSpecification")
+        volume_tags = tags.get('volume', {})
         encrypted = self._get_param('Encrypted', if_none=False)
         if self.is_not_dryrun('CreateVolume'):
             volume = self.ec2_backend.create_volume(
                 size, zone, snapshot_id, encrypted)
+            volume.add_tags(volume_tags)
             template = self.response_template(CREATE_VOLUME_RESPONSE)
             return template.render(volume=volume)
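CopySnapshot goes from NotImplementedError to a working backend call, and both CreateSnapshot and CreateVolume now apply TagSpecification tags. A hedged usage sketch (sizes and tag values are illustrative):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_ebs_changes():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        volume = ec2.create_volume(
            Size=8, AvailabilityZone='us-east-1a',
            TagSpecifications=[{'ResourceType': 'volume',
                                'Tags': [{'Key': 'env', 'Value': 'test'}]}])
        snapshot = ec2.create_snapshot(VolumeId=volume['VolumeId'])
        # previously this raised NotImplementedError
        copy = ec2.copy_snapshot(SourceSnapshotId=snapshot['SnapshotId'],
                                 SourceRegion='us-east-1')
        print(copy['SnapshotId'])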
@@ -139,6 +150,18 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>creating</status>
   <createTime>{{ volume.create_time}}</createTime>
+  {% if volume.get_tags() %}
+    <tagSet>
+      {% for tag in volume.get_tags() %}
+        <item>
+          <resourceId>{{ tag.resource_id }}</resourceId>
+          <resourceType>{{ tag.resource_type }}</resourceType>
+          <key>{{ tag.key }}</key>
+          <value>{{ tag.value }}</value>
+        </item>
+      {% endfor %}
+    </tagSet>
+  {% endif %}
   <volumeType>standard</volumeType>
 </CreateVolumeResponse>"""

@@ -170,16 +193,18 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
            </item>
          {% endif %}
        </attachmentSet>
-       <tagSet>
-         {% for tag in volume.get_tags() %}
-           <item>
-             <resourceId>{{ tag.resource_id }}</resourceId>
-             <resourceType>{{ tag.resource_type }}</resourceType>
-             <key>{{ tag.key }}</key>
-             <value>{{ tag.value }}</value>
-           </item>
-         {% endfor %}
-       </tagSet>
+       {% if volume.get_tags() %}
+         <tagSet>
+           {% for tag in volume.get_tags() %}
+             <item>
+               <resourceId>{{ tag.resource_id }}</resourceId>
+               <resourceType>{{ tag.resource_type }}</resourceType>
+               <key>{{ tag.key }}</key>
+               <value>{{ tag.value }}</value>
+             </item>
+           {% endfor %}
+         </tagSet>
+       {% endif %}
        <volumeType>standard</volumeType>
      </item>
    {% endfor %}

@@ -216,12 +241,27 @@ CREATE_SNAPSHOT_RESPONSE = """<CreateSnapshotResponse xmlns="http://ec2.amazonaw
   <status>pending</status>
   <startTime>{{ snapshot.start_time}}</startTime>
   <progress>60%</progress>
-  <ownerId>123456789012</ownerId>
+  <ownerId>{{ snapshot.owner_id }}</ownerId>
   <volumeSize>{{ snapshot.volume.size }}</volumeSize>
   <description>{{ snapshot.description }}</description>
   <encrypted>{{ snapshot.encrypted }}</encrypted>
+  <tagSet>
+    {% for tag in snapshot.get_tags() %}
+      <item>
+        <resourceId>{{ tag.resource_id }}</resourceId>
+        <resourceType>{{ tag.resource_type }}</resourceType>
+        <key>{{ tag.key }}</key>
+        <value>{{ tag.value }}</value>
+      </item>
+    {% endfor %}
+  </tagSet>
 </CreateSnapshotResponse>"""

+COPY_SNAPSHOT_RESPONSE = """<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
+  <snapshotId>{{ snapshot.id }}</snapshotId>
+</CopySnapshotResponse>"""

 DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
   <snapshotSet>

@@ -232,7 +272,7 @@ DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.am
       <status>{{ snapshot.status }}</status>
       <startTime>{{ snapshot.start_time}}</startTime>
       <progress>100%</progress>
-      <ownerId>123456789012</ownerId>
+      <ownerId>{{ snapshot.owner_id }}</ownerId>
       <volumeSize>{{ snapshot.volume.size }}</volumeSize>
       <description>{{ snapshot.description }}</description>
       <encrypted>{{ snapshot.encrypted }}</encrypted>
@@ -7,8 +7,13 @@ class ElasticIPAddresses(BaseResponse):

     def allocate_address(self):
         domain = self._get_param('Domain', if_none='standard')
+        reallocate_address = self._get_param('Address', if_none=None)
         if self.is_not_dryrun('AllocateAddress'):
-            address = self.ec2_backend.allocate_address(domain)
+            if reallocate_address:
+                address = self.ec2_backend.allocate_address(
+                    domain, address=reallocate_address)
+            else:
+                address = self.ec2_backend.allocate_address(domain)
             template = self.response_template(ALLOCATE_ADDRESS_RESPONSE)
             return template.render(address=address)
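AllocateAddress now accepts an explicit Address, so a caller can re-acquire a specific public IP. A minimal sketch of that path:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_reallocate_address():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        first = ec2.allocate_address(Domain='vpc')
        ec2.release_address(AllocationId=first['AllocationId'])
        # asking for the same IP back exercises allocate_address(domain, address=...)
        again = ec2.allocate_address(Domain='vpc', Address=first['PublicIp'])
        assert again['PublicIp'] == first['PublicIp']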
@@ -10,9 +10,10 @@ class ElasticNetworkInterfaces(BaseResponse):
         private_ip_address = self._get_param('PrivateIpAddress')
         groups = self._get_multi_param('SecurityGroupId')
         subnet = self.ec2_backend.get_subnet(subnet_id)
+        description = self._get_param('Description')
         if self.is_not_dryrun('CreateNetworkInterface'):
             eni = self.ec2_backend.create_network_interface(
-                subnet, private_ip_address, groups)
+                subnet, private_ip_address, groups, description)
             template = self.response_template(
                 CREATE_NETWORK_INTERFACE_RESPONSE)
             return template.render(eni=eni)

@@ -78,7 +79,11 @@ CREATE_NETWORK_INTERFACE_RESPONSE = """
       <subnetId>{{ eni.subnet.id }}</subnetId>
       <vpcId>{{ eni.subnet.vpc_id }}</vpcId>
       <availabilityZone>us-west-2a</availabilityZone>
+      {% if eni.description %}
+        <description>{{ eni.description }}</description>
+      {% else %}
       <description/>
+      {% endif %}
       <ownerId>498654062920</ownerId>
       <requesterManaged>false</requesterManaged>
       <status>pending</status>

@@ -121,7 +126,7 @@ DESCRIBE_NETWORK_INTERFACES_RESPONSE = """<DescribeNetworkInterfacesResponse xml
       <subnetId>{{ eni.subnet.id }}</subnetId>
       <vpcId>{{ eni.subnet.vpc_id }}</vpcId>
       <availabilityZone>us-west-2a</availabilityZone>
-      <description>Primary network interface</description>
+      <description>{{ eni.description }}</description>
       <ownerId>190610284047</ownerId>
       <requesterManaged>false</requesterManaged>
       {% if eni.attachment_id %}
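CreateNetworkInterface now records the Description parameter, and both templates echo it back instead of a hard-coded string. A sketch, assuming the boto3 ec2 resource API:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_eni_description():
        ec2 = boto3.resource('ec2', region_name='us-west-2')
        vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
        subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24')
        eni = subnet.create_network_interface(Description='primary')
        assert eni.description == 'primary'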
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals
 from boto.ec2.instancetype import InstanceType
+
+from moto.autoscaling import autoscaling_backends
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores
 from moto.ec2.utils import filters_from_querystring, \

@@ -45,6 +47,8 @@ class InstanceResponse(BaseResponse):
         private_ip = self._get_param('PrivateIpAddress')
         associate_public_ip = self._get_param('AssociatePublicIpAddress')
         key_name = self._get_param('KeyName')
+        ebs_optimized = self._get_param('EbsOptimized')
+        instance_initiated_shutdown_behavior = self._get_param("InstanceInitiatedShutdownBehavior")
         tags = self._parse_tag_specification("TagSpecification")
         region_name = self.region

@@ -54,7 +58,7 @@ class InstanceResponse(BaseResponse):
             instance_type=instance_type, placement=placement, region_name=region_name, subnet_id=subnet_id,
             owner_id=owner_id, key_name=key_name, security_group_ids=security_group_ids,
             nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip,
-            tags=tags)
+            tags=tags, ebs_optimized=ebs_optimized, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior)

         template = self.response_template(EC2_RUN_INSTANCES)
         return template.render(reservation=new_reservation)

@@ -63,6 +67,7 @@ class InstanceResponse(BaseResponse):
         instance_ids = self._get_multi_param('InstanceId')
         if self.is_not_dryrun('TerminateInstance'):
             instances = self.ec2_backend.terminate_instances(instance_ids)
+            autoscaling_backends[self.region].notify_terminate_instances(instance_ids)
             template = self.response_template(EC2_TERMINATE_INSTANCES)
             return template.render(instances=instances)

@@ -112,12 +117,11 @@ class InstanceResponse(BaseResponse):
         # TODO this and modify below should raise IncorrectInstanceState if
         # instance not in stopped state
         attribute = self._get_param('Attribute')
-        key = camelcase_to_underscores(attribute)
         instance_id = self._get_param('InstanceId')
         instance, value = self.ec2_backend.describe_instance_attribute(
-            instance_id, key)
+            instance_id, attribute)

-        if key == "group_set":
+        if attribute == "groupSet":
             template = self.response_template(
                 EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE)
         else:

@@ -242,7 +246,8 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
          <dnsName>{{ instance.public_dns }}</dnsName>
          <reason/>
          <keyName>{{ instance.key_name }}</keyName>
-         <amiLaunchIndex>0</amiLaunchIndex>
+         <ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
+         <amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
          <instanceType>{{ instance.instance_type }}</instanceType>
          <launchTime>{{ instance.launch_time }}</launchTime>
          <placement>

@@ -381,7 +386,8 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
          <dnsName>{{ instance.public_dns }}</dnsName>
          <reason>{{ instance._reason }}</reason>
          <keyName>{{ instance.key_name }}</keyName>
-         <amiLaunchIndex>0</amiLaunchIndex>
+         <ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
+         <amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
          <productCodes/>
          <instanceType>{{ instance.instance_type }}</instanceType>
          <launchTime>{{ instance.launch_time }}</launchTime>

@@ -447,6 +453,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
          </blockDeviceMapping>
          <virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
          <clientToken>ABCDE1234567890123</clientToken>
+         {% if instance.get_tags() %}
          <tagSet>
            {% for tag in instance.get_tags() %}
              <item>

@@ -457,6 +464,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
              </item>
            {% endfor %}
          </tagSet>
+         {% endif %}
          <hypervisor>xen</hypervisor>
          <networkInterfaceSet>
            {% for nic in instance.nics.values() %}

@@ -592,7 +600,9 @@ EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="h
   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
   <instanceId>{{ instance.id }}</instanceId>
   <{{ attribute }}>
+    {% if value is not none %}
     <value>{{ value }}</value>
+    {% endif %}
   </{{ attribute }}>
 </DescribeInstanceAttributeResponse>"""

@@ -600,9 +610,9 @@ EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse
   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
   <instanceId>{{ instance.id }}</instanceId>
   <{{ attribute }}>
-    {% for sg_id in value %}
+    {% for sg in value %}
       <item>
-        <groupId>{{ sg_id }}</groupId>
+        <groupId>{{ sg.id }}</groupId>
       </item>
     {% endfor %}
   </{{ attribute }}>
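run_instances picks up two new parameters (EbsOptimized, InstanceInitiatedShutdownBehavior) and terminate_instances now notifies the autoscaling backend. A short sketch of the new parameters (the AMI id is illustrative):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_run_instances_params():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        reservation = ec2.run_instances(
            ImageId='ami-12345678', MinCount=1, MaxCount=1,
            EbsOptimized=True,
            InstanceInitiatedShutdownBehavior='terminate')
        assert reservation['Instances'][0]['EbsOptimized'] is True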
@@ -11,6 +11,29 @@ def try_parse_int(value, default=None):
     return default


+def parse_sg_attributes_from_dict(sg_attributes):
+    ip_protocol = sg_attributes.get('IpProtocol', [None])[0]
+    from_port = sg_attributes.get('FromPort', [None])[0]
+    to_port = sg_attributes.get('ToPort', [None])[0]
+
+    ip_ranges = []
+    ip_ranges_tree = sg_attributes.get('IpRanges') or {}
+    for ip_range_idx in sorted(ip_ranges_tree.keys()):
+        ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
+
+    source_groups = []
+    source_group_ids = []
+    groups_tree = sg_attributes.get('Groups') or {}
+    for group_idx in sorted(groups_tree.keys()):
+        group_dict = groups_tree[group_idx]
+        if 'GroupId' in group_dict:
+            source_group_ids.append(group_dict['GroupId'][0])
+        elif 'GroupName' in group_dict:
+            source_groups.append(group_dict['GroupName'][0])
+
+    return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids


 class SecurityGroups(BaseResponse):

     def _process_rules_from_querystring(self):

@@ -29,28 +52,17 @@ class SecurityGroups(BaseResponse):
                 d = d[subkey]
             d[key_splitted[-1]] = value

+        if 'IpPermissions' not in querytree:
+            # Handle single rule syntax
+            ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree)
+            yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
+                   source_groups, source_group_ids)
+
         ip_permissions = querytree.get('IpPermissions') or {}
         for ip_permission_idx in sorted(ip_permissions.keys()):
             ip_permission = ip_permissions[ip_permission_idx]

-            ip_protocol = ip_permission.get('IpProtocol', [None])[0]
-            from_port = ip_permission.get('FromPort', [None])[0]
-            to_port = ip_permission.get('ToPort', [None])[0]
-
-            ip_ranges = []
-            ip_ranges_tree = ip_permission.get('IpRanges') or {}
-            for ip_range_idx in sorted(ip_ranges_tree.keys()):
-                ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
-
-            source_groups = []
-            source_group_ids = []
-            groups_tree = ip_permission.get('Groups') or {}
-            for group_idx in sorted(groups_tree.keys()):
-                group_dict = groups_tree[group_idx]
-                if 'GroupId' in group_dict:
-                    source_group_ids.append(group_dict['GroupId'][0])
-                elif 'GroupName' in group_dict:
-                    source_groups.append(group_dict['GroupName'][0])
+            ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission)

             yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
                    source_groups, source_group_ids)

@@ -179,8 +191,12 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = """<DescribeSecurityGroupsResponse xmlns="ht
        {% for rule in group.egress_rules %}
          <item>
            <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
+           {% if rule.from_port %}
            <fromPort>{{ rule.from_port }}</fromPort>
+           {% endif %}
+           {% if rule.to_port %}
            <toPort>{{ rule.to_port }}</toPort>
+           {% endif %}
            <groups>
              {% for source_group in rule.source_groups %}
                <item>
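The extracted helper parse_sg_attributes_from_dict is what lets _process_rules_from_querystring accept both wire formats: the flat single-rule parameters and the indexed IpPermissions.N tree. boto3's flat form exercises the new single-rule branch; a sketch:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_single_rule_syntax():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        sg = ec2.create_security_group(GroupName='web', Description='web sg')
        # top-level IpProtocol/FromPort/ToPort/CidrIp send the single-rule querystring form
        ec2.authorize_security_group_ingress(
            GroupId=sg['GroupId'], IpProtocol='tcp',
            FromPort=80, ToPort=80, CidrIp='0.0.0.0/0')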
@@ -40,7 +40,7 @@ class SpotFleets(BaseResponse):

     def request_spot_fleet(self):
         spot_config = self._get_dict_param("SpotFleetRequestConfig.")
-        spot_price = spot_config['spot_price']
+        spot_price = spot_config.get('spot_price')
         target_capacity = spot_config['target_capacity']
         iam_fleet_role = spot_config['iam_fleet_role']
         allocation_strategy = spot_config['allocation_strategy']

@@ -78,7 +78,9 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
       <spotFleetRequestId>{{ request.id }}</spotFleetRequestId>
       <spotFleetRequestState>{{ request.state }}</spotFleetRequestState>
       <spotFleetRequestConfig>
+        {% if request.spot_price %}
         <spotPrice>{{ request.spot_price }}</spotPrice>
+        {% endif %}
         <targetCapacity>{{ request.target_capacity }}</targetCapacity>
         <iamFleetRole>{{ request.iam_fleet_role }}</iamFleetRole>
         <allocationStrategy>{{ request.allocation_strategy }}</allocationStrategy>

@@ -93,7 +95,9 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
             <iamInstanceProfile><arn>{{ launch_spec.iam_instance_profile }}</arn></iamInstanceProfile>
             <keyName>{{ launch_spec.key_name }}</keyName>
             <monitoring><enabled>{{ launch_spec.monitoring }}</enabled></monitoring>
+            {% if launch_spec.spot_price %}
             <spotPrice>{{ launch_spec.spot_price }}</spotPrice>
+            {% endif %}
             <userData>{{ launch_spec.user_data }}</userData>
             <weightedCapacity>{{ launch_spec.weighted_capacity }}</weightedCapacity>
             <groupSet>

@@ -103,6 +107,21 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
               </item>
             {% endfor %}
             </groupSet>
+            <tagSpecificationSet>
+              {% for resource_type in launch_spec.tag_specifications %}
+                <item>
+                  <resourceType>{{ resource_type }}</resourceType>
+                  <tag>
+                    {% for key, value in launch_spec.tag_specifications[resource_type].items() %}
+                      <item>
+                        <key>{{ key }}</key>
+                        <value>{{ value }}</value>
+                      </item>
+                    {% endfor %}
+                  </tag>
+                </item>
+              {% endfor %}
+            </tagSpecificationSet>
           </item>
         {% endfor %}
       </launchSpecifications>
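SpotPrice becomes optional in request_spot_fleet (spot_config.get instead of a hard key lookup), with the templates guarding the element. A sketch of a price-less request (role ARN and AMI id are illustrative):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_spot_fleet_without_price():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        resp = ec2.request_spot_fleet(SpotFleetRequestConfig={
            'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',
            'TargetCapacity': 1,
            'AllocationStrategy': 'lowestPrice',
            'LaunchSpecifications': [{'ImageId': 'ami-12345678',
                                      'InstanceType': 't2.micro'}],
        })
        print(resp['SpotFleetRequestId'])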
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
 import random
 from moto.core.responses import BaseResponse
+from moto.core.utils import camelcase_to_underscores
 from moto.ec2.utils import filters_from_querystring


@@ -16,6 +17,7 @@ class Subnets(BaseResponse):
             vpc_id,
             cidr_block,
             availability_zone,
+            context=self,
         )
         template = self.response_template(CREATE_SUBNET_RESPONSE)
         return template.render(subnet=subnet)

@@ -35,9 +37,14 @@ class Subnets(BaseResponse):

     def modify_subnet_attribute(self):
         subnet_id = self._get_param('SubnetId')
-        map_public_ip = self._get_param('MapPublicIpOnLaunch.Value')
-        self.ec2_backend.modify_subnet_attribute(subnet_id, map_public_ip)
-        return MODIFY_SUBNET_ATTRIBUTE_RESPONSE
+
+        for attribute in ('MapPublicIpOnLaunch', 'AssignIpv6AddressOnCreation'):
+            if self.querystring.get('%s.Value' % attribute):
+                attr_name = camelcase_to_underscores(attribute)
+                attr_value = self.querystring.get('%s.Value' % attribute)[0]
+                self.ec2_backend.modify_subnet_attribute(
+                    subnet_id, attr_name, attr_value)
+        return MODIFY_SUBNET_ATTRIBUTE_RESPONSE


 CREATE_SUBNET_RESPONSE = """

@@ -49,17 +56,14 @@ CREATE_SUBNET_RESPONSE = """
     <vpcId>{{ subnet.vpc_id }}</vpcId>
     <cidrBlock>{{ subnet.cidr_block }}</cidrBlock>
     <availableIpAddressCount>251</availableIpAddressCount>
-    <availabilityZone>{{ subnet.availability_zone }}</availabilityZone>
-    <tagSet>
-      {% for tag in subnet.get_tags() %}
-        <item>
-          <resourceId>{{ tag.resource_id }}</resourceId>
-          <resourceType>{{ tag.resource_type }}</resourceType>
-          <key>{{ tag.key }}</key>
-          <value>{{ tag.value }}</value>
-        </item>
-      {% endfor %}
-    </tagSet>
+    <availabilityZone>{{ subnet._availability_zone.name }}</availabilityZone>
+    <availabilityZoneId>{{ subnet._availability_zone.zone_id }}</availabilityZoneId>
     <defaultForAz>{{ subnet.default_for_az }}</defaultForAz>
     <mapPublicIpOnLaunch>{{ subnet.map_public_ip_on_launch }}</mapPublicIpOnLaunch>
+    <ownerId>{{ subnet.owner_id }}</ownerId>
+    <assignIpv6AddressOnCreation>{{ subnet.assign_ipv6_address_on_creation }}</assignIpv6AddressOnCreation>
+    <ipv6CidrBlockAssociationSet>{{ subnet.ipv6_cidr_block_associations }}</ipv6CidrBlockAssociationSet>
+    <subnetArn>arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }}</subnetArn>
   </subnet>
 </CreateSubnetResponse>"""

@@ -80,19 +84,26 @@ DESCRIBE_SUBNETS_RESPONSE = """
         <vpcId>{{ subnet.vpc_id }}</vpcId>
         <cidrBlock>{{ subnet.cidr_block }}</cidrBlock>
         <availableIpAddressCount>251</availableIpAddressCount>
-        <availabilityZone>{{ subnet.availability_zone }}</availabilityZone>
+        <availabilityZone>{{ subnet._availability_zone.name }}</availabilityZone>
+        <availabilityZoneId>{{ subnet._availability_zone.zone_id }}</availabilityZoneId>
         <defaultForAz>{{ subnet.default_for_az }}</defaultForAz>
         <mapPublicIpOnLaunch>{{ subnet.map_public_ip_on_launch }}</mapPublicIpOnLaunch>
-        <tagSet>
-          {% for tag in subnet.get_tags() %}
-            <item>
-              <resourceId>{{ tag.resource_id }}</resourceId>
-              <resourceType>{{ tag.resource_type }}</resourceType>
-              <key>{{ tag.key }}</key>
-              <value>{{ tag.value }}</value>
-            </item>
-          {% endfor %}
-        </tagSet>
+        <ownerId>{{ subnet.owner_id }}</ownerId>
+        <assignIpv6AddressOnCreation>{{ subnet.assign_ipv6_address_on_creation }}</assignIpv6AddressOnCreation>
+        <ipv6CidrBlockAssociationSet>{{ subnet.ipv6_cidr_block_associations }}</ipv6CidrBlockAssociationSet>
+        <subnetArn>arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }}</subnetArn>
+        {% if subnet.get_tags() %}
+          <tagSet>
+            {% for tag in subnet.get_tags() %}
+              <item>
+                <resourceId>{{ tag.resource_id }}</resourceId>
+                <resourceType>{{ tag.resource_type }}</resourceType>
+                <key>{{ tag.key }}</key>
+                <value>{{ tag.value }}</value>
+              </item>
+            {% endfor %}
+          </tagSet>
+        {% endif %}
      </item>
    {% endfor %}
  </subnetSet>
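modify_subnet_attribute is generalised from one hard-coded attribute to a loop over MapPublicIpOnLaunch and AssignIpv6AddressOnCreation. Sketch:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_modify_subnet_attribute():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
        subnet = ec2.create_subnet(VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/24')['Subnet']
        # both attributes now route through the same generic handler
        ec2.modify_subnet_attribute(SubnetId=subnet['SubnetId'],
                                    MapPublicIpOnLaunch={'Value': True})
        ec2.modify_subnet_attribute(SubnetId=subnet['SubnetId'],
                                    AssignIpv6AddressOnCreation={'Value': True})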
@@ -5,8 +5,12 @@ from moto.core.responses import BaseResponse
 class VPCPeeringConnections(BaseResponse):

     def create_vpc_peering_connection(self):
+        peer_region = self._get_param('PeerRegion')
+        if peer_region == self.region or peer_region is None:
+            peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId'))
+        else:
+            peer_vpc = self.ec2_backend.get_cross_vpc(self._get_param('PeerVpcId'), peer_region)
         vpc = self.ec2_backend.get_vpc(self._get_param('VpcId'))
-        peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId'))
         vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc)
         template = self.response_template(
             CREATE_VPC_PEERING_CONNECTION_RESPONSE)

@@ -41,54 +45,64 @@ class VPCPeeringConnections(BaseResponse):


 CREATE_VPC_PEERING_CONNECTION_RESPONSE = """
-<CreateVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
-  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
-  <vpcPeeringConnection>
-    <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
+<CreateVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+  <vpcPeeringConnection>
+    <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
     <requesterVpcInfo>
-      <ownerId>777788889999</ownerId>
-      <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
-      <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
+      <ownerId>777788889999</ownerId>
+      <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
+      <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
+      <peeringOptions>
+        <allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
+        <allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
+        <allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
+      </peeringOptions>
     </requesterVpcInfo>
     <accepterVpcInfo>
       <ownerId>123456789012</ownerId>
       <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
     </accepterVpcInfo>
     <status>
-      <code>initiating-request</code>
-      <message>Initiating request to {accepter ID}.</message>
+      <code>initiating-request</code>
+      <message>Initiating Request to {accepter ID}</message>
     </status>
     <expirationTime>2014-02-18T14:37:25.000Z</expirationTime>
     <tagSet/>
-  </vpcPeeringConnection>
+  </vpcPeeringConnection>
 </CreateVpcPeeringConnectionResponse>
 """

 DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
-<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
-  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
-  <vpcPeeringConnectionSet>
-    {% for vpc_pcx in vpc_pcxs %}
-      <item>
-        <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
-        <requesterVpcInfo>
-          <ownerId>777788889999</ownerId>
-          <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
-          <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
-        </requesterVpcInfo>
-        <accepterVpcInfo>
-          <ownerId>123456789012</ownerId>
-          <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
-        </accepterVpcInfo>
-        <status>
-          <code>{{ vpc_pcx._status.code }}</code>
-          <message>{{ vpc_pcx._status.message }}</message>
-        </status>
-        <expirationTime>2014-02-17T16:00:50.000Z</expirationTime>
-        <tagSet/>
-      </item>
-    {% endfor %}
-  </vpcPeeringConnectionSet>
+<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+  <vpcPeeringConnectionSet>
+    {% for vpc_pcx in vpc_pcxs %}
+      <item>
+        <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
+        <requesterVpcInfo>
+          <ownerId>777788889999</ownerId>
+          <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
+          <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
+        </requesterVpcInfo>
+        <accepterVpcInfo>
+          <ownerId>123456789012</ownerId>
+          <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
+          <cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
+          <peeringOptions>
+            <allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
+            <allowEgressFromLocalVpcToRemoteClassicLink>true</allowEgressFromLocalVpcToRemoteClassicLink>
+            <allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
+          </peeringOptions>
+        </accepterVpcInfo>
+        <status>
+          <code>{{ vpc_pcx._status.code }}</code>
+          <message>{{ vpc_pcx._status.message }}</message>
+        </status>
+        <tagSet/>
+      </item>
+    {% endfor %}
+  </vpcPeeringConnectionSet>
 </DescribeVpcPeeringConnectionsResponse>
 """

@@ -100,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
 """

 ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """
-<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <vpcPeeringConnection>
     <vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
     <requesterVpcInfo>
-      <ownerId>123456789012</ownerId>
+      <ownerId>777788889999</ownerId>
       <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
       <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
     </requesterVpcInfo>
     <accepterVpcInfo>
-      <ownerId>777788889999</ownerId>
+      <ownerId>123456789012</ownerId>
       <vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
+      <cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
+      <peeringOptions>
+        <allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
+        <allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
+        <allowDnsResolutionFromRemoteVpc>false</allowDnsResolutionFromRemoteVpc>
+      </peeringOptions>
     </accepterVpcInfo>
     <status>
       <code>{{ vpc_pcx._status.code }}</code>
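create_vpc_peering_connection learns PeerRegion: a peer in another region is looked up through get_cross_vpc instead of the local backend. Sketch:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_cross_region_peering():
        use1 = boto3.client('ec2', region_name='us-east-1')
        usw2 = boto3.client('ec2', region_name='us-west-2')
        vpc = use1.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
        peer = usw2.create_vpc(CidrBlock='10.1.0.0/16')['Vpc']
        # PeerRegion routes the lookup through get_cross_vpc
        pcx = use1.create_vpc_peering_connection(
            VpcId=vpc['VpcId'], PeerVpcId=peer['VpcId'], PeerRegion='us-west-2')
        print(pcx['VpcPeeringConnection']['VpcPeeringConnectionId'])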
@@ -9,9 +9,12 @@ class VPCs(BaseResponse):
     def create_vpc(self):
         cidr_block = self._get_param('CidrBlock')
         instance_tenancy = self._get_param('InstanceTenancy', if_none='default')
-        vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy)
+        amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock')
+        vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy,
+                                          amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks)
+        doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15'
         template = self.response_template(CREATE_VPC_RESPONSE)
-        return template.render(vpc=vpc)
+        return template.render(vpc=vpc, doc_date=doc_date)

     def delete_vpc(self):
         vpc_id = self._get_param('VpcId')

@@ -23,8 +26,9 @@ class VPCs(BaseResponse):
         vpc_ids = self._get_multi_param('VpcId')
         filters = filters_from_querystring(self.querystring)
         vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)
+        doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15'
         template = self.response_template(DESCRIBE_VPCS_RESPONSE)
-        return template.render(vpcs=vpcs)
+        return template.render(vpcs=vpcs, doc_date=doc_date)

     def describe_vpc_attribute(self):
         vpc_id = self._get_param('VpcId')

@@ -45,14 +49,63 @@ class VPCs(BaseResponse):
             vpc_id, attr_name, attr_value)
         return MODIFY_VPC_ATTRIBUTE_RESPONSE

+    def associate_vpc_cidr_block(self):
+        vpc_id = self._get_param('VpcId')
+        amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock')
+        # todo test on AWS if can create an association for IPV4 and IPV6 in the same call?
+        cidr_block = self._get_param('CidrBlock') if not amazon_provided_ipv6_cidr_blocks else None
+        value = self.ec2_backend.associate_vpc_cidr_block(vpc_id, cidr_block, amazon_provided_ipv6_cidr_blocks)
+        if not amazon_provided_ipv6_cidr_blocks:
+            render_template = ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
+        else:
+            render_template = IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
+        template = self.response_template(render_template)
+        return template.render(vpc_id=vpc_id, value=value, cidr_block=value['cidr_block'],
+                               association_id=value['association_id'], cidr_block_state='associating')
+
+    def disassociate_vpc_cidr_block(self):
+        association_id = self._get_param('AssociationId')
+        value = self.ec2_backend.disassociate_vpc_cidr_block(association_id)
+        if "::" in value.get('cidr_block', ''):
+            render_template = IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
+        else:
+            render_template = DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE
+        template = self.response_template(render_template)
+        return template.render(vpc_id=value['vpc_id'], cidr_block=value['cidr_block'],
+                               association_id=value['association_id'], cidr_block_state='disassociating')


 CREATE_VPC_RESPONSE = """
-<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/{{doc_date}}/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <vpc>
     <vpcId>{{ vpc.id }}</vpcId>
     <state>pending</state>
     <cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
+    {% if doc_date == "2016-11-15" %}
+      <cidrBlockAssociationSet>
+        {% for assoc in vpc.get_cidr_block_association_set() %}
+          <item>
+            <cidrBlock>{{assoc.cidr_block}}</cidrBlock>
+            <associationId>{{ assoc.association_id }}</associationId>
+            <cidrBlockState>
+              <state>{{assoc.cidr_block_state.state}}</state>
+            </cidrBlockState>
+          </item>
+        {% endfor %}
+      </cidrBlockAssociationSet>
+      <ipv6CidrBlockAssociationSet>
+        {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %}
+          <item>
+            <ipv6CidrBlock>{{assoc.cidr_block}}</ipv6CidrBlock>
+            <associationId>{{ assoc.association_id }}</associationId>
+            <ipv6CidrBlockState>
+              <state>{{assoc.cidr_block_state.state}}</state>
+            </ipv6CidrBlockState>
+          </item>
+        {% endfor %}
+      </ipv6CidrBlockAssociationSet>
+    {% endif %}
    <dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %}</dhcpOptionsId>
    <instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
    <tagSet>

@@ -69,14 +122,38 @@ CREATE_VPC_RESPONSE
 </CreateVpcResponse>"""

 DESCRIBE_VPCS_RESPONSE = """
-<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
-  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/{{doc_date}}/">
+  <requestId>7a62c442-3484-4f42-9342-6942EXAMPLE</requestId>
   <vpcSet>
     {% for vpc in vpcs %}
      <item>
        <vpcId>{{ vpc.id }}</vpcId>
        <state>{{ vpc.state }}</state>
        <cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
+       {% if doc_date == "2016-11-15" %}
+         <cidrBlockAssociationSet>
+           {% for assoc in vpc.get_cidr_block_association_set() %}
+             <item>
+               <cidrBlock>{{assoc.cidr_block}}</cidrBlock>
+               <associationId>{{ assoc.association_id }}</associationId>
+               <cidrBlockState>
+                 <state>{{assoc.cidr_block_state.state}}</state>
+               </cidrBlockState>
+             </item>
+           {% endfor %}
+         </cidrBlockAssociationSet>
+         <ipv6CidrBlockAssociationSet>
+           {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %}
+             <item>
+               <ipv6CidrBlock>{{assoc.cidr_block}}</ipv6CidrBlock>
+               <associationId>{{ assoc.association_id }}</associationId>
+               <ipv6CidrBlockState>
+                 <state>{{assoc.cidr_block_state.state}}</state>
+               </ipv6CidrBlockState>
+             </item>
+           {% endfor %}
+         </ipv6CidrBlockAssociationSet>
+       {% endif %}
        <dhcpOptionsId>{% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %}</dhcpOptionsId>
        <instanceTenancy>{{ vpc.instance_tenancy }}</instanceTenancy>
        <isDefault>{{ vpc.is_default }}</isDefault>

@@ -96,14 +173,14 @@ DESCRIBE_VPCS_RESPONSE
 </DescribeVpcsResponse>"""

 DELETE_VPC_RESPONSE = """
-<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <return>true</return>
 </DeleteVpcResponse>
 """

 DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """
-<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <vpcId>{{ vpc_id }}</vpcId>
   <{{ attribute }}>

@@ -112,7 +189,59 @@ DESCRIBE_VPC_ATTRIBUTE_RESPONSE
 </DescribeVpcAttributeResponse>"""

 MODIFY_VPC_ATTRIBUTE_RESPONSE = """
-<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
+<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <return>true</return>
 </ModifyVpcAttributeResponse>"""

+ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
+<AssociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+  <vpcId>{{vpc_id}}</vpcId>
+  <cidrBlockAssociation>
+    <associationId>{{association_id}}</associationId>
+    <cidrBlock>{{cidr_block}}</cidrBlock>
+    <cidrBlockState>
+      <state>{{cidr_block_state}}</state>
+    </cidrBlockState>
+  </cidrBlockAssociation>
+</AssociateVpcCidrBlockResponse>"""

+DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
+<DisassociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
+  <vpcId>{{vpc_id}}</vpcId>
+  <cidrBlockAssociation>
+    <associationId>{{association_id}}</associationId>
+    <cidrBlock>{{cidr_block}}</cidrBlock>
+    <cidrBlockState>
+      <state>{{cidr_block_state}}</state>
+    </cidrBlockState>
+  </cidrBlockAssociation>
+</DisassociateVpcCidrBlockResponse>"""

+IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
+<AssociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>33af6c54-1139-4d50-b4f7-15a8example</requestId>
+  <vpcId>{{vpc_id}}</vpcId>
+  <ipv6CidrBlockAssociation>
+    <associationId>{{association_id}}</associationId>
+    <ipv6CidrBlock>{{cidr_block}}</ipv6CidrBlock>
+    <ipv6CidrBlockState>
+      <state>{{cidr_block_state}}</state>
+    </ipv6CidrBlockState>
+  </ipv6CidrBlockAssociation>
+</AssociateVpcCidrBlockResponse>"""

+IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """
+<DisassociateVpcCidrBlockResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>33af6c54-1139-4d50-b4f7-15a8example</requestId>
+  <vpcId>{{vpc_id}}</vpcId>
+  <ipv6CidrBlockAssociation>
+    <associationId>{{association_id}}</associationId>
+    <ipv6CidrBlock>{{cidr_block}}</ipv6CidrBlock>
+    <ipv6CidrBlockState>
+      <state>{{cidr_block_state}}</state>
+    </ipv6CidrBlockState>
+  </ipv6CidrBlockAssociation>
+</DisassociateVpcCidrBlockResponse>"""
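The new associate/disassociate handlers pick their template by address family (an IPv6 block contains "::"). Sketch of the IPv4 path:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_secondary_cidr():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
        assoc = ec2.associate_vpc_cidr_block(VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/16')
        state = assoc['CidrBlockAssociation']['CidrBlockState']['State']
        assert state == 'associating'
        ec2.disassociate_vpc_cidr_block(
            AssociationId=assoc['CidrBlockAssociation']['AssociationId'])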
@@ -1,10 +1,19 @@
 from __future__ import unicode_literals

+import base64
+import hashlib
 import fnmatch
 import random
+import re
 import six

+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import rsa
+import sshpubkeys.exceptions
+from sshpubkeys.keys import SSHKey


 EC2_RESOURCE_TO_PREFIX = {
     'customer-gateway': 'cgw',
     'dhcp-options': 'dopt',

@@ -27,6 +36,7 @@ EC2_RESOURCE_TO_PREFIX = {
     'reservation': 'r',
     'volume': 'vol',
     'vpc': 'vpc',
+    'vpc-cidr-association-id': 'vpc-cidr-assoc',
     'vpc-elastic-ip': 'eipalloc',
     'vpc-elastic-ip-association': 'eipassoc',
     'vpc-peering-connection': 'pcx',

@@ -34,16 +44,17 @@ EC2_RESOURCE_TO_PREFIX = {
     'vpn-gateway': 'vgw'}


-EC2_PREFIX_TO_RESOURCE = dict((v, k)
-                              for (k, v) in EC2_RESOURCE_TO_PREFIX.items())
+EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items())


+def random_resource_id(size=8):
+    chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f']
+    resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size))
+    return resource_id


 def random_id(prefix='', size=8):
-    chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f']
-
-    resource_id = ''.join(six.text_type(random.choice(chars))
-                          for x in range(size))
-    return '{0}-{1}'.format(prefix, resource_id)
+    return '{0}-{1}'.format(prefix, random_resource_id(size))


 def random_ami_id():

@@ -110,6 +121,10 @@ def random_vpc_id():
     return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc'])


+def random_vpc_cidr_association_id():
+    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-cidr-association-id'])


 def random_vpc_peering_connection_id():
     return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-peering-connection'])

@@ -165,6 +180,10 @@ def random_ip():
     )


+def random_ipv6_cidr():
+    return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4))


 def generate_route_id(route_table_id, cidr_block):
     return "%s~%s" % (route_table_id, cidr_block)

@@ -443,23 +462,19 @@ def simple_aws_filter_to_re(filter_string):


 def random_key_pair():
-    def random_hex():
-        return chr(random.choice(list(range(48, 58)) + list(range(97, 102))))
+    private_key = rsa.generate_private_key(
+        public_exponent=65537,
+        key_size=2048,
+        backend=default_backend())
+    private_key_material = private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=serialization.NoEncryption())
+    public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key())

-    def random_fingerprint():
-        return ':'.join([random_hex() + random_hex() for i in range(20)])
-
-    def random_material():
-        return ''.join([
-            chr(random.choice(list(range(65, 91)) + list(range(48, 58)) +
-                              list(range(97, 102))))
-            for i in range(1000)
-        ])
-    material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \
-        "-----END RSA PRIVATE KEY-----"
     return {
-        'fingerprint': random_fingerprint(),
-        'material': material
+        'fingerprint': public_key_fingerprint,
+        'material': private_key_material.decode('ascii')
     }

@@ -525,3 +540,28 @@ def generate_instance_identity_document(instance):
     }

     return document

+def rsa_public_key_parse(key_material):
+    try:
+        if not isinstance(key_material, six.binary_type):
+            key_material = key_material.encode("ascii")
+
+        decoded_key = base64.b64decode(key_material).decode("ascii")
+        public_key = SSHKey(decoded_key)
+    except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError):
+        raise ValueError('bad key')
+
+    if not public_key.rsa:
+        raise ValueError('bad key')
+
+    return public_key.rsa

+def rsa_public_key_fingerprint(rsa_public_key):
+    key_data = rsa_public_key.public_bytes(
+        encoding=serialization.Encoding.DER,
+        format=serialization.PublicFormat.SubjectPublicKeyInfo)
+    fingerprint_hex = hashlib.md5(key_data).hexdigest()
+    fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex)
+    return fingerprint
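random_key_pair now returns genuine RSA material instead of random characters: a 2048-bit key serialised to PEM, fingerprinted as colon-separated MD5 of the DER-encoded public key. A sketch of what callers should now observe:

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def check_key_pair_material():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        kp = ec2.create_key_pair(KeyName='demo')
        assert kp['KeyMaterial'].startswith('-----BEGIN RSA PRIVATE KEY-----')
        # md5 hexdigest is 32 chars -> 16 colon-separated byte pairs
        assert len(kp['KeyFingerprint'].split(':')) == 16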
@@ -1,15 +1,17 @@
 from __future__ import unicode_literals
-# from datetime import datetime

+import hashlib
+import re
+from copy import copy
+from datetime import datetime
 from random import random

+from botocore.exceptions import ParamValidationError

 from moto.core import BaseBackend, BaseModel
 from moto.ec2 import ec2_backends
-from copy import copy
-import hashlib

+from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException


 DEFAULT_REGISTRY_ID = '012345678910'

@@ -97,14 +99,15 @@ class Repository(BaseObject):

 class Image(BaseObject):

-    def __init__(self, tag, manifest, repository, registry_id=DEFAULT_REGISTRY_ID):
+    def __init__(self, tag, manifest, repository, digest=None, registry_id=DEFAULT_REGISTRY_ID):
         self.image_tag = tag
+        self.image_tags = [tag] if tag is not None else []
         self.image_manifest = manifest
         self.image_size_in_bytes = 50 * 1024 * 1024
         self.repository = repository
         self.registry_id = registry_id
-        self.image_digest = None
-        self.image_pushed_at = None
+        self.image_digest = digest
+        self.image_pushed_at = str(datetime.utcnow().isoformat())

     def _create_digest(self):
         image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6))

@@ -115,6 +118,20 @@ class Image(BaseObject):
             self._create_digest()
         return self.image_digest

+    def get_image_manifest(self):
+        return self.image_manifest
+
+    def remove_tag(self, tag):
+        if tag is not None and tag in self.image_tags:
+            self.image_tags.remove(tag)
+            if self.image_tags:
+                self.image_tag = self.image_tags[-1]
+
+    def update_tag(self, tag):
+        self.image_tag = tag
+        if tag not in self.image_tags and tag is not None:
+            self.image_tags.append(tag)

     @property
     def response_object(self):
         response_object = self.gen_response_object()

@@ -124,26 +141,44 @@ class Image(BaseObject):
         response_object['imageManifest'] = self.image_manifest
         response_object['repositoryName'] = self.repository
         response_object['registryId'] = self.registry_id
-        return response_object
+        return {k: v for k, v in response_object.items() if v is not None and v != [None]}

     @property
     def response_list_object(self):
         response_object = self.gen_response_object()
         response_object['imageTag'] = self.image_tag
         response_object['imageDigest'] = "i don't know"
-        return response_object
+        return {k: v for k, v in response_object.items() if v is not None and v != [None]}

     @property
     def response_describe_object(self):
         response_object = self.gen_response_object()
-        response_object['imageTags'] = [self.image_tag]
+        response_object['imageTags'] = self.image_tags
         response_object['imageDigest'] = self.get_image_digest()
         response_object['imageManifest'] = self.image_manifest
         response_object['repositoryName'] = self.repository
         response_object['registryId'] = self.registry_id
         response_object['imageSizeInBytes'] = self.image_size_in_bytes
-        response_object['imagePushedAt'] = '2017-05-09'
-        return response_object
+        response_object['imagePushedAt'] = self.image_pushed_at
+        return {k: v for k, v in response_object.items() if v is not None and v != []}

     @property
     def response_batch_get_image(self):
         response_object = {}
         response_object['imageId'] = {}
         response_object['imageId']['imageTag'] = self.image_tag
         response_object['imageId']['imageDigest'] = self.get_image_digest()
         response_object['imageManifest'] = self.image_manifest
         response_object['repositoryName'] = self.repository
         response_object['registryId'] = self.registry_id
         return {k: v for k, v in response_object.items() if v is not None and v != [None]}

+    @property
+    def response_batch_delete_image(self):
+        response_object = {}
+        response_object['imageDigest'] = self.get_image_digest()
+        response_object['imageTag'] = self.image_tag
+        return {k: v for k, v in response_object.items() if v is not None and v != [None]}
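An ECR image can now carry several tags (image_tags), gets a real imagePushedAt timestamp, and strips null fields from its response objects. A sketch of the multi-tag flow the new update_tag/remove_tag methods support (the manifest contents are illustrative):

    import boto3
    from moto import mock_ecr

    @mock_ecr
    def check_multi_tagged_image():
        ecr = boto3.client('ecr', region_name='us-east-1')
        ecr.create_repository(repositoryName='web')
        manifest = '{"layers": []}'
        ecr.put_image(repositoryName='web', imageTag='v1', imageManifest=manifest)
        # same manifest, second tag: put_image routes through update_tag()
        ecr.put_image(repositoryName='web', imageTag='latest', imageManifest=manifest)
        images = ecr.describe_images(repositoryName='web')['imageDetails']
        assert sorted(images[0]['imageTags']) == ['latest', 'v1']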
class ECRBackend(BaseBackend):
|
||||
@ -189,17 +224,22 @@ class ECRBackend(BaseBackend):
|
||||
"""
|
||||
maxResults and filtering not implemented
|
||||
"""
|
||||
images = []
|
||||
for repository in self.repositories.values():
|
||||
if repository_name:
|
||||
if repository.name != repository_name:
|
||||
continue
|
||||
repository = None
|
||||
found = False
|
||||
if repository_name in self.repositories:
|
||||
repository = self.repositories[repository_name]
|
||||
if registry_id:
|
||||
if repository.registry_id != registry_id:
|
||||
continue
|
||||
if repository.registry_id == registry_id:
|
||||
found = True
|
||||
else:
|
||||
found = True
|
||||
|
||||
for image in repository.images:
|
||||
images.append(image)
|
||||
if not found:
|
||||
raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID)
|
||||
|
||||
images = []
|
||||
for image in repository.images:
|
||||
images.append(image)
|
||||
return images
|
||||
|
||||
def describe_images(self, repository_name, registry_id=None, image_ids=None):
|
||||
@ -215,7 +255,7 @@ class ECRBackend(BaseBackend):
|
||||
found = False
|
||||
for image in repository.images:
|
||||
if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or
|
||||
('imageTag' in image_id and image.image_tag == image_id['imageTag'])):
|
||||
('imageTag' in image_id and image_id['imageTag'] in image.image_tags)):
|
||||
found = True
|
||||
response.add(image)
|
||||
if not found:
|
||||
@ -241,9 +281,149 @@ class ECRBackend(BaseBackend):
|
||||
else:
|
||||
raise Exception("{0} is not a repository".format(repository_name))
|
||||
|
||||
image = Image(image_tag, image_manifest, repository_name)
|
||||
repository.images.append(image)
|
||||
return image
|
||||
existing_images = list(filter(lambda x: x.response_object['imageManifest'] == image_manifest, repository.images))
|
||||
if not existing_images:
|
||||
# this image is not in ECR yet
|
||||
image = Image(image_tag, image_manifest, repository_name)
|
||||
repository.images.append(image)
|
||||
return image
|
||||
else:
|
||||
# update existing image
|
||||
existing_images[0].update_tag(image_tag)
|
||||
return existing_images[0]
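
The put_image hunk above de-duplicates by manifest: pushing a manifest that already exists re-tags the stored image instead of appending a second copy. A minimal sketch of how this behaviour could be exercised, assuming moto's mock_ecr decorator and a boto3 client (the repository name and manifest body are illustrative):

import boto3
from moto import mock_ecr

@mock_ecr
def test_put_image_updates_tags_for_existing_manifest():
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test-repo')
    manifest = '{"schemaVersion": 2}'  # placeholder manifest body
    client.put_image(repositoryName='test-repo', imageManifest=manifest, imageTag='v1')
    # Re-pushing the same manifest under a new tag should update the existing
    # image record rather than create a second one.
    client.put_image(repositoryName='test-repo', imageManifest=manifest, imageTag='latest')
    images = client.describe_images(repositoryName='test-repo')['imageDetails']
    assert len(images) == 1
    assert sorted(images[0]['imageTags']) == ['latest', 'v1']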

    def batch_get_image(self, repository_name, registry_id=None, image_ids=None, accepted_media_types=None):
        if repository_name in self.repositories:
            repository = self.repositories[repository_name]
        else:
            raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID)

        if not image_ids:
            raise ParamValidationError(msg='Missing required parameter in input: "imageIds"')

        response = {
            'images': [],
            'failures': [],
        }

        for image_id in image_ids:
            found = False
            for image in repository.images:
                if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or
                        ('imageTag' in image_id and image.image_tag == image_id['imageTag'])):
                    found = True
                    response['images'].append(image.response_batch_get_image)

            if not found:
                response['failures'].append({
                    'imageId': {
                        'imageTag': image_id.get('imageTag', 'null')
                    },
                    'failureCode': 'ImageNotFound',
                    'failureReason': 'Requested image not found'
                })

        return response

    def batch_delete_image(self, repository_name, registry_id=None, image_ids=None):
        if repository_name in self.repositories:
            repository = self.repositories[repository_name]
        else:
            raise RepositoryNotFoundException(
                repository_name, registry_id or DEFAULT_REGISTRY_ID
            )

        if not image_ids:
            raise ParamValidationError(
                msg='Missing required parameter in input: "imageIds"'
            )

        response = {
            "imageIds": [],
            "failures": []
        }

        for image_id in image_ids:
            image_found = False

            # Is request missing both digest and tag?
            if "imageDigest" not in image_id and "imageTag" not in image_id:
                response["failures"].append(
                    {
                        "imageId": {},
                        "failureCode": "MissingDigestAndTag",
                        "failureReason": "Invalid request parameters: both tag and digest cannot be null",
                    }
                )
                continue

            # If we have a digest, is it valid?
            if "imageDigest" in image_id:
                pattern = re.compile("^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}")
                if not pattern.match(image_id.get("imageDigest")):
                    response["failures"].append(
                        {
                            "imageId": {
                                "imageDigest": image_id.get("imageDigest", "null")
                            },
                            "failureCode": "InvalidImageDigest",
                            "failureReason": "Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'",
                        }
                    )
                    continue

            for num, image in enumerate(repository.images):

                # Search by matching both digest and tag
                if "imageDigest" in image_id and "imageTag" in image_id:
                    if (
                        image_id["imageDigest"] == image.get_image_digest() and
                        image_id["imageTag"] in image.image_tags
                    ):
                        image_found = True
                        for image_tag in reversed(image.image_tags):
                            repository.images[num].image_tag = image_tag
                            response["imageIds"].append(
                                image.response_batch_delete_image
                            )
                            repository.images[num].remove_tag(image_tag)
                        del repository.images[num]

                # Search by matching digest
                elif "imageDigest" in image_id and image.get_image_digest() == image_id["imageDigest"]:
                    image_found = True
                    for image_tag in reversed(image.image_tags):
                        repository.images[num].image_tag = image_tag
                        response["imageIds"].append(image.response_batch_delete_image)
                        repository.images[num].remove_tag(image_tag)
                    del repository.images[num]

                # Search by matching tag
                elif "imageTag" in image_id and image_id["imageTag"] in image.image_tags:
                    image_found = True
                    repository.images[num].image_tag = image_id["imageTag"]
                    response["imageIds"].append(image.response_batch_delete_image)
                    if len(image.image_tags) > 1:
                        repository.images[num].remove_tag(image_id["imageTag"])
                    else:
                        repository.images.remove(image)

            if not image_found:
                failure_response = {
                    "imageId": {},
                    "failureCode": "ImageNotFound",
                    "failureReason": "Requested image not found",
                }

                if "imageDigest" in image_id:
                    failure_response["imageId"]["imageDigest"] = image_id.get("imageDigest", "null")

                if "imageTag" in image_id:
                    failure_response["imageId"]["imageTag"] = image_id.get("imageTag", "null")

                response["failures"].append(failure_response)

        return response
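
A sketch of both paths through the new batch_delete_image backend, under the same assumptions (mock_ecr plus boto3, illustrative names): deleting an existing tag populates 'imageIds', while an unknown tag lands in 'failures' with code ImageNotFound.

import boto3
from moto import mock_ecr

@mock_ecr
def test_batch_delete_image_by_tag():
    client = boto3.client('ecr', region_name='us-east-1')
    client.create_repository(repositoryName='test-repo')
    client.put_image(repositoryName='test-repo',
                     imageManifest='{"schemaVersion": 2}', imageTag='v1')
    resp = client.batch_delete_image(
        repositoryName='test-repo',
        imageIds=[{'imageTag': 'v1'}, {'imageTag': 'does-not-exist'}],
    )
    assert len(resp['imageIds']) == 1  # 'v1' was deleted
    assert resp['failures'][0]['failureCode'] == 'ImageNotFound'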


ecr_backends = {}

@ -5,7 +5,7 @@ from datetime import datetime
import time

from moto.core.responses import BaseResponse
from .models import ecr_backends
from .models import ecr_backends, DEFAULT_REGISTRY_ID


class ECRResponse(BaseResponse):
@ -84,14 +84,21 @@ class ECRResponse(BaseResponse):
                'ECR.batch_check_layer_availability is not yet implemented')

    def batch_delete_image(self):
        if self.is_not_dryrun('BatchDeleteImage'):
            raise NotImplementedError(
                'ECR.batch_delete_image is not yet implemented')
        repository_str = self._get_param('repositoryName')
        registry_id = self._get_param('registryId')
        image_ids = self._get_param('imageIds')

        response = self.ecr_backend.batch_delete_image(repository_str, registry_id, image_ids)
        return json.dumps(response)

    def batch_get_image(self):
        if self.is_not_dryrun('BatchGetImage'):
            raise NotImplementedError(
                'ECR.batch_get_image is not yet implemented')
        repository_str = self._get_param('repositoryName')
        registry_id = self._get_param('registryId')
        image_ids = self._get_param('imageIds')
        accepted_media_types = self._get_param('acceptedMediaTypes')

        response = self.ecr_backend.batch_get_image(repository_str, registry_id, image_ids, accepted_media_types)
        return json.dumps(response)

    def can_paginate(self):
        if self.is_not_dryrun('CanPaginate'):
@ -116,7 +123,7 @@ class ECRResponse(BaseResponse):
    def get_authorization_token(self):
        registry_ids = self._get_param('registryIds')
        if not registry_ids:
            registry_ids = [self.region]
            registry_ids = [DEFAULT_REGISTRY_ID]
        auth_data = []
        for registry_id in registry_ids:
            password = '{}-auth-token'.format(registry_id)
@ -124,7 +131,7 @@ class ECRResponse(BaseResponse):
            auth_data.append({
                'authorizationToken': auth_token,
                'expiresAt': time.mktime(datetime(2015, 1, 1).timetuple()),
                'proxyEndpoint': 'https://012345678910.dkr.ecr.{}.amazonaws.com'.format(registry_id)
                'proxyEndpoint': 'https://{}.dkr.ecr.{}.amazonaws.com'.format(registry_id, self.region)
            })
        return json.dumps({'authorizationData': auth_data})

@ -3,6 +3,7 @@ from .responses import ECRResponse

url_bases = [
    "https?://ecr.(.+).amazonaws.com",
    "https?://api.ecr.(.+).amazonaws.com",
]

url_paths = {

13
moto/ecs/exceptions.py
Normal file
@ -0,0 +1,13 @@
from __future__ import unicode_literals
from moto.core.exceptions import RESTError


class ServiceNotFoundException(RESTError):
    code = 400

    def __init__(self, service_name):
        super(ServiceNotFoundException, self).__init__(
            error_type="ServiceNotFoundException",
            message="The service {0} does not exist".format(service_name),
            template='error_json',
        )
@ -2,6 +2,7 @@ from __future__ import unicode_literals
import uuid
from datetime import datetime
from random import random, randint
import boto3

import pytz
from moto.core.exceptions import JsonRESTError
@ -9,6 +10,8 @@ from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from copy import copy

from .exceptions import ServiceNotFoundException


class BaseObject(BaseModel):

@ -23,7 +26,7 @@ class BaseObject(BaseModel):

    def gen_response_object(self):
        response_object = copy(self.__dict__)
        for key, value in response_object.items():
        for key, value in self.__dict__.items():
            if '_' in key:
                response_object[self.camelCase(key)] = value
                del response_object[key]
@ -60,7 +63,11 @@ class Cluster(BaseObject):

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        # if properties is not provided, cloudformation will use the default values for all properties
        if 'Properties' in cloudformation_json:
            properties = cloudformation_json['Properties']
        else:
            properties = {}
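
The guard above matters because a bare AWS::ECS::Cluster resource with no Properties block is valid CloudFormation; previously the lookup raised a KeyError. A hypothetical template that now works, assuming moto's mock_ecs and mock_cloudformation decorators (stack and resource names are illustrative):

import json

import boto3
from moto import mock_cloudformation, mock_ecs

@mock_ecs
@mock_cloudformation
def test_cluster_resource_without_properties():
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            # No "Properties" key at all - defaults should be used.
            "MyCluster": {"Type": "AWS::ECS::Cluster"},
        },
    }
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_stack(StackName='ecs-stack', TemplateBody=json.dumps(template))
    ecs = boto3.client('ecs', region_name='us-east-1')
    assert len(ecs.list_clusters()['clusterArns']) == 1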

        ecs_backend = ecs_backends[region_name]
        return ecs_backend.create_cluster(
@ -87,6 +94,12 @@ class Cluster(BaseObject):
            # no-op when nothing changed between old and new resources
            return original_resource

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            return self.arn
        raise UnformattedGetAttTemplateException()


class TaskDefinition(BaseObject):

@ -108,6 +121,10 @@ class TaskDefinition(BaseObject):
        del response_object['arn']
        return response_object

    @property
    def physical_resource_id(self):
        return self.arn

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
@ -168,7 +185,7 @@ class Task(BaseObject):

class Service(BaseObject):

    def __init__(self, cluster, service_name, task_definition, desired_count):
    def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None, scheduling_strategy=None):
        self.cluster_arn = cluster.arn
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(
            service_name)
@ -190,7 +207,8 @@ class Service(BaseObject):
                'updatedAt': datetime.now(pytz.utc),
            }
        ]
        self.load_balancers = []
        self.load_balancers = load_balancers if load_balancers is not None else []
        self.scheduling_strategy = scheduling_strategy if scheduling_strategy is not None else 'REPLICA'
        self.pending_count = 0

    @property
@ -203,6 +221,7 @@ class Service(BaseObject):
        del response_object['name'], response_object['arn']
        response_object['serviceName'] = self.name
        response_object['serviceArn'] = self.arn
        response_object['schedulingStrategy'] = self.scheduling_strategy

        for deployment in response_object['deployments']:
            if isinstance(deployment['createdAt'], datetime):
@ -258,10 +277,16 @@ class Service(BaseObject):
        else:
            return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count)

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Name':
            return self.name
        raise UnformattedGetAttTemplateException()


class ContainerInstance(BaseObject):

    def __init__(self, ec2_instance_id):
    def __init__(self, ec2_instance_id, region_name):
        self.ec2_instance_id = ec2_instance_id
        self.agent_connected = True
        self.status = 'ACTIVE'
@ -321,12 +346,41 @@ class ContainerInstance(BaseObject):
            'agentHash': '4023248',
            'dockerVersion': 'DockerVersion: 1.5.0'
        }

        self.attributes = {}
        ec2_backend = ec2_backends[region_name]
        ec2_instance = ec2_backend.get_instance(ec2_instance_id)
        self.attributes = {
            'ecs.ami-id': ec2_instance.image_id,
            'ecs.availability-zone': ec2_instance.placement,
            'ecs.instance-type': ec2_instance.instance_type,
            'ecs.os-type': ec2_instance.platform if ec2_instance.platform == 'windows' else 'linux'  # options are windows and linux, linux is default
        }
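
With this change a registered container instance reports real attributes (AMI id, availability zone, instance type, OS type) copied from the backing mocked EC2 instance instead of an empty dict. A hedged sketch, assuming the generate_instance_identity_document helper from moto.ec2.utils (used this way in moto's own ECS tests) and illustrative AMI/cluster names:

import json

import boto3
from moto import mock_ec2, mock_ecs
from moto.ec2 import utils as ec2_utils

@mock_ec2
@mock_ecs
def test_container_instance_attributes_come_from_ec2():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    instance = ec2.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)[0]
    ecs = boto3.client('ecs', region_name='us-east-1')
    ecs.create_cluster(clusterName='test')
    resp = ecs.register_container_instance(
        cluster='test',
        instanceIdentityDocument=json.dumps(
            ec2_utils.generate_instance_identity_document(instance)))
    names = [attr['name'] for attr in resp['containerInstance']['attributes']]
    assert 'ecs.ami-id' in names and 'ecs.instance-type' in names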

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        response_object['attributes'] = [self._format_attribute(name, value) for name, value in response_object['attributes'].items()]
        return response_object

    def _format_attribute(self, name, value):
        formatted_attr = {
            'name': name,
        }
        if value is not None:
            formatted_attr['value'] = value
        return formatted_attr


class ClusterFailure(BaseObject):
    def __init__(self, reason, cluster_name):
        self.reason = reason
        self.arn = "arn:aws:ecs:us-east-1:012345678910:cluster/{0}".format(
            cluster_name)

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        response_object['reason'] = self.reason
        response_object['arn'] = self.arn
        return response_object


@ -347,12 +401,19 @@ class ContainerInstanceFailure(BaseObject):

class EC2ContainerServiceBackend(BaseBackend):

    def __init__(self):
    def __init__(self, region_name):
        super(EC2ContainerServiceBackend, self).__init__()
        self.clusters = {}
        self.task_definitions = {}
        self.tasks = {}
        self.services = {}
        self.container_instances = {}
        self.region_name = region_name

    def reset(self):
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def describe_task_definition(self, task_definition_str):
        task_definition_name = task_definition_str.split('/')[-1]
@ -384,6 +445,7 @@ class EC2ContainerServiceBackend(BaseBackend):

    def describe_clusters(self, list_clusters_name=None):
        list_clusters = []
        failures = []
        if list_clusters_name is None:
            if 'default' in self.clusters:
                list_clusters.append(self.clusters['default'].response_object)
@ -394,9 +456,8 @@ class EC2ContainerServiceBackend(BaseBackend):
                    list_clusters.append(
                        self.clusters[cluster_name].response_object)
                else:
                    raise Exception(
                        "{0} is not a cluster".format(cluster_name))
        return list_clusters
                    failures.append(ClusterFailure('MISSING', cluster_name))
        return list_clusters, failures

    def delete_cluster(self, cluster_str):
        cluster_name = cluster_str.split('/')[-1]
@ -479,10 +540,27 @@ class EC2ContainerServiceBackend(BaseBackend):
    def _calculate_task_resource_requirements(task_definition):
        resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []}
        for container_definition in task_definition.container_definitions:
            resource_requirements["CPU"] += container_definition.get('cpu')
            resource_requirements["MEMORY"] += container_definition.get("memory")
            for port_mapping in container_definition.get("portMappings", []):
                resource_requirements["PORTS"].append(port_mapping.get('hostPort'))
            # cloudformation uses capitalized properties, while boto uses all lower case

            # CPU is optional
            resource_requirements["CPU"] += container_definition.get('cpu',
                                                                     container_definition.get('Cpu', 0))

            # either memory or memory reservation must be provided
            if 'Memory' in container_definition or 'MemoryReservation' in container_definition:
                resource_requirements["MEMORY"] += container_definition.get(
                    "Memory", container_definition.get('MemoryReservation'))
            else:
                resource_requirements["MEMORY"] += container_definition.get(
                    "memory", container_definition.get('memoryReservation'))

            port_mapping_key = 'PortMappings' if 'PortMappings' in container_definition else 'portMappings'
            for port_mapping in container_definition.get(port_mapping_key, []):
                if 'hostPort' in port_mapping:
                    resource_requirements["PORTS"].append(port_mapping.get('hostPort'))
                elif 'HostPort' in port_mapping:
                    resource_requirements["PORTS"].append(port_mapping.get('HostPort'))

        return resource_requirements

    @staticmethod
@ -553,8 +631,9 @@ class EC2ContainerServiceBackend(BaseBackend):
            raise Exception("tasks cannot be empty")
        response = []
        for cluster, cluster_tasks in self.tasks.items():
            for task_id, task in cluster_tasks.items():
                if task_id in tasks or task.task_arn in tasks:
            for task_arn, task in cluster_tasks.items():
                task_id = task_arn.split("/")[-1]
                if task_arn in tasks or task.task_arn in tasks or any(task_id in task for task in tasks):
                    response.append(task)
        return response

@ -604,7 +683,7 @@ class EC2ContainerServiceBackend(BaseBackend):
            raise Exception("Could not find task {} on cluster {}".format(
                task_str, cluster_name))

    def create_service(self, cluster_str, service_name, task_definition_str, desired_count):
    def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None, scheduling_strategy=None):
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name in self.clusters:
            cluster = self.clusters[cluster_name]
@ -612,18 +691,23 @@ class EC2ContainerServiceBackend(BaseBackend):
            raise Exception("{0} is not a cluster".format(cluster_name))
        task_definition = self.describe_task_definition(task_definition_str)
        desired_count = desired_count if desired_count is not None else 0

        service = Service(cluster, service_name,
                          task_definition, desired_count)
                          task_definition, desired_count, load_balancers, scheduling_strategy)
        cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
        self.services[cluster_service_pair] = service

        return service

    def list_services(self, cluster_str):
    def list_services(self, cluster_str, scheduling_strategy=None):
        cluster_name = cluster_str.split('/')[-1]
        service_arns = []
        for key, value in self.services.items():
            if cluster_name + ':' in key:
                service_arns.append(self.services[key].arn)
                service = self.services[key]
                if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy:
                    service_arns.append(service.arn)

        return sorted(service_arns)
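
The schedulingStrategy parameter now threads through create_service and list_services, so callers can filter services by strategy. A short sketch under the same assumptions (mock_ecs, boto3, illustrative names):

import boto3
from moto import mock_ecs

@mock_ecs
def test_list_services_filters_by_scheduling_strategy():
    ecs = boto3.client('ecs', region_name='us-east-1')
    ecs.create_cluster(clusterName='test')
    ecs.register_task_definition(
        family='app',
        containerDefinitions=[{'name': 'web', 'image': 'nginx', 'memory': 128}])
    ecs.create_service(cluster='test', serviceName='replica-svc',
                       taskDefinition='app', desiredCount=1)
    ecs.create_service(cluster='test', serviceName='daemon-svc',
                       taskDefinition='app', schedulingStrategy='DAEMON')
    arns = ecs.list_services(cluster='test', schedulingStrategy='DAEMON')['serviceArns']
    assert len(arns) == 1 and 'daemon-svc' in arns[0]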

    def describe_services(self, cluster_str, service_names_or_arns):
@ -650,8 +734,7 @@ class EC2ContainerServiceBackend(BaseBackend):
                cluster_service_pair].desired_count = desired_count
            return self.services[cluster_service_pair]
        else:
            raise Exception("cluster {0} or service {1} does not exist".format(
                cluster_name, service_name))
            raise ServiceNotFoundException(service_name)

    def delete_service(self, cluster_name, service_name):
        cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
@ -669,7 +752,7 @@ class EC2ContainerServiceBackend(BaseBackend):
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        container_instance = ContainerInstance(ec2_instance_id)
        container_instance = ContainerInstance(ec2_instance_id, self.region_name)
        if not self.container_instances.get(cluster_name):
            self.container_instances[cluster_name] = {}
        container_instance_id = container_instance.container_instance_arn.split(
@ -715,6 +798,8 @@ class EC2ContainerServiceBackend(BaseBackend):
                Container instances status should be one of [ACTIVE,DRAINING]")
        failures = []
        container_instance_objects = []
        list_container_instance_ids = [x.split('/')[-1]
                                       for x in list_container_instance_ids]
        for container_instance_id in list_container_instance_ids:
            container_instance = self.container_instances[cluster_name].get(container_instance_id, None)
            if container_instance is not None:
@ -866,6 +951,5 @@ class EC2ContainerServiceBackend(BaseBackend):
            yield task_fam


ecs_backends = {}
for region, ec2_backend in ec2_backends.items():
    ecs_backends[region] = EC2ContainerServiceBackend()
available_regions = boto3.session.Session().get_available_regions("ecs")
ecs_backends = {region: EC2ContainerServiceBackend(region) for region in available_regions}

@ -45,10 +45,10 @@ class EC2ContainerServiceResponse(BaseResponse):

    def describe_clusters(self):
        list_clusters_name = self._get_param('clusters')
        clusters = self.ecs_backend.describe_clusters(list_clusters_name)
        clusters, failures = self.ecs_backend.describe_clusters(list_clusters_name)
        return json.dumps({
            'clusters': clusters,
            'failures': []
            'failures': [cluster.response_object for cluster in failures]
        })

    def delete_cluster(self):
@ -153,15 +153,18 @@ class EC2ContainerServiceResponse(BaseResponse):
        service_name = self._get_param('serviceName')
        task_definition_str = self._get_param('taskDefinition')
        desired_count = self._get_int_param('desiredCount')
        load_balancers = self._get_param('loadBalancers')
        scheduling_strategy = self._get_param('schedulingStrategy')
        service = self.ecs_backend.create_service(
            cluster_str, service_name, task_definition_str, desired_count)
            cluster_str, service_name, task_definition_str, desired_count, load_balancers, scheduling_strategy)
        return json.dumps({
            'service': service.response_object
        })

    def list_services(self):
        cluster_str = self._get_param('cluster')
        service_arns = self.ecs_backend.list_services(cluster_str)
        scheduling_strategy = self._get_param('schedulingStrategy')
        service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy)
        return json.dumps({
            'serviceArns': service_arns
            # ,

@ -268,7 +268,7 @@ class ELBBackend(BaseBackend):
            protocol = port['protocol']
            instance_port = port['instance_port']
            lb_port = port['load_balancer_port']
            ssl_certificate_id = port.get('sslcertificate_id')
            ssl_certificate_id = port.get('ssl_certificate_id')
            for listener in balancer.listeners:
                if lb_port == listener.load_balancer_port:
                    if protocol != listener.protocol:

@ -61,7 +61,7 @@ class ELBResponse(BaseResponse):
            start = all_names.index(marker) + 1
        else:
            start = 0
        page_size = self._get_param('PageSize', 50)  # the default is 400, but using 50 to make testing easier
        page_size = self._get_int_param('PageSize', 50)  # the default is 400, but using 50 to make testing easier
        load_balancers_resp = all_load_balancers[start:start + page_size]
        next_marker = None
        if len(all_load_balancers) > start + page_size:
@ -259,12 +259,22 @@ class ELBResponse(BaseResponse):

    def describe_instance_health(self):
        load_balancer_name = self._get_param('LoadBalancerName')
        instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')]
        if len(instance_ids) == 0:
            instance_ids = self.elb_backend.get_load_balancer(
                load_balancer_name).instance_ids
        provided_instance_ids = [
            list(param.values())[0]
            for param in self._get_list_prefix('Instances.member')
        ]
        registered_instances_id = self.elb_backend.get_load_balancer(
            load_balancer_name).instance_ids
        if len(provided_instance_ids) == 0:
            provided_instance_ids = registered_instances_id
        template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE)
        return template.render(instance_ids=instance_ids)
        instances = []
        for instance_id in provided_instance_ids:
            state = "InService" \
                if instance_id in registered_instances_id\
                else "Unknown"
            instances.append({"InstanceId": instance_id, "State": state})
        return template.render(instances=instances)
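
DescribeInstanceHealth now distinguishes registered instances (InService) from unknown ones (Unknown) instead of reporting everything InService. A sketch assuming moto's mock_elb decorator; the instance ids are illustrative and never actually launched:

import boto3
from moto import mock_elb

@mock_elb
def test_describe_instance_health_states():
    elb = boto3.client('elb', region_name='us-east-1')
    elb.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'HTTP', 'LoadBalancerPort': 80, 'InstancePort': 80}],
        AvailabilityZones=['us-east-1a'])
    elb.register_instances_with_load_balancer(
        LoadBalancerName='my-lb', Instances=[{'InstanceId': 'i-12345678'}])
    health = elb.describe_instance_health(
        LoadBalancerName='my-lb',
        Instances=[{'InstanceId': 'i-12345678'}, {'InstanceId': 'i-deadbeef'}])
    states = {i['InstanceId']: i['State'] for i in health['InstanceStates']}
    assert states == {'i-12345678': 'InService', 'i-deadbeef': 'Unknown'}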

    def add_tags(self):

@ -689,11 +699,11 @@ SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """<SetLoadBalancerPoli
DESCRIBE_INSTANCE_HEALTH_TEMPLATE = """<DescribeInstanceHealthResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
  <DescribeInstanceHealthResult>
    <InstanceStates>
      {% for instance_id in instance_ids %}
      {% for instance in instances %}
      <member>
        <Description>N/A</Description>
        <InstanceId>{{ instance_id }}</InstanceId>
        <State>InService</State>
        <InstanceId>{{ instance['InstanceId'] }}</InstanceId>
        <State>{{ instance['State'] }}</State>
        <ReasonCode>N/A</ReasonCode>
      </member>
      {% endfor %}

@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError):
    def __init__(self, invalid_name, index):
        super(InvalidActionTypeError, self).__init__(
            "ValidationError",
            "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index)
            "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index)
        )


@ -35,12 +35,13 @@ from .exceptions import (

class FakeHealthStatus(BaseModel):

    def __init__(self, instance_id, port, health_port, status, reason=None):
    def __init__(self, instance_id, port, health_port, status, reason=None, description=None):
        self.instance_id = instance_id
        self.port = port
        self.health_port = health_port
        self.status = status
        self.reason = reason
        self.description = description


class FakeTargetGroup(BaseModel):
@ -52,30 +53,35 @@ class FakeTargetGroup(BaseModel):
                 vpc_id,
                 protocol,
                 port,
                 healthcheck_protocol,
                 healthcheck_port,
                 healthcheck_path,
                 healthcheck_interval_seconds,
                 healthcheck_timeout_seconds,
                 healthy_threshold_count,
                 unhealthy_threshold_count,
                 healthcheck_protocol=None,
                 healthcheck_port=None,
                 healthcheck_path=None,
                 healthcheck_interval_seconds=None,
                 healthcheck_timeout_seconds=None,
                 healthy_threshold_count=None,
                 unhealthy_threshold_count=None,
                 matcher=None,
                 target_type=None):

        # TODO: default values differs when you add Network Load balancer
        self.name = name
        self.arn = arn
        self.vpc_id = vpc_id
        self.protocol = protocol
        self.port = port
        self.healthcheck_protocol = healthcheck_protocol
        self.healthcheck_port = healthcheck_port
        self.healthcheck_path = healthcheck_path
        self.healthcheck_interval_seconds = healthcheck_interval_seconds
        self.healthcheck_timeout_seconds = healthcheck_timeout_seconds
        self.healthy_threshold_count = healthy_threshold_count
        self.unhealthy_threshold_count = unhealthy_threshold_count
        self.healthcheck_protocol = healthcheck_protocol or 'HTTP'
        self.healthcheck_port = healthcheck_port or str(self.port)
        self.healthcheck_path = healthcheck_path or '/'
        self.healthcheck_interval_seconds = healthcheck_interval_seconds or 30
        self.healthcheck_timeout_seconds = healthcheck_timeout_seconds or 5
        self.healthy_threshold_count = healthy_threshold_count or 5
        self.unhealthy_threshold_count = unhealthy_threshold_count or 2
        self.load_balancer_arns = []
        self.tags = {}
        self.matcher = matcher
        if matcher is None:
            self.matcher = {'HttpCode': '200'}
        else:
            self.matcher = matcher
        self.target_type = target_type

        self.attributes = {
@ -107,10 +113,14 @@ class FakeTargetGroup(BaseModel):
            raise TooManyTagsError()
        self.tags[key] = value

    def health_for(self, target):
    def health_for(self, target, ec2_backend):
        t = self.targets.get(target['id'])
        if t is None:
            raise InvalidTargetError()
        if t['id'].startswith("i-"):  # EC2 instance ID
            instance = ec2_backend.get_instance_by_id(t['id'])
            if instance.state == "stopped":
                return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'unused', 'Target.InvalidState', 'Target is in the stopped state')
        return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy')
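
health_for now consults the EC2 backend, so a stopped instance is reported as 'unused' with reason Target.InvalidState rather than unconditionally 'healthy'. A hedged end-to-end sketch (mock_ec2 plus mock_elbv2; the AMI id, CIDR and names are illustrative):

import boto3
from moto import mock_ec2, mock_elbv2

@mock_ec2
@mock_elbv2
def test_stopped_instance_target_health():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24')
    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    group = elbv2.create_target_group(
        Name='my-targets', Protocol='HTTP', Port=80, VpcId=vpc.id)['TargetGroups'][0]
    instance = ec2.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)[0]
    elbv2.register_targets(TargetGroupArn=group['TargetGroupArn'],
                           Targets=[{'Id': instance.id, 'Port': 80}])
    instance.stop()
    health = elbv2.describe_target_health(
        TargetGroupArn=group['TargetGroupArn'])['TargetHealthDescriptions'][0]
    assert health['TargetHealth']['State'] == 'unused'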

    @classmethod
@ -119,10 +129,7 @@ class FakeTargetGroup(BaseModel):

        elbv2_backend = elbv2_backends[region_name]

        # per cloudformation docs:
        # The target group name should be shorter than 22 characters because
        # AWS CloudFormation uses the target group name to create the name of the load balancer.
        name = properties.get('Name', resource_name[:22])
        name = properties.get('Name')
        vpc_id = properties.get("VpcId")
        protocol = properties.get('Protocol')
        port = properties.get("Port")
@ -202,8 +209,20 @@ class FakeListener(BaseModel):
        # transform default actions to conform with the rest of the code and XML templates
        if "DefaultActions" in properties:
            default_actions = []
            for action in properties['DefaultActions']:
                default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']})
            for i, action in enumerate(properties['DefaultActions']):
                action_type = action['Type']
                if action_type == 'forward':
                    default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']})
                elif action_type == 'redirect':
                    redirect_action = {'type': action_type, }
                    for redirect_config_key, redirect_config_value in action['RedirectConfig'].items():
                        # need to match the output of _get_list_prefix
                        if redirect_config_key == 'StatusCode':
                            redirect_config_key = 'status_code'
                        redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value
                    default_actions.append(redirect_action)
                else:
                    raise InvalidActionTypeError(action_type, i + 1)
        else:
            default_actions = None

@ -293,11 +312,32 @@ class FakeLoadBalancer(BaseModel):
        return load_balancer

    def get_cfn_attribute(self, attribute_name):
        attributes = {
            'DNSName': self.dns_name,
            'LoadBalancerName': self.name,
        }
        return attributes[attribute_name]
        '''
        Implemented attributes:
        * DNSName
        * LoadBalancerName

        Not implemented:
        * CanonicalHostedZoneID
        * LoadBalancerFullName
        * SecurityGroups

        This method is similar to models.py:FakeLoadBalancer.get_cfn_attribute()
        '''
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        not_implemented_yet = [
            'CanonicalHostedZoneID',
            'LoadBalancerFullName',
            'SecurityGroups',
        ]
        if attribute_name == 'DNSName':
            return self.dns_name
        elif attribute_name == 'LoadBalancerName':
            return self.name
        elif attribute_name in not_implemented_yet:
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "%s" ]"' % attribute_name)
        else:
            raise UnformattedGetAttTemplateException()


class ELBv2Backend(BaseBackend):
@ -394,11 +434,15 @@ class ELBv2Backend(BaseBackend):
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
            if action_type not in ['forward']:
            if action_type == 'forward':
                action_target_group_arn = action['target_group_arn']
                if action_target_group_arn not in target_group_arns:
                    raise ActionTargetGroupNotFoundError(action_target_group_arn)
            elif action_type == 'redirect':
                # nothing to do
                pass
            else:
                raise InvalidActionTypeError(action_type, index)
            action_target_group_arn = action['target_group_arn']
            if action_target_group_arn not in target_group_arns:
                raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'
@ -411,7 +455,7 @@ class ELBv2Backend(BaseBackend):
    def create_target_group(self, name, **kwargs):
        if len(name) > 32:
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot be longer than '22' characters" % name
                "Target group name '%s' cannot be longer than '32' characters" % name
            )
        if not re.match('^[a-zA-Z0-9\-]+$', name):
            raise InvalidTargetGroupNameError(
@ -433,28 +477,18 @@ class ELBv2Backend(BaseBackend):
            raise DuplicateTargetGroupName()

        valid_protocols = ['HTTPS', 'HTTP', 'TCP']
        if kwargs['healthcheck_protocol'] not in valid_protocols:
        if kwargs.get('healthcheck_protocol') and kwargs['healthcheck_protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'healthCheckProtocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols))
        if kwargs['protocol'] not in valid_protocols:
        if kwargs.get('protocol') and kwargs['protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'protocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols))

        if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
        if kwargs.get('matcher') and FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
            raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')

        valid_protocols = ['HTTPS', 'HTTP', 'TCP']
        if kwargs['healthcheck_protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'healthCheckProtocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols))
        if kwargs['protocol'] not in valid_protocols:
            raise InvalidConditionValueError(
                "Value {} at 'protocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols))

        arn = make_arn_for_target_group(account_id=1, name=name, region_name=self.region_name)
        target_group = FakeTargetGroup(name, arn, **kwargs)
        self.target_groups[target_group.arn] = target_group
@ -470,6 +504,18 @@ class ELBv2Backend(BaseBackend):
        arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self))
        listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions)
        balancer.listeners[listener.arn] = listener
        for i, action in enumerate(default_actions):
            action_type = action['type']
            if action_type == 'forward':
                if action['target_group_arn'] in self.target_groups.keys():
                    target_group = self.target_groups[action['target_group_arn']]
                    target_group.load_balancer_arns.append(load_balancer_arn)
            elif action_type == 'redirect':
                # nothing to do
                pass
            else:
                raise InvalidActionTypeError(action_type, i + 1)

        return listener
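
Listeners can now carry 'redirect' default actions alongside 'forward' ones. A sketch of creating an HTTP-to-HTTPS redirect listener, assuming mock_ec2 and mock_elbv2 (the subnets, CIDRs and names are illustrative):

import boto3
from moto import mock_ec2, mock_elbv2

@mock_ec2
@mock_elbv2
def test_create_redirect_listener():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24')
    subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.0/26',
                                AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='172.28.7.64/26',
                                AvailabilityZone='us-east-1b')
    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    lb = elbv2.create_load_balancer(
        Name='my-lb', Subnets=[subnet1.id, subnet2.id])['LoadBalancers'][0]
    listener = elbv2.create_listener(
        LoadBalancerArn=lb['LoadBalancerArn'], Protocol='HTTP', Port=80,
        DefaultActions=[{
            'Type': 'redirect',
            'RedirectConfig': {'Protocol': 'HTTPS', 'Port': '443',
                               'StatusCode': 'HTTP_301'},
        }])['Listeners'][0]
    assert listener['DefaultActions'][0]['Type'] == 'redirect'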

    def describe_load_balancers(self, arns, names):
@ -632,11 +678,15 @@ class ELBv2Backend(BaseBackend):
        for i, action in enumerate(actions):
            index = i + 1
            action_type = action['type']
            if action_type not in ['forward']:
            if action_type == 'forward':
                action_target_group_arn = action['target_group_arn']
                if action_target_group_arn not in target_group_arns:
                    raise ActionTargetGroupNotFoundError(action_target_group_arn)
            elif action_type == 'redirect':
                # nothing to do
                pass
            else:
                raise InvalidActionTypeError(action_type, index)
            action_target_group_arn = action['target_group_arn']
            if action_target_group_arn not in target_group_arns:
                raise ActionTargetGroupNotFoundError(action_target_group_arn)

        # TODO: check for error 'TooManyRegistrationsForTargetId'
        # TODO: check for error 'TooManyRules'
@ -667,7 +717,7 @@ class ELBv2Backend(BaseBackend):

        if not targets:
            targets = target_group.targets.values()
        return [target_group.health_for(target) for target in targets]
        return [target_group.health_for(target, self.ec2_backend) for target in targets]

    def set_rule_priorities(self, rule_priorities):
        # validate
@ -856,7 +906,7 @@ class ELBv2Backend(BaseBackend):
            # Its already validated in responses.py
            listener.ssl_policy = ssl_policy

        if default_actions is not None:
        if default_actions is not None and default_actions != []:
            # Is currently not validated
            listener.default_actions = default_actions


@ -180,14 +180,14 @@ class ELBV2Response(BaseResponse):
        vpc_id = self._get_param('VpcId')
        protocol = self._get_param('Protocol')
        port = self._get_param('Port')
        healthcheck_protocol = self._get_param('HealthCheckProtocol', 'HTTP')
        healthcheck_port = self._get_param('HealthCheckPort', 'traffic-port')
        healthcheck_path = self._get_param('HealthCheckPath', '/')
        healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds', '30')
        healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5')
        healthy_threshold_count = self._get_param('HealthyThresholdCount', '5')
        unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2')
        http_codes = self._get_param('Matcher.HttpCode', '200')
        healthcheck_protocol = self._get_param('HealthCheckProtocol')
        healthcheck_port = self._get_param('HealthCheckPort')
        healthcheck_path = self._get_param('HealthCheckPath')
        healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds')
        healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds')
        healthy_threshold_count = self._get_param('HealthyThresholdCount')
        unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount')
        matcher = self._get_param('Matcher')

        target_group = self.elbv2_backend.create_target_group(
            name,
@ -201,7 +201,7 @@ class ELBV2Response(BaseResponse):
            healthcheck_timeout_seconds=healthcheck_timeout_seconds,
            healthy_threshold_count=healthy_threshold_count,
            unhealthy_threshold_count=unhealthy_threshold_count,
            matcher={'HttpCode': http_codes}
            matcher=matcher,
        )

        template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE)
@ -242,7 +242,7 @@ class ELBV2Response(BaseResponse):
            start = all_names.index(marker) + 1
        else:
            start = 0
        page_size = self._get_param('PageSize', 50)  # the default is 400, but using 50 to make testing easier
        page_size = self._get_int_param('PageSize', 50)  # the default is 400, but using 50 to make testing easier
        load_balancers_resp = all_load_balancers[start:start + page_size]
        next_marker = None
        if len(all_load_balancers) > start + page_size:
@ -468,7 +468,7 @@ class ELBV2Response(BaseResponse):
    def describe_account_limits(self):
        # Supports paging but not worth implementing yet
        # marker = self._get_param('Marker')
        # page_size = self._get_param('PageSize')
        # page_size = self._get_int_param('PageSize')

        limits = {
            'application-load-balancers': 20,
@ -489,7 +489,7 @@ class ELBV2Response(BaseResponse):
        names = self._get_multi_param('Names.member.')
        # Supports paging but not worth implementing yet
        # marker = self._get_param('Marker')
        # page_size = self._get_param('PageSize')
        # page_size = self._get_int_param('PageSize')

        policies = SSL_POLICIES
        if names:
@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing
          {% for action in rule.actions %}
          <member>
            <Type>{{ action["type"] }}</Type>
            {% if action["type"] == "forward" %}
            <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
            {% elif action["type"] == "redirect" %}
            <RedirectConfig>{{ action["redirect_config"] }}</RedirectConfig>
            {% endif %}
          </member>
          {% endfor %}
        </Actions>
@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
        {% for action in listener.default_actions %}
        <member>
          <Type>{{ action.type }}</Type>
          <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
          {% if action["type"] == "forward" %}
          <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
          {% elif action["type"] == "redirect" %}
          <RedirectConfig>
            <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
            <Port>{{ action["redirect_config._port"] }}</Port>
            <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
          </RedirectConfig>
          {% endif %}
        </member>
        {% endfor %}
      </DefaultActions>
@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbal
          {% for action in rule.actions %}
          <member>
            <Type>{{ action["type"] }}</Type>
            {% if action["type"] == "forward" %}
            <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
            {% elif action["type"] == "redirect" %}
            <RedirectConfig>
              <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
              <Port>{{ action["redirect_config._port"] }}</Port>
              <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
            </RedirectConfig>
            {% endif %}
          </member>
          {% endfor %}
        </Actions>
@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://el
        {% for action in listener.default_actions %}
        <member>
          <Type>{{ action.type }}</Type>
          <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
          {% if action["type"] == "forward" %}
          <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
          {% elif action["type"] == "redirect" %}
          <RedirectConfig>
            <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
            <Port>{{ action["redirect_config._port"] }}</Port>
            <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
          </RedirectConfig>
          {% endif %}
        </member>
        {% endfor %}
      </DefaultActions>
@ -1180,6 +1208,12 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """<DescribeTargetHealthResponse xmlns="http:/
        <HealthCheckPort>{{ target_health.health_port }}</HealthCheckPort>
        <TargetHealth>
          <State>{{ target_health.status }}</State>
          {% if target_health.reason %}
          <Reason>{{ target_health.reason }}</Reason>
          {% endif %}
          {% if target_health.description %}
          <Description>{{ target_health.description }}</Description>
          {% endif %}
        </TargetHealth>
        <Target>
          <Port>{{ target_health.port }}</Port>
@ -1399,7 +1433,15 @@ MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadb
        {% for action in listener.default_actions %}
        <member>
          <Type>{{ action.type }}</Type>
          <TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
          {% if action["type"] == "forward" %}
          <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn>
          {% elif action["type"] == "redirect" %}
          <RedirectConfig>
            <Protocol>{{ action["redirect_config._protocol"] }}</Protocol>
            <Port>{{ action["redirect_config._port"] }}</Port>
            <StatusCode>{{ action["redirect_config._status_code"] }}</StatusCode>
          </RedirectConfig>
          {% endif %}
        </member>
        {% endfor %}
      </DefaultActions>

@ -97,7 +97,8 @@ class FakeCluster(BaseModel):
                 visible_to_all_users='false',
                 release_label=None,
                 requested_ami_version=None,
                 running_ami_version=None):
                 running_ami_version=None,
                 custom_ami_id=None):
        self.id = cluster_id or random_cluster_id()
        emr_backend.clusters[self.id] = self
        self.emr_backend = emr_backend
@ -162,6 +163,7 @@ class FakeCluster(BaseModel):
        self.release_label = release_label
        self.requested_ami_version = requested_ami_version
        self.running_ami_version = running_ami_version
        self.custom_ami_id = custom_ami_id

        self.role = job_flow_role or 'EMRJobflowDefault'
        self.service_role = service_role

@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse):
            else:
                kwargs['running_ami_version'] = '1.0.0'

        custom_ami_id = self._get_param('CustomAmiId')
        if custom_ami_id:
            kwargs['custom_ami_id'] = custom_ami_id
            if release_label and release_label < 'emr-5.7.0':
                message = 'Custom AMI is not allowed'
                raise EmrError(error_type='ValidationException',
                               message=message, template='error_json')
            elif ami_version:
                message = 'Custom AMI is not supported in this version of EMR'
                raise EmrError(error_type='ValidationException',
                               message=message, template='error_json')
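
The validation mirrors EMR's real behaviour: a custom AMI is only allowed from release emr-5.7.0 onwards, and never with the legacy AmiVersion parameter. A sketch (mock_emr, boto3; role names and the AMI id are illustrative):

import boto3
from botocore.exceptions import ClientError
from moto import mock_emr

@mock_emr
def test_custom_ami_requires_emr_5_7():
    emr = boto3.client('emr', region_name='us-east-1')
    common = dict(
        Name='cluster',
        Instances={'InstanceCount': 1, 'MasterInstanceType': 'c1.medium',
                   'SlaveInstanceType': 'c1.medium',
                   'KeepJobFlowAliveWhenNoSteps': True},
        JobFlowRole='EMR_EC2_DefaultRole', ServiceRole='EMR_DefaultRole')
    try:
        emr.run_job_flow(CustomAmiId='ami-deadbeef', ReleaseLabel='emr-5.6.0', **common)
    except ClientError as err:
        assert err.response['Error']['Message'] == 'Custom AMI is not allowed'
    # On emr-5.7.0 or later the custom AMI id is accepted and echoed back.
    cluster_id = emr.run_job_flow(CustomAmiId='ami-deadbeef',
                                  ReleaseLabel='emr-5.7.0', **common)['JobFlowId']
    cluster = emr.describe_cluster(ClusterId=cluster_id)['Cluster']
    assert cluster['CustomAmiId'] == 'ami-deadbeef'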

        cluster = self.backend.run_job_flow(**kwargs)

        applications = self._get_list_prefix('Applications.member')
@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """<DescribeClusterResponse xmlns="http://elasticmap
          </member>
          {% endfor %}
        </Configurations>
        {% if cluster.custom_ami_id is not none %}
        <CustomAmiId>{{ cluster.custom_ami_id }}</CustomAmiId>
        {% endif %}
        <Ec2InstanceAttributes>
          <AdditionalMasterSecurityGroups>
          {% for each in cluster.additional_master_security_groups %}
@ -462,10 +477,10 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """<DescribeJobFlowsResponse xmlns="http://elastic
            <ScriptBootstrapAction>
              <Args>
                {% for arg in bootstrap_action.args %}
                <member>{{ arg }}</member>
                <member>{{ arg | escape }}</member>
                {% endfor %}
              </Args>
              <Path>{{ bootstrap_action.script_path }}</Path>
              <Path>{{ bootstrap_action.script_path | escape }}</Path>
            </ScriptBootstrapAction>
          </BootstrapActionConfig>
        </member>
@ -568,12 +583,12 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """<DescribeJobFlowsResponse xmlns="http://elastic
              <MainClass>{{ step.main_class }}</MainClass>
              <Args>
                {% for arg in step.args %}
                <member>{{ arg }}</member>
                <member>{{ arg | escape }}</member>
                {% endfor %}
              </Args>
              <Properties/>
            </HadoopJarStep>
            <Name>{{ step.name }}</Name>
            <Name>{{ step.name | escape }}</Name>
          </StepConfig>
        </member>
        {% endfor %}
@ -596,7 +611,7 @@ DESCRIBE_STEP_TEMPLATE = """<DescribeStepResponse xmlns="http://elasticmapreduce
      <Config>
        <Args>
          {% for arg in step.args %}
          <member>{{ arg }}</member>
          <member>{{ arg | escape }}</member>
          {% endfor %}
        </Args>
        <Jar>{{ step.jar }}</Jar>
@ -605,21 +620,19 @@ DESCRIBE_STEP_TEMPLATE = """<DescribeStepResponse xmlns="http://elasticmapreduce
          {% for key, val in step.properties.items() %}
          <member>
            <key>{{ key }}</key>
            <value>{{ val }}</value>
            <value>{{ val | escape }}</value>
          </member>
          {% endfor %}
        </Properties>
      </Config>
      <Id>{{ step.id }}</Id>
      <Name>{{ step.name }}</Name>
      <Name>{{ step.name | escape }}</Name>
      <Status>
<!-- does not exist for botocore 1.4.28
        <FailureDetails>
          <Reason/>
          <Message/>
          <LogFile/>
        </FailureDetails>
-->
        <State>{{ step.state }}</State>
        <StateChangeReason>{{ step.state_change_reason }}</StateChangeReason>
        <Timeline>
@ -646,7 +659,7 @@ LIST_BOOTSTRAP_ACTIONS_TEMPLATE = """<ListBootstrapActionsResponse xmlns="http:/
        <member>
          <Args>
            {% for arg in bootstrap_action.args %}
            <member>{{ arg }}</member>
            <member>{{ arg | escape }}</member>
            {% endfor %}
          </Args>
          <Name>{{ bootstrap_action.name }}</Name>
@ -760,22 +773,22 @@ LIST_STEPS_TEMPLATE = """<ListStepsResponse xmlns="http://elasticmapreduce.amazo
      <Config>
        <Args>
          {% for arg in step.args %}
          <member>{{ arg }}</member>
          <member>{{ arg | escape }}</member>
          {% endfor %}
        </Args>
        <Jar>{{ step.jar }}</Jar>
        <Jar>{{ step.jar | escape }}</Jar>
        <MainClass/>
        <Properties>
          {% for key, val in step.properties.items() %}
          <member>
            <key>{{ key }}</key>
            <value>{{ val }}</value>
            <value>{{ val | escape }}</value>
          </member>
          {% endfor %}
        </Properties>
      </Config>
      <Id>{{ step.id }}</Id>
      <Name>{{ step.name }}</Name>
      <Name>{{ step.name | escape }}</Name>
      <Status>
<!-- does not exist for botocore 1.4.28
        <FailureDetails>

@ -1,5 +1,6 @@
import os
import re
import json

from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
@ -210,7 +211,7 @@ class EventsBackend(BaseBackend):
        raise NotImplementedError()

    def put_permission(self, action, principal, statement_id):
        if action is None or action != 'PutEvents':
        if action is None or action != 'events:PutEvents':
            raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents')

        if principal is None or self.ACCOUNT_ID.match(principal) is None:
@ -235,11 +236,13 @@ class EventsBackend(BaseBackend):
                'Sid': statement_id,
                'Effect': 'Allow',
                'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])},
                'Action': 'events:{0}'.format(data['action']),
                'Action': data['action'],
                'Resource': arn
            })
        policy = {'Version': '2012-10-17', 'Statement': statements}
        policy_json = json.dumps(policy)
        return {
            'Policy': {'Version': '2012-10-17', 'Statement': statements},
            'Policy': policy_json,
            'Name': 'default',
            'Arn': arn
        }
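
Returning the policy as a JSON string matches the real DescribeEventBus response, so clients must json.loads it. A sketch assuming mock_events and boto3 (the account id and statement id are illustrative):

import json

import boto3
from moto import mock_events

@mock_events
def test_event_bus_policy_is_a_json_string():
    client = boto3.client('events', region_name='us-east-1')
    client.put_permission(Action='events:PutEvents',
                          Principal='111111111111', StatementId='allow-account')
    bus = client.describe_event_bus()
    # 'Policy' is a JSON document serialized to a string, as in the real API.
    statement = json.loads(bus['Policy'])['Statement'][0]
    assert statement['Action'] == 'events:PutEvents'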

@ -2,42 +2,101 @@ from __future__ import unicode_literals

import hashlib

import datetime


import boto.glacier
from moto.core import BaseBackend, BaseModel

from .utils import get_job_id


class ArchiveJob(BaseModel):
class Job(BaseModel):
    def __init__(self, tier):
        self.st = datetime.datetime.now()

    def __init__(self, job_id, archive_id):
        if tier.lower() == "expedited":
            self.et = self.st + datetime.timedelta(seconds=2)
        elif tier.lower() == "bulk":
            self.et = self.st + datetime.timedelta(seconds=10)
        else:
            # Standard
            self.et = self.st + datetime.timedelta(seconds=5)


class ArchiveJob(Job):

    def __init__(self, job_id, tier, arn, archive_id):
        self.job_id = job_id
        self.tier = tier
        self.arn = arn
        self.archive_id = archive_id
        Job.__init__(self, tier)

    def to_dict(self):
        return {
            "Action": "InventoryRetrieval",
        d = {
            "Action": "ArchiveRetrieval",
            "ArchiveId": self.archive_id,
            "ArchiveSizeInBytes": 0,
            "ArchiveSHA256TreeHash": None,
            "Completed": True,
            "CompletionDate": "2013-03-20T17:03:43.221Z",
            "CreationDate": "2013-03-20T17:03:43.221Z",
            "InventorySizeInBytes": "0",
            "Completed": False,
            "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
            "InventorySizeInBytes": 0,
            "JobDescription": None,
            "JobId": self.job_id,
            "RetrievalByteRange": None,
            "SHA256TreeHash": None,
            "SNSTopic": None,
            "StatusCode": "Succeeded",
            "StatusCode": "InProgress",
            "StatusMessage": None,
            "VaultARN": None,
            "VaultARN": self.arn,
            "Tier": self.tier
        }
        if datetime.datetime.now() > self.et:
            d["Completed"] = True
            d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z")
            d["InventorySizeInBytes"] = 10000
            d["StatusCode"] = "Succeeded"
        return d


class InventoryJob(Job):

    def __init__(self, job_id, tier, arn):
        self.job_id = job_id
        self.tier = tier
        self.arn = arn
        Job.__init__(self, tier)

    def to_dict(self):
        d = {
            "Action": "InventoryRetrieval",
            "ArchiveSHA256TreeHash": None,
            "Completed": False,
            "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
            "InventorySizeInBytes": 0,
            "JobDescription": None,
            "JobId": self.job_id,
            "RetrievalByteRange": None,
            "SHA256TreeHash": None,
            "SNSTopic": None,
            "StatusCode": "InProgress",
            "StatusMessage": None,
            "VaultARN": self.arn,
            "Tier": self.tier
        }
        if datetime.datetime.now() > self.et:
            d["Completed"] = True
            d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z")
            d["InventorySizeInBytes"] = 10000
            d["StatusCode"] = "Succeeded"
        return d


class Vault(BaseModel):

    def __init__(self, vault_name, region):
        self.st = datetime.datetime.now()
        self.vault_name = vault_name
        self.region = region
        self.archives = {}
@ -48,29 +107,57 @@ class Vault(BaseModel):
        return "arn:aws:glacier:{0}:012345678901:vaults/{1}".format(self.region, self.vault_name)

    def to_dict(self):
        return {
            "CreationDate": "2013-03-20T17:03:43.221Z",
            "LastInventoryDate": "2013-03-20T17:03:43.221Z",
            "NumberOfArchives": None,
            "SizeInBytes": None,
        archives_size = 0
        for k in self.archives:
            archives_size += self.archives[k]["size"]
        d = {
            "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
            "LastInventoryDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
            "NumberOfArchives": len(self.archives),
            "SizeInBytes": archives_size,
            "VaultARN": self.arn,
            "VaultName": self.vault_name,
        }
        return d

    def create_archive(self, body):
        archive_id = hashlib.sha256(body).hexdigest()
        self.archives[archive_id] = body
    def create_archive(self, body, description):
        archive_id = hashlib.md5(body).hexdigest()
        self.archives[archive_id] = {}
        self.archives[archive_id]["body"] = body
        self.archives[archive_id]["size"] = len(body)
        self.archives[archive_id]["sha256"] = hashlib.sha256(body).hexdigest()
        self.archives[archive_id]["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z")
        self.archives[archive_id]["description"] = description
        return archive_id

    def get_archive_body(self, archive_id):
        return self.archives[archive_id]
        return self.archives[archive_id]["body"]

    def get_archive_list(self):
        archive_list = []
        for a in self.archives:
            archive = self.archives[a]
            aobj = {
                "ArchiveId": a,
                "ArchiveDescription": archive["description"],
                "CreationDate": archive["creation_date"],
                "Size": archive["size"],
                "SHA256TreeHash": archive["sha256"]
            }
            archive_list.append(aobj)
        return archive_list

    def delete_archive(self, archive_id):
        return self.archives.pop(archive_id)

    def initiate_job(self, archive_id):
    def initiate_job(self, job_type, tier, archive_id):
        job_id = get_job_id()
        job = ArchiveJob(job_id, archive_id)

        if job_type == "inventory-retrieval":
            job = InventoryJob(job_id, tier, self.arn)
        elif job_type == "archive-retrieval":
            job = ArchiveJob(job_id, tier, self.arn, archive_id)

        self.jobs[job_id] = job
        return job_id
|
||||
|
||||
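
Since create_archive now stores a metadata dict per archive (body, size, SHA256, creation date, description), vault statistics and inventories can be computed from it. A small sketch against the Vault model directly (vault name and payload are illustrative):

    from moto.glacier.models import Vault

    vault = Vault("my-vault", "us-east-1")
    # The archive id is now the MD5 of the body; metadata is kept alongside it
    archive_id = vault.create_archive(b"some bytes", "backup of notes")
    print(vault.get_archive_body(archive_id))   # b"some bytes"
    print(vault.to_dict()["NumberOfArchives"])  # 1
    print(vault.to_dict()["SizeInBytes"])       # 10
    print(vault.get_archive_list()[0]["ArchiveDescription"])  # "backup of notes"
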
@@ -80,10 +167,24 @@ class Vault(BaseModel):
     def describe_job(self, job_id):
         return self.jobs.get(job_id)

+    def job_ready(self, job_id):
+        job = self.describe_job(job_id)
+        jobj = job.to_dict()
+        return jobj["Completed"]
+
     def get_job_output(self, job_id):
         job = self.describe_job(job_id)
-        archive_body = self.get_archive_body(job.archive_id)
-        return archive_body
+        jobj = job.to_dict()
+        if jobj["Action"] == "InventoryRetrieval":
+            archives = self.get_archive_list()
+            return {
+                "VaultARN": self.arn,
+                "InventoryDate": jobj["CompletionDate"],
+                "ArchiveList": archives
+            }
+        else:
+            archive_body = self.get_archive_body(job.archive_id)
+            return archive_body


 class GlacierBackend(BaseBackend):
@@ -109,9 +210,9 @@ class GlacierBackend(BaseBackend):
     def delete_vault(self, vault_name):
         self.vaults.pop(vault_name)

-    def initiate_job(self, vault_name, archive_id):
+    def initiate_job(self, vault_name, job_type, tier, archive_id):
         vault = self.get_vault(vault_name)
-        job_id = vault.initiate_job(archive_id)
+        job_id = vault.initiate_job(job_type, tier, archive_id)
         return job_id

     def list_jobs(self, vault_name):
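
With the backend threading job_type and tier through, InitiateJob can be exercised end to end. A hedged boto3 sketch (the vault name is illustrative; mock_glacier is moto's existing decorator):

    import boto3
    from moto import mock_glacier

    @mock_glacier
    def demo():
        client = boto3.client("glacier", region_name="us-east-1")
        client.create_vault(accountId="-", vaultName="my-vault")
        # Type picks InventoryJob vs ArchiveJob; Tier falls back to "Standard"
        resp = client.initiate_job(
            accountId="-",
            vaultName="my-vault",
            jobParameters={"Type": "inventory-retrieval", "Tier": "Expedited"},
        )
        print(resp["jobId"])

    demo()
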
@@ -72,17 +72,25 @@ class GlacierResponse(_TemplateEnvironmentMixin):

     def _vault_archive_response(self, request, full_url, headers):
         method = request.method
-        body = request.body
+        if hasattr(request, 'body'):
+            body = request.body
+        else:
+            body = request.data
+        description = ""
+        if 'x-amz-archive-description' in request.headers:
+            description = request.headers['x-amz-archive-description']
         parsed_url = urlparse(full_url)
         querystring = parse_qs(parsed_url.query, keep_blank_values=True)
         vault_name = full_url.split("/")[-2]

         if method == 'POST':
-            return self._vault_archive_response_post(vault_name, body, querystring, headers)
+            return self._vault_archive_response_post(vault_name, body, description, querystring, headers)
         else:
             return 400, headers, "400 Bad Request"

-    def _vault_archive_response_post(self, vault_name, body, querystring, headers):
+    def _vault_archive_response_post(self, vault_name, body, description, querystring, headers):
         vault = self.backend.get_vault(vault_name)
-        vault_id = vault.create_archive(body)
+        vault_id = vault.create_archive(body, description)
         headers['x-amz-archive-id'] = vault_id
         return 201, headers, ""
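
The handler now also honours the x-amz-archive-description header, which boto3 sends for its archiveDescription parameter. A sketch (names illustrative):

    import boto3
    from moto import mock_glacier

    @mock_glacier
    def demo():
        client = boto3.client("glacier", region_name="us-east-1")
        client.create_vault(accountId="-", vaultName="my-vault")
        resp = client.upload_archive(
            accountId="-",
            vaultName="my-vault",
            archiveDescription="nightly backup",  # sent as x-amz-archive-description
            body=b"payload",
        )
        print(resp["archiveId"])

    demo()
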
@@ -110,7 +118,10 @@ class GlacierResponse(_TemplateEnvironmentMixin):

     def _vault_jobs_response(self, request, full_url, headers):
         method = request.method
-        body = request.body
+        if hasattr(request, 'body'):
+            body = request.body
+        else:
+            body = request.data
         account_id = full_url.split("/")[1]
         vault_name = full_url.split("/")[-2]

@@ -125,11 +136,17 @@ class GlacierResponse(_TemplateEnvironmentMixin):
             })
         elif method == 'POST':
             json_body = json.loads(body.decode("utf-8"))
-            archive_id = json_body['ArchiveId']
-            job_id = self.backend.initiate_job(vault_name, archive_id)
+            job_type = json_body['Type']
+            archive_id = None
+            if 'ArchiveId' in json_body:
+                archive_id = json_body['ArchiveId']
+            if 'Tier' in json_body:
+                tier = json_body["Tier"]
+            else:
+                tier = "Standard"
+            job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id)
             headers['x-amz-job-id'] = job_id
-            headers[
-                'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id)
+            headers['Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id)
             return 202, headers, ""

     @classmethod
@@ -155,8 +172,14 @@ class GlacierResponse(_TemplateEnvironmentMixin):
     def _vault_jobs_output_response(self, request, full_url, headers):
         vault_name = full_url.split("/")[-4]
        job_id = full_url.split("/")[-2]

         vault = self.backend.get_vault(vault_name)
-        output = vault.get_job_output(job_id)
-        headers['content-type'] = 'application/octet-stream'
-        return 200, headers, output
+        if vault.job_ready(job_id):
+            output = vault.get_job_output(job_id)
+            if isinstance(output, dict):
+                headers['content-type'] = 'application/json'
+                return 200, headers, json.dumps(output)
+            else:
+                headers['content-type'] = 'application/octet-stream'
+                return 200, headers, output
+        else:
+            return 404, headers, "404 Not Found"
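
Because GetJobOutput now returns 404 until the simulated job completes, callers should poll first. A minimal polling helper, assuming the boto3 client from the sketches above (the helper name is ours, not moto's):

    import time

    def wait_for_job_output(client, vault_name, job_id, timeout=30):
        # Poll DescribeJob until the mocked job flips to Completed, then fetch
        # the output: JSON for inventory jobs, raw bytes for archive retrievals
        deadline = time.time() + timeout
        while time.time() < deadline:
            job = client.describe_job(accountId="-", vaultName=vault_name, jobId=job_id)
            if job["Completed"]:
                return client.get_job_output(accountId="-", vaultName=vault_name, jobId=job_id)
            time.sleep(1)
        raise RuntimeError("glacier job %s never completed" % job_id)
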
5
moto/glue/__init__.py
Normal file
@@ -0,0 +1,5 @@
+from __future__ import unicode_literals
+from .models import glue_backend
+
+glue_backends = {"global": glue_backend}
+mock_glue = glue_backend.decorator
61
moto/glue/exceptions.py
Normal file
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+from moto.core.exceptions import JsonRESTError
+
+
+class GlueClientError(JsonRESTError):
+    code = 400
+
+
+class AlreadyExistsException(GlueClientError):
+    def __init__(self, typ):
+        super(GlueClientError, self).__init__(
+            'AlreadyExistsException',
+            '%s already exists.' % (typ),
+        )
+
+
+class DatabaseAlreadyExistsException(AlreadyExistsException):
+    def __init__(self):
+        super(DatabaseAlreadyExistsException, self).__init__('Database')
+
+
+class TableAlreadyExistsException(AlreadyExistsException):
+    def __init__(self):
+        super(TableAlreadyExistsException, self).__init__('Table')
+
+
+class PartitionAlreadyExistsException(AlreadyExistsException):
+    def __init__(self):
+        super(PartitionAlreadyExistsException, self).__init__('Partition')
+
+
+class EntityNotFoundException(GlueClientError):
+    def __init__(self, msg):
+        super(GlueClientError, self).__init__(
+            'EntityNotFoundException',
+            msg,
+        )
+
+
+class DatabaseNotFoundException(EntityNotFoundException):
+    def __init__(self, db):
+        super(DatabaseNotFoundException, self).__init__(
+            'Database %s not found.' % db,
+        )
+
+
+class TableNotFoundException(EntityNotFoundException):
+    def __init__(self, tbl):
+        super(TableNotFoundException, self).__init__(
+            'Table %s not found.' % tbl,
+        )
+
+
+class PartitionNotFoundException(EntityNotFoundException):
+    def __init__(self):
+        super(PartitionNotFoundException, self).__init__("Cannot find partition.")
+
+
+class VersionNotFoundException(EntityNotFoundException):
+    def __init__(self):
+        super(VersionNotFoundException, self).__init__("Version not found.")
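
Each of these serialises to the JSON error shape Glue clients expect, so through botocore they surface as ClientError. A sketch, assuming the matching response handlers (not shown in this diff) are wired to the backend:

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_glue

    @mock_glue
    def demo():
        client = boto3.client("glue", region_name="us-east-1")
        try:
            client.get_database(Name="nope")
        except ClientError as e:
            # Raised server-side as DatabaseNotFoundException above
            print(e.response["Error"]["Code"])     # "EntityNotFoundException"
            print(e.response["Error"]["Message"])  # "Database nope not found."

    demo()
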
166
moto/glue/models.py
Normal file
@@ -0,0 +1,166 @@
+from __future__ import unicode_literals
+
+import time
+
+from moto.core import BaseBackend, BaseModel
+from moto.compat import OrderedDict
+from .exceptions import (
+    JsonRESTError,
+    DatabaseAlreadyExistsException,
+    DatabaseNotFoundException,
+    TableAlreadyExistsException,
+    TableNotFoundException,
+    PartitionAlreadyExistsException,
+    PartitionNotFoundException,
+    VersionNotFoundException,
+)
+
+
+class GlueBackend(BaseBackend):
+
+    def __init__(self):
+        self.databases = OrderedDict()
+
+    def create_database(self, database_name):
+        if database_name in self.databases:
+            raise DatabaseAlreadyExistsException()
+
+        database = FakeDatabase(database_name)
+        self.databases[database_name] = database
+        return database
+
+    def get_database(self, database_name):
+        try:
+            return self.databases[database_name]
+        except KeyError:
+            raise DatabaseNotFoundException(database_name)
+
+    def create_table(self, database_name, table_name, table_input):
+        database = self.get_database(database_name)
+
+        if table_name in database.tables:
+            raise TableAlreadyExistsException()
+
+        table = FakeTable(database_name, table_name, table_input)
+        database.tables[table_name] = table
+        return table
+
+    def get_table(self, database_name, table_name):
+        database = self.get_database(database_name)
+        try:
+            return database.tables[table_name]
+        except KeyError:
+            raise TableNotFoundException(table_name)
+
+    def get_tables(self, database_name):
+        database = self.get_database(database_name)
+        return [table for table_name, table in database.tables.items()]
+
+    def delete_table(self, database_name, table_name):
+        database = self.get_database(database_name)
+        try:
+            del database.tables[table_name]
+        except KeyError:
+            raise TableNotFoundException(table_name)
+        return {}
+
+
+class FakeDatabase(BaseModel):
+
+    def __init__(self, database_name):
+        self.name = database_name
+        self.tables = OrderedDict()
+
+
+class FakeTable(BaseModel):
+
+    def __init__(self, database_name, table_name, table_input):
+        self.database_name = database_name
+        self.name = table_name
+        self.partitions = OrderedDict()
+        self.versions = []
+        self.update(table_input)
+
+    def update(self, table_input):
+        self.versions.append(table_input)
+
+    def get_version(self, ver):
+        try:
+            if not isinstance(ver, int):
+                # "1" goes to [0]
+                ver = int(ver) - 1
+        except ValueError as e:
+            raise JsonRESTError("InvalidInputException", str(e))
+
+        try:
+            return self.versions[ver]
+        except IndexError:
+            raise VersionNotFoundException()
+
+    def as_dict(self, version=-1):
+        obj = {
+            'DatabaseName': self.database_name,
+            'Name': self.name,
+        }
+        obj.update(self.get_version(version))
+        return obj
+
+    def create_partition(self, partiton_input):
+        partition = FakePartition(self.database_name, self.name, partiton_input)
+        key = str(partition.values)
+        if key in self.partitions:
+            raise PartitionAlreadyExistsException()
+        self.partitions[str(partition.values)] = partition
+
+    def get_partitions(self):
+        return [p for str_part_values, p in self.partitions.items()]
+
+    def get_partition(self, values):
+        try:
+            return self.partitions[str(values)]
+        except KeyError:
+            raise PartitionNotFoundException()
+
+    def update_partition(self, old_values, partiton_input):
+        partition = FakePartition(self.database_name, self.name, partiton_input)
+        key = str(partition.values)
+        if old_values == partiton_input['Values']:
+            # Altering a partition in place. Don't remove it so the order of
+            # returned partitions doesn't change
+            if key not in self.partitions:
+                raise PartitionNotFoundException()
+        else:
+            removed = self.partitions.pop(str(old_values), None)
+            if removed is None:
+                raise PartitionNotFoundException()
+            if key in self.partitions:
+                # Trying to update to overwrite a partition that exists
+                raise PartitionAlreadyExistsException()
+        self.partitions[key] = partition
+
+    def delete_partition(self, values):
+        try:
+            del self.partitions[str(values)]
+        except KeyError:
+            raise PartitionNotFoundException()
+
+
+class FakePartition(BaseModel):
+    def __init__(self, database_name, table_name, partiton_input):
+        self.creation_time = time.time()
+        self.database_name = database_name
+        self.table_name = table_name
+        self.partition_input = partiton_input
+        self.values = self.partition_input.get('Values', [])
+
+    def as_dict(self):
+        obj = {
+            'DatabaseName': self.database_name,
+            'TableName': self.table_name,
+            'CreationTime': self.creation_time,
+        }
+        obj.update(self.partition_input)
+        return obj
+
+
+glue_backend = GlueBackend()
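
A short usage sketch of the new backend through the public mock (the database and table shapes are trimmed to illustrative minimums):

    import boto3
    from moto import mock_glue

    @mock_glue
    def demo():
        client = boto3.client("glue", region_name="us-east-1")
        client.create_database(DatabaseInput={"Name": "db1"})
        client.create_table(
            DatabaseName="db1",
            TableInput={"Name": "t1", "StorageDescriptor": {"Columns": []}},
        )
        # get_table returns FakeTable.as_dict(): DatabaseName, Name, plus the
        # latest entry appended to FakeTable.versions
        print(client.get_table(DatabaseName="db1", Name="t1")["Table"]["Name"])  # "t1"

    demo()
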
Some files were not shown because too many files have changed in this diff