Merge pull request #31 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2020-02-24 08:15:40 +00:00 committed by GitHub
commit f009f7da8c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
76 changed files with 5733 additions and 2847 deletions

View File

@ -26,11 +26,12 @@ install:
fi fi
docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh & docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh &
fi fi
travis_retry pip install -r requirements-dev.txt
travis_retry pip install boto==2.45.0 travis_retry pip install boto==2.45.0
travis_retry pip install boto3 travis_retry pip install boto3
travis_retry pip install dist/moto*.gz travis_retry pip install dist/moto*.gz
travis_retry pip install coveralls==1.1 travis_retry pip install coveralls==1.1
travis_retry pip install -r requirements-dev.txt travis_retry pip install coverage==4.5.4
if [ "$TEST_SERVER_MODE" = "true" ]; then if [ "$TEST_SERVER_MODE" = "true" ]; then
python wait_for.py python wait_for.py

View File

@ -450,6 +450,16 @@ boto3.resource(
) )
``` ```
### Caveats
The standalone server has some caveats with some services. The following services
require that you update your hosts file for your code to work properly:
1. `s3-control`
For the above services, this is required because the hostname is in the form of `AWS_ACCOUNT_ID.localhost`.
As a result, you need to add that entry to your hosts file for your tests to function properly.
## Install ## Install

View File

@ -56,9 +56,10 @@ author = 'Steve Pulec'
# built documents. # built documents.
# #
# The short X.Y version. # The short X.Y version.
version = '0.4.10' import moto
version = moto.__version__
# The full version, including alpha/beta/rc tags. # The full version, including alpha/beta/rc tags.
release = '0.4.10' release = moto.__version__
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.

View File

@ -24,8 +24,7 @@ For example, we have the following code we want to test:
.. sourcecode:: python .. sourcecode:: python
import boto import boto3
from boto.s3.key import Key
class MyModel(object): class MyModel(object):
def __init__(self, name, value): def __init__(self, name, value):
@ -33,11 +32,8 @@ For example, we have the following code we want to test:
self.value = value self.value = value
def save(self): def save(self):
conn = boto.connect_s3() s3 = boto3.client('s3', region_name='us-east-1')
bucket = conn.get_bucket('mybucket') s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value)
k = Key(bucket)
k.key = self.name
k.set_contents_from_string(self.value)
There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment.
@ -48,20 +44,23 @@ With a decorator wrapping, all the calls to S3 are automatically mocked out.
.. sourcecode:: python .. sourcecode:: python
import boto import boto3
from moto import mock_s3 from moto import mock_s3
from mymodule import MyModel from mymodule import MyModel
@mock_s3 @mock_s3
def test_my_model_save(): def test_my_model_save():
conn = boto.connect_s3() conn = boto3.resource('s3', region_name='us-east-1')
# We need to create the bucket since this is all in Moto's 'virtual' AWS account # We need to create the bucket since this is all in Moto's 'virtual' AWS account
conn.create_bucket('mybucket') conn.create_bucket(Bucket='mybucket')
model_instance = MyModel('steve', 'is awesome') model_instance = MyModel('steve', 'is awesome')
model_instance.save() model_instance.save()
assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' body = conn.Object('mybucket', 'steve').get()[
'Body'].read().decode("utf-8")
assert body == 'is awesome'
Context manager Context manager
~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~
@ -72,13 +71,16 @@ Same as the Decorator, every call inside the ``with`` statement is mocked out.
def test_my_model_save(): def test_my_model_save():
with mock_s3(): with mock_s3():
conn = boto.connect_s3() conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket('mybucket') conn.create_bucket(Bucket='mybucket')
model_instance = MyModel('steve', 'is awesome') model_instance = MyModel('steve', 'is awesome')
model_instance.save() model_instance.save()
assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' body = conn.Object('mybucket', 'steve').get()[
'Body'].read().decode("utf-8")
assert body == 'is awesome'
Raw Raw
~~~ ~~~
@ -91,13 +93,16 @@ You can also start and stop the mocking manually.
mock = mock_s3() mock = mock_s3()
mock.start() mock.start()
conn = boto.connect_s3() conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket('mybucket') conn.create_bucket(Bucket='mybucket')
model_instance = MyModel('steve', 'is awesome') model_instance = MyModel('steve', 'is awesome')
model_instance.save() model_instance.save()
assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' body = conn.Object('mybucket', 'steve').get()[
'Body'].read().decode("utf-8")
assert body == 'is awesome'
mock.stop() mock.stop()

View File

@ -85,6 +85,15 @@ class NoMethodDefined(BadRequestException):
) )
class AuthorizerNotFoundException(RESTError):
    """Raised when a request references an API Gateway authorizer id that
    does not exist.  Maps to HTTP 404 with AWS's "NotFoundException" code."""
    code = 404
    def __init__(self):
        super(AuthorizerNotFoundException, self).__init__(
            "NotFoundException", "Invalid Authorizer identifier specified"
        )
class StageNotFoundException(RESTError): class StageNotFoundException(RESTError):
code = 404 code = 404

View File

@ -28,6 +28,7 @@ from .exceptions import (
InvalidHttpEndpoint, InvalidHttpEndpoint,
InvalidResourcePathException, InvalidResourcePathException,
InvalidRequestInput, InvalidRequestInput,
AuthorizerNotFoundException,
StageNotFoundException, StageNotFoundException,
RoleNotSpecified, RoleNotSpecified,
NoIntegrationDefined, NoIntegrationDefined,
@ -117,14 +118,15 @@ class Resource(BaseModel):
self.api_id = api_id self.api_id = api_id
self.path_part = path_part self.path_part = path_part
self.parent_id = parent_id self.parent_id = parent_id
self.resource_methods = {"GET": {}} self.resource_methods = {}
def to_dict(self): def to_dict(self):
response = { response = {
"path": self.get_path(), "path": self.get_path(),
"id": self.id, "id": self.id,
"resourceMethods": self.resource_methods,
} }
if self.resource_methods:
response["resourceMethods"] = self.resource_methods
if self.parent_id: if self.parent_id:
response["parentId"] = self.parent_id response["parentId"] = self.parent_id
response["pathPart"] = self.path_part response["pathPart"] = self.path_part
@ -186,6 +188,54 @@ class Resource(BaseModel):
return self.resource_methods[method_type].pop("methodIntegration") return self.resource_methods[method_type].pop("methodIntegration")
class Authorizer(BaseModel, dict):
    """Dict-backed model of an API Gateway authorizer.

    Only the fields the caller actually supplied are stored, mirroring how
    the real service omits unset fields from its responses.
    """

    # (constructor kwarg, response key) pairs copied verbatim when truthy.
    # Insertion order matters for serialized responses; keep it stable.
    _OPTIONAL_FIELDS = (
        ("provider_arns", "providerARNs"),
        ("auth_type", "authType"),
        ("authorizer_uri", "authorizerUri"),
        ("authorizer_credentials", "authorizerCredentials"),
        ("identity_source", "identitySource"),
        ("identity_validation_expression", "identityValidationExpression"),
    )

    # Ordered (path fragment, stored key, value transform).  Matching uses
    # substring containment on the patch path, and the order of entries
    # preserves the original dispatch precedence.
    _PATCH_DISPATCH = (
        ("/authorizerUri", "authorizerUri", None),
        ("/authorizerCredentials", "authorizerCredentials", None),
        ("/authorizerResultTtlInSeconds", "authorizerResultTtlInSeconds", int),
        ("/authType", "authType", None),
        ("/identitySource", "identitySource", None),
        ("/identityValidationExpression", "identityValidationExpression", None),
        ("/name", "name", None),
    )

    def __init__(self, id, name, authorizer_type, **kwargs):
        super(Authorizer, self).__init__()
        self["id"] = id
        self["name"] = name
        self["type"] = authorizer_type
        for kwarg_name, response_key in self._OPTIONAL_FIELDS:
            value = kwargs.get(kwarg_name)
            if value:
                self[response_key] = value
        # The TTL key is always present, even when the caller omitted it.
        self["authorizerResultTtlInSeconds"] = kwargs.get("authorizer_result_ttl")

    def apply_operations(self, patch_operations):
        """Apply JSON-patch style replace operations in place; return self."""
        for op in patch_operations:
            path = op["path"]
            for fragment, key, transform in self._PATCH_DISPATCH:
                if fragment in path:
                    value = op["value"]
                    self[key] = transform(value) if transform else value
                    break
            else:
                if "/providerARNs" in path:
                    # TODO: add and remove
                    raise Exception(
                        'Patch operation for "%s" not implemented' % path
                    )
                elif "/type" in path:
                    self["type"] = op["value"]
                else:
                    raise Exception(
                        'Patch operation "%s" not implemented' % op["op"]
                    )
        return self
class Stage(BaseModel, dict): class Stage(BaseModel, dict):
def __init__( def __init__(
self, self,
@ -411,6 +461,7 @@ class RestAPI(BaseModel):
self.tags = kwargs.get("tags") or {} self.tags = kwargs.get("tags") or {}
self.deployments = {} self.deployments = {}
self.authorizers = {}
self.stages = {} self.stages = {}
self.resources = {} self.resources = {}
@ -478,6 +529,34 @@ class RestAPI(BaseModel):
), ),
) )
def create_authorizer(
    self,
    id,
    name,
    authorizer_type,
    provider_arns=None,
    auth_type=None,
    authorizer_uri=None,
    authorizer_credentials=None,
    identity_source=None,
    identiy_validation_expression=None,
    authorizer_result_ttl=None,
):
    """Create an Authorizer, register it under its id, and return it.

    NOTE: the ``identiy_validation_expression`` parameter keeps its historical
    misspelling so existing callers are unaffected, but the value is now
    forwarded under the correctly spelled keyword that ``Authorizer.__init__``
    actually reads -- previously the expression was silently dropped.
    """
    authorizer = Authorizer(
        id=id,
        name=name,
        authorizer_type=authorizer_type,
        provider_arns=provider_arns,
        auth_type=auth_type,
        authorizer_uri=authorizer_uri,
        authorizer_credentials=authorizer_credentials,
        identity_source=identity_source,
        # Fix: Authorizer looks up "identity_validation_expression"; the old
        # misspelled keyword never reached it.
        identity_validation_expression=identiy_validation_expression,
        authorizer_result_ttl=authorizer_result_ttl,
    )
    self.authorizers[id] = authorizer
    return authorizer
def create_stage( def create_stage(
self, self,
name, name,
@ -517,6 +596,9 @@ class RestAPI(BaseModel):
def get_deployment(self, deployment_id): def get_deployment(self, deployment_id):
return self.deployments[deployment_id] return self.deployments[deployment_id]
def get_authorizers(self):
    """Return every authorizer registered on this REST API as a list."""
    return [authorizer for authorizer in self.authorizers.values()]
def get_stages(self): def get_stages(self):
return list(self.stages.values()) return list(self.stages.values())
@ -612,6 +694,46 @@ class APIGatewayBackend(BaseBackend):
) )
return method return method
def get_authorizer(self, restapi_id, authorizer_id):
    """Look up one authorizer; raise AuthorizerNotFoundException when absent."""
    api = self.get_rest_api(restapi_id)
    found = api.authorizers.get(authorizer_id)
    if found is None:
        raise AuthorizerNotFoundException()
    return found
def get_authorizers(self, restapi_id):
    """List all authorizers attached to the given REST API."""
    return self.get_rest_api(restapi_id).get_authorizers()
def create_authorizer(self, restapi_id, name, authorizer_type, **kwargs):
    """Create an authorizer with a freshly generated id and return it."""
    api = self.get_rest_api(restapi_id)
    new_id = create_id()
    # Forward only the optional settings RestAPI.create_authorizer accepts
    # (the misspelled kwarg matches that method's parameter name).
    forwarded = {
        key: kwargs.get(key)
        for key in (
            "provider_arns",
            "auth_type",
            "authorizer_uri",
            "authorizer_credentials",
            "identity_source",
            "identiy_validation_expression",
            "authorizer_result_ttl",
        )
    }
    authorizer = api.create_authorizer(new_id, name, authorizer_type, **forwarded)
    return api.authorizers.get(authorizer["id"])
def update_authorizer(self, restapi_id, authorizer_id, patch_operations):
    """Apply patch operations to an existing authorizer and return it.

    ``get_authorizer`` raises ``AuthorizerNotFoundException`` for an unknown
    id, so the old "create a blank Authorizer on miss" fallback was
    unreachable -- and would have crashed anyway, since ``Authorizer()``
    cannot be constructed without its required arguments.  It is removed.
    """
    authorizer = self.get_authorizer(restapi_id, authorizer_id)
    return authorizer.apply_operations(patch_operations)
def delete_authorizer(self, restapi_id, authorizer_id):
    """Remove the authorizer; raises KeyError if the id is unknown."""
    api = self.get_rest_api(restapi_id)
    api.authorizers.pop(authorizer_id)
def get_stage(self, function_id, stage_name): def get_stage(self, function_id, stage_name):
api = self.get_rest_api(function_id) api = self.get_rest_api(function_id)
stage = api.stages.get(stage_name) stage = api.stages.get(stage_name)

View File

@ -8,11 +8,13 @@ from .exceptions import (
ApiKeyNotFoundException, ApiKeyNotFoundException,
BadRequestException, BadRequestException,
CrossAccountNotAllowed, CrossAccountNotAllowed,
AuthorizerNotFoundException,
StageNotFoundException, StageNotFoundException,
ApiKeyAlreadyExists, ApiKeyAlreadyExists,
) )
API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] API_KEY_SOURCES = ["AUTHORIZER", "HEADER"]
AUTHORIZER_TYPES = ["TOKEN", "REQUEST", "COGNITO_USER_POOLS"]
ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"] ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"]
@ -177,6 +179,88 @@ class APIGatewayResponse(BaseResponse):
) )
return 200, {}, json.dumps(method_response) return 200, {}, json.dumps(method_response)
def restapis_authorizers(self, request, full_url, headers):
    """Dispatch POST (create) and GET (list) on /restapis/{id}/authorizers."""
    self.setup_class(request, full_url, headers)
    restapi_id = self.path.split("/")[2]

    if self.method == "POST":
        name = self._get_param("name")
        authorizer_type = self._get_param("type")

        # Optional creation parameters, absent ones default to None.
        optional = {
            key: self._get_param_with_default_value(key, None)
            for key in (
                "providerARNs",
                "authType",
                "authorizerUri",
                "authorizerCredentials",
                "identitySource",
                "identityValidationExpression",
            )
        }
        authorizer_result_ttl = self._get_param_with_default_value(
            "authorizerResultTtlInSeconds", 300
        )

        # Param validation
        if authorizer_type and authorizer_type not in AUTHORIZER_TYPES:
            return self.error(
                "ValidationException",
                (
                    "1 validation error detected: "
                    "Value '{authorizer_type}' at 'createAuthorizerInput.type' failed "
                    "to satisfy constraint: Member must satisfy enum value set: "
                    "[TOKEN, REQUEST, COGNITO_USER_POOLS]"
                ).format(authorizer_type=authorizer_type),
            )
        authorizer_response = self.backend.create_authorizer(
            restapi_id,
            name,
            authorizer_type,
            provider_arns=optional["providerARNs"],
            auth_type=optional["authType"],
            authorizer_uri=optional["authorizerUri"],
            authorizer_credentials=optional["authorizerCredentials"],
            identity_source=optional["identitySource"],
            identiy_validation_expression=optional["identityValidationExpression"],
            authorizer_result_ttl=authorizer_result_ttl,
        )
    elif self.method == "GET":
        return 200, {}, json.dumps({"item": self.backend.get_authorizers(restapi_id)})
    return 200, {}, json.dumps(authorizer_response)
def authorizers(self, request, full_url, headers):
    """Handle GET / PATCH / DELETE on a single authorizer resource."""
    self.setup_class(request, full_url, headers)
    path_parts = self.path.split("/")
    restapi_id, authorizer_id = path_parts[2], path_parts[4]

    if self.method == "DELETE":
        self.backend.delete_authorizer(restapi_id, authorizer_id)
        return 202, {}, "{}"

    if self.method == "GET":
        try:
            authorizer_response = self.backend.get_authorizer(
                restapi_id, authorizer_id
            )
        except AuthorizerNotFoundException as error:
            # Render the 404 body in the same shape AWS uses.
            return (
                error.code,
                {},
                '{{"message":"{0}","code":"{1}"}}'.format(
                    error.message, error.error_type
                ),
            )
    elif self.method == "PATCH":
        authorizer_response = self.backend.update_authorizer(
            restapi_id, authorizer_id, self._get_param("patchOperations")
        )
    return 200, {}, json.dumps(authorizer_response)
def restapis_stages(self, request, full_url, headers): def restapis_stages(self, request, full_url, headers):
self.setup_class(request, full_url, headers) self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/") url_path_parts = self.path.split("/")

View File

@ -7,6 +7,8 @@ url_paths = {
"{0}/restapis$": APIGatewayResponse().restapis, "{0}/restapis$": APIGatewayResponse().restapis,
"{0}/restapis/(?P<function_id>[^/]+)/?$": APIGatewayResponse().restapis_individual, "{0}/restapis/(?P<function_id>[^/]+)/?$": APIGatewayResponse().restapis_individual,
"{0}/restapis/(?P<function_id>[^/]+)/resources$": APIGatewayResponse().resources, "{0}/restapis/(?P<function_id>[^/]+)/resources$": APIGatewayResponse().resources,
"{0}/restapis/(?P<function_id>[^/]+)/authorizers$": APIGatewayResponse().restapis_authorizers,
"{0}/restapis/(?P<function_id>[^/]+)/authorizers/(?P<authorizer_id>[^/]+)/?$": APIGatewayResponse().authorizers,
"{0}/restapis/(?P<function_id>[^/]+)/stages$": APIGatewayResponse().restapis_stages, "{0}/restapis/(?P<function_id>[^/]+)/stages$": APIGatewayResponse().restapis_stages,
"{0}/restapis/(?P<function_id>[^/]+)/stages/(?P<stage_name>[^/]+)/?$": APIGatewayResponse().stages, "{0}/restapis/(?P<function_id>[^/]+)/stages/(?P<stage_name>[^/]+)/?$": APIGatewayResponse().stages,
"{0}/restapis/(?P<function_id>[^/]+)/deployments$": APIGatewayResponse().deployments, "{0}/restapis/(?P<function_id>[^/]+)/deployments$": APIGatewayResponse().deployments,

View File

@ -14,6 +14,7 @@ from jose import jws
from moto.compat import OrderedDict from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
from .exceptions import ( from .exceptions import (
GroupExistsException, GroupExistsException,
NotAuthorizedError, NotAuthorizedError,
@ -69,6 +70,9 @@ class CognitoIdpUserPool(BaseModel):
def __init__(self, region, name, extended_config): def __init__(self, region, name, extended_config):
self.region = region self.region = region
self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex))
self.arn = "arn:aws:cognito-idp:{}:{}:userpool/{}".format(
self.region, DEFAULT_ACCOUNT_ID, self.id
)
self.name = name self.name = name
self.status = None self.status = None
self.extended_config = extended_config or {} self.extended_config = extended_config or {}
@ -91,6 +95,7 @@ class CognitoIdpUserPool(BaseModel):
def _base_json(self): def _base_json(self):
return { return {
"Id": self.id, "Id": self.id,
"Arn": self.arn,
"Name": self.name, "Name": self.name,
"Status": self.status, "Status": self.status,
"CreationDate": time.mktime(self.creation_date.timetuple()), "CreationDate": time.mktime(self.creation_date.timetuple()),
@ -564,12 +569,17 @@ class CognitoIdpBackend(BaseBackend):
user.groups.discard(group) user.groups.discard(group)
# User # User
def admin_create_user(self, user_pool_id, username, temporary_password, attributes): def admin_create_user(
self, user_pool_id, username, message_action, temporary_password, attributes
):
user_pool = self.user_pools.get(user_pool_id) user_pool = self.user_pools.get(user_pool_id)
if not user_pool: if not user_pool:
raise ResourceNotFoundError(user_pool_id) raise ResourceNotFoundError(user_pool_id)
if username in user_pool.users: if message_action and message_action == "RESEND":
if username not in user_pool.users:
raise UserNotFoundError(username)
elif username in user_pool.users:
raise UsernameExistsException(username) raise UsernameExistsException(username)
user = CognitoIdpUser( user = CognitoIdpUser(

View File

@ -259,10 +259,12 @@ class CognitoIdpResponse(BaseResponse):
def admin_create_user(self): def admin_create_user(self):
user_pool_id = self._get_param("UserPoolId") user_pool_id = self._get_param("UserPoolId")
username = self._get_param("Username") username = self._get_param("Username")
message_action = self._get_param("MessageAction")
temporary_password = self._get_param("TemporaryPassword") temporary_password = self._get_param("TemporaryPassword")
user = cognitoidp_backends[self.region].admin_create_user( user = cognitoidp_backends[self.region].admin_create_user(
user_pool_id, user_pool_id,
username, username,
message_action,
temporary_password, temporary_password,
self._get_param("UserAttributes", []), self._get_param("UserAttributes", []),
) )

View File

@ -43,7 +43,7 @@ from moto.config.exceptions import (
) )
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.s3.config import s3_config_query from moto.s3.config import s3_account_public_access_block_query, s3_config_query
from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
@ -58,7 +58,10 @@ POP_STRINGS = [
DEFAULT_PAGE_SIZE = 100 DEFAULT_PAGE_SIZE = 100
# Map the Config resource type to a backend: # Map the Config resource type to a backend:
RESOURCE_MAP = {"AWS::S3::Bucket": s3_config_query} RESOURCE_MAP = {
"AWS::S3::Bucket": s3_config_query,
"AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query,
}
def datetime2int(date): def datetime2int(date):
@ -867,16 +870,17 @@ class ConfigBackend(BaseBackend):
backend_region=backend_query_region, backend_region=backend_query_region,
) )
result = { resource_identifiers = []
"resourceIdentifiers": [ for identifier in identifiers:
{ item = {"resourceType": identifier["type"], "resourceId": identifier["id"]}
"resourceType": identifier["type"],
"resourceId": identifier["id"], # Some resource types lack names:
"resourceName": identifier["name"], if identifier.get("name"):
} item["resourceName"] = identifier["name"]
for identifier in identifiers
] resource_identifiers.append(item)
}
result = {"resourceIdentifiers": resource_identifiers}
if new_token: if new_token:
result["nextToken"] = new_token result["nextToken"] = new_token
@ -927,18 +931,21 @@ class ConfigBackend(BaseBackend):
resource_region=resource_region, resource_region=resource_region,
) )
result = { resource_identifiers = []
"ResourceIdentifiers": [ for identifier in identifiers:
{ item = {
"SourceAccountId": DEFAULT_ACCOUNT_ID, "SourceAccountId": DEFAULT_ACCOUNT_ID,
"SourceRegion": identifier["region"], "SourceRegion": identifier["region"],
"ResourceType": identifier["type"], "ResourceType": identifier["type"],
"ResourceId": identifier["id"], "ResourceId": identifier["id"],
"ResourceName": identifier["name"], }
}
for identifier in identifiers if identifier.get("name"):
] item["ResourceName"] = identifier["name"]
}
resource_identifiers.append(item)
result = {"ResourceIdentifiers": resource_identifiers}
if new_token: if new_token:
result["NextToken"] = new_token result["NextToken"] = new_token

View File

@ -606,12 +606,13 @@ class ConfigQueryModel(object):
As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter
from there. It may be valuable to make this a concatenation of the region and resource name. from there. It may be valuable to make this a concatenation of the region and resource name.
:param resource_region: :param resource_ids: A list of resource IDs
:param resource_ids: :param resource_name: The individual name of a resource
:param resource_name: :param limit: How many per page
:param limit: :param next_token: The item that will page on
:param next_token:
:param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query.
:param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a
non-aggregated query.
:return: This should return a list of Dicts that have the following fields: :return: This should return a list of Dicts that have the following fields:
[ [
{ {

View File

@ -1406,6 +1406,7 @@ class DynamoDBBackend(BaseBackend):
range_value = None range_value = None
item = table.get_item(hash_value, range_value) item = table.get_item(hash_value, range_value)
orig_item = copy.deepcopy(item)
if not expected: if not expected:
expected = {} expected = {}
@ -1439,6 +1440,8 @@ class DynamoDBBackend(BaseBackend):
) )
else: else:
item.update_with_attribute_updates(attribute_updates) item.update_with_attribute_updates(attribute_updates)
if table.stream_shard is not None:
table.stream_shard.add(orig_item, item)
return item return item
def delete_item( def delete_item(

View File

@ -86,6 +86,9 @@ class FakeStep(BaseModel):
self.start_datetime = None self.start_datetime = None
self.state = state self.state = state
def start(self):
    # Record the moment this EMR step began executing (tz-aware UTC).
    self.start_datetime = datetime.now(pytz.utc)
class FakeCluster(BaseModel): class FakeCluster(BaseModel):
def __init__( def __init__(
@ -204,6 +207,8 @@ class FakeCluster(BaseModel):
self.start_cluster() self.start_cluster()
self.run_bootstrap_actions() self.run_bootstrap_actions()
if self.steps:
self.steps[0].start()
@property @property
def instance_groups(self): def instance_groups(self):

View File

@ -835,7 +835,7 @@ LIST_STEPS_TEMPLATE = """<ListStepsResponse xmlns="http://elasticmapreduce.amazo
{% if step.end_datetime is not none %} {% if step.end_datetime is not none %}
<EndDateTime>{{ step.end_datetime.isoformat() }}</EndDateTime> <EndDateTime>{{ step.end_datetime.isoformat() }}</EndDateTime>
{% endif %} {% endif %}
{% if step.ready_datetime is not none %} {% if step.start_datetime is not none %}
<StartDateTime>{{ step.start_datetime.isoformat() }}</StartDateTime> <StartDateTime>{{ step.start_datetime.isoformat() }}</StartDateTime>
{% endif %} {% endif %}
</Timeline> </Timeline>

View File

@ -6,6 +6,7 @@ from boto3 import Session
from moto.core.exceptions import JsonRESTError from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.sts.models import ACCOUNT_ID from moto.sts.models import ACCOUNT_ID
from moto.utilities.tagging_service import TaggingService
class Rule(BaseModel): class Rule(BaseModel):
@ -104,6 +105,7 @@ class EventsBackend(BaseBackend):
self.region_name = region_name self.region_name = region_name
self.event_buses = {} self.event_buses = {}
self.event_sources = {} self.event_sources = {}
self.tagger = TaggingService()
self._add_default_event_bus() self._add_default_event_bus()
@ -141,6 +143,9 @@ class EventsBackend(BaseBackend):
def delete_rule(self, name): def delete_rule(self, name):
self.rules_order.pop(self.rules_order.index(name)) self.rules_order.pop(self.rules_order.index(name))
arn = self.rules.get(name).arn
if self.tagger.has_tags(arn):
self.tagger.delete_all_tags_for_resource(arn)
return self.rules.pop(name) is not None return self.rules.pop(name) is not None
def describe_rule(self, name): def describe_rule(self, name):
@ -361,6 +366,32 @@ class EventsBackend(BaseBackend):
self.event_buses.pop(name, None) self.event_buses.pop(name, None)
def list_tags_for_resource(self, arn):
    """Return the tags on the rule named by the trailing ARN segment."""
    rule = self.rules.get(arn.split("/")[-1])
    if rule is not None:
        return self.tagger.list_tags_for_resource(rule.arn)
    raise JsonRESTError(
        "ResourceNotFoundException", "An entity that you specified does not exist."
    )
def tag_resource(self, arn, tags):
    """Attach tags to the rule named in the ARN; empty dict on success."""
    rule = self.rules.get(arn.split("/")[-1])
    if rule is not None:
        self.tagger.tag_resource(rule.arn, tags)
        return {}
    raise JsonRESTError(
        "ResourceNotFoundException", "An entity that you specified does not exist."
    )
def untag_resource(self, arn, tag_names):
    """Remove the named tag keys from the rule; empty dict on success."""
    rule = self.rules.get(arn.split("/")[-1])
    if rule is not None:
        self.tagger.untag_resource_using_names(rule.arn, tag_names)
        return {}
    raise JsonRESTError(
        "ResourceNotFoundException", "An entity that you specified does not exist."
    )
events_backends = {} events_backends = {}
for region in Session().get_available_regions("events"): for region in Session().get_available_regions("events"):

View File

@ -297,3 +297,26 @@ class EventsHandler(BaseResponse):
self.events_backend.delete_event_bus(name) self.events_backend.delete_event_bus(name)
return "", self.response_headers return "", self.response_headers
def list_tags_for_resource(self):
    """HTTP handler: list tags for the ARN supplied in the request."""
    result = self.events_backend.list_tags_for_resource(
        self._get_param("ResourceARN")
    )
    return json.dumps(result), self.response_headers
def tag_resource(self):
    """HTTP handler: add the supplied tags to the given resource."""
    arn = self._get_param("ResourceARN")
    tags = self._get_param("Tags")
    return (
        json.dumps(self.events_backend.tag_resource(arn, tags)),
        self.response_headers,
    )
def untag_resource(self):
    """HTTP handler: remove the named tag keys from the given resource."""
    arn = self._get_param("ResourceARN")
    keys = self._get_param("TagKeys")
    return (
        json.dumps(self.events_backend.untag_resource(arn, keys)),
        self.response_headers,
    )

View File

@ -22,6 +22,15 @@ class InvalidRequestException(IoTClientError):
) )
class InvalidStateTransitionException(IoTClientError):
    """IoT client error (HTTP 409) for a state change that is not allowed
    from the current state; callers may supply a more specific message."""
    def __init__(self, msg=None):
        self.code = 409
        super(InvalidStateTransitionException, self).__init__(
            "InvalidStateTransitionException",
            msg or "An attempt was made to change to an invalid state.",
        )
class VersionConflictException(IoTClientError): class VersionConflictException(IoTClientError):
def __init__(self, name): def __init__(self, name):
self.code = 409 self.code = 409

View File

@ -17,6 +17,7 @@ from .exceptions import (
DeleteConflictException, DeleteConflictException,
ResourceNotFoundException, ResourceNotFoundException,
InvalidRequestException, InvalidRequestException,
InvalidStateTransitionException,
VersionConflictException, VersionConflictException,
) )
@ -29,7 +30,7 @@ class FakeThing(BaseModel):
self.attributes = attributes self.attributes = attributes
self.arn = "arn:aws:iot:%s:1:thing/%s" % (self.region_name, thing_name) self.arn = "arn:aws:iot:%s:1:thing/%s" % (self.region_name, thing_name)
self.version = 1 self.version = 1
# TODO: we need to handle 'version'? # TODO: we need to handle "version"?
# for iot-data # for iot-data
self.thing_shadow = None self.thing_shadow = None
@ -174,18 +175,19 @@ class FakeCertificate(BaseModel):
class FakePolicy(BaseModel): class FakePolicy(BaseModel):
def __init__(self, name, document, region_name): def __init__(self, name, document, region_name, default_version_id="1"):
self.name = name self.name = name
self.document = document self.document = document
self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, name) self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, name)
self.version = "1" # TODO: handle version self.default_version_id = default_version_id
self.versions = [FakePolicyVersion(self.name, document, True, region_name)]
def to_get_dict(self): def to_get_dict(self):
return { return {
"policyName": self.name, "policyName": self.name,
"policyArn": self.arn, "policyArn": self.arn,
"policyDocument": self.document, "policyDocument": self.document,
"defaultVersionId": self.version, "defaultVersionId": self.default_version_id,
} }
def to_dict_at_creation(self): def to_dict_at_creation(self):
@ -193,13 +195,52 @@ class FakePolicy(BaseModel):
"policyName": self.name, "policyName": self.name,
"policyArn": self.arn, "policyArn": self.arn,
"policyDocument": self.document, "policyDocument": self.document,
"policyVersionId": self.version, "policyVersionId": self.default_version_id,
} }
def to_dict(self): def to_dict(self):
return {"policyName": self.name, "policyArn": self.arn} return {"policyName": self.name, "policyArn": self.arn}
class FakePolicyVersion(object):
    """One version of an IoT policy document.

    Creation/modification timestamps are pinned to fixed instants
    (Jan 1 / Jan 2 2015) so responses are deterministic in tests.
    """

    # Fixed timestamps shared by every version instance.
    _CREATE_TS = time.mktime(datetime(2015, 1, 1).timetuple())
    _UPDATE_TS = time.mktime(datetime(2015, 1, 2).timetuple())

    def __init__(self, policy_name, document, is_default, region_name):
        self.name = policy_name
        self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name)
        # A falsy document (None/empty) is normalized to an empty dict.
        self.document = document or {}
        self.is_default = is_default
        self.version_id = "1"
        self.create_datetime = self._CREATE_TS
        self.last_modified_datetime = self._UPDATE_TS

    def to_get_dict(self):
        """Full representation, as returned by get_policy_version."""
        return dict(
            policyName=self.name,
            policyArn=self.arn,
            policyDocument=self.document,
            policyVersionId=self.version_id,
            isDefaultVersion=self.is_default,
            creationDate=self.create_datetime,
            lastModifiedDate=self.last_modified_datetime,
            generationId=self.version_id,
        )

    def to_dict_at_creation(self):
        """Subset returned immediately after create_policy_version."""
        return dict(
            policyArn=self.arn,
            policyDocument=self.document,
            policyVersionId=self.version_id,
            isDefaultVersion=self.is_default,
        )

    def to_dict(self):
        """Summary entry used when listing policy versions."""
        return dict(
            versionId=self.version_id,
            isDefaultVersion=self.is_default,
            createDate=self.create_datetime,
        )
class FakeJob(BaseModel): class FakeJob(BaseModel):
JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]"
JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
@ -226,12 +267,14 @@ class FakeJob(BaseModel):
self.targets = targets self.targets = targets
self.document_source = document_source self.document_source = document_source
self.document = document self.document = document
self.force = False
self.description = description self.description = description
self.presigned_url_config = presigned_url_config self.presigned_url_config = presigned_url_config
self.target_selection = target_selection self.target_selection = target_selection
self.job_executions_rollout_config = job_executions_rollout_config self.job_executions_rollout_config = job_executions_rollout_config
self.status = None # IN_PROGRESS | CANCELED | COMPLETED self.status = "QUEUED" # IN_PROGRESS | CANCELED | COMPLETED
self.comment = None self.comment = None
self.reason_code = None
self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.created_at = time.mktime(datetime(2015, 1, 1).timetuple())
self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
self.completed_at = None self.completed_at = None
@ -258,9 +301,11 @@ class FakeJob(BaseModel):
"jobExecutionsRolloutConfig": self.job_executions_rollout_config, "jobExecutionsRolloutConfig": self.job_executions_rollout_config,
"status": self.status, "status": self.status,
"comment": self.comment, "comment": self.comment,
"forceCanceled": self.force,
"reasonCode": self.reason_code,
"createdAt": self.created_at, "createdAt": self.created_at,
"lastUpdatedAt": self.last_updated_at, "lastUpdatedAt": self.last_updated_at,
"completedAt": self.completedAt, "completedAt": self.completed_at,
"jobProcessDetails": self.job_process_details, "jobProcessDetails": self.job_process_details,
"documentParameters": self.document_parameters, "documentParameters": self.document_parameters,
"document": self.document, "document": self.document,
@ -275,12 +320,67 @@ class FakeJob(BaseModel):
return regex_match and length_match return regex_match and length_match
class FakeJobExecution(BaseModel):
def __init__(
self,
job_id,
thing_arn,
status="QUEUED",
force_canceled=False,
status_details_map={},
):
self.job_id = job_id
self.status = status # IN_PROGRESS | CANCELED | COMPLETED
self.force_canceled = force_canceled
self.status_details_map = status_details_map
self.thing_arn = thing_arn
self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple())
self.started_at = time.mktime(datetime(2015, 1, 1).timetuple())
self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
self.execution_number = 123
self.version_number = 123
self.approximate_seconds_before_time_out = 123
def to_get_dict(self):
obj = {
"jobId": self.job_id,
"status": self.status,
"forceCanceled": self.force_canceled,
"statusDetails": {"detailsMap": self.status_details_map},
"thingArn": self.thing_arn,
"queuedAt": self.queued_at,
"startedAt": self.started_at,
"lastUpdatedAt": self.last_updated_at,
"executionNumber": self.execution_number,
"versionNumber": self.version_number,
"approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out,
}
return obj
def to_dict(self):
obj = {
"jobId": self.job_id,
"thingArn": self.thing_arn,
"jobExecutionSummary": {
"status": self.status,
"queuedAt": self.queued_at,
"startedAt": self.started_at,
"lastUpdatedAt": self.last_updated_at,
"executionNumber": self.execution_number,
},
}
return obj
class IoTBackend(BaseBackend): class IoTBackend(BaseBackend):
def __init__(self, region_name=None): def __init__(self, region_name=None):
super(IoTBackend, self).__init__() super(IoTBackend, self).__init__()
self.region_name = region_name self.region_name = region_name
self.things = OrderedDict() self.things = OrderedDict()
self.jobs = OrderedDict() self.jobs = OrderedDict()
self.job_executions = OrderedDict()
self.thing_types = OrderedDict() self.thing_types = OrderedDict()
self.thing_groups = OrderedDict() self.thing_groups = OrderedDict()
self.certificates = OrderedDict() self.certificates = OrderedDict()
@ -535,6 +635,28 @@ class IoTBackend(BaseBackend):
self.policies[policy.name] = policy self.policies[policy.name] = policy
return policy return policy
def attach_policy(self, policy_name, target):
principal = self._get_principal(target)
policy = self.get_policy(policy_name)
k = (target, policy_name)
if k in self.principal_policies:
return
self.principal_policies[k] = (principal, policy)
def detach_policy(self, policy_name, target):
# this may raises ResourceNotFoundException
self._get_principal(target)
self.get_policy(policy_name)
k = (target, policy_name)
if k not in self.principal_policies:
raise ResourceNotFoundException()
del self.principal_policies[k]
def list_attached_policies(self, target):
policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target]
return policies
def list_policies(self): def list_policies(self):
policies = self.policies.values() policies = self.policies.values()
return policies return policies
@ -559,6 +681,60 @@ class IoTBackend(BaseBackend):
policy = self.get_policy(policy_name) policy = self.get_policy(policy_name)
del self.policies[policy.name] del self.policies[policy.name]
def create_policy_version(self, policy_name, policy_document, set_as_default):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
version = FakePolicyVersion(
policy_name, policy_document, set_as_default, self.region_name
)
policy.versions.append(version)
version.version_id = "{0}".format(len(policy.versions))
if set_as_default:
self.set_default_policy_version(policy_name, version.version_id)
return version
def set_default_policy_version(self, policy_name, version_id):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
for version in policy.versions:
if version.version_id == version_id:
version.is_default = True
policy.default_version_id = version.version_id
policy.document = version.document
else:
version.is_default = False
def get_policy_version(self, policy_name, version_id):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
for version in policy.versions:
if version.version_id == version_id:
return version
raise ResourceNotFoundException()
def list_policy_versions(self, policy_name):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
return policy.versions
def delete_policy_version(self, policy_name, version_id):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
if version_id == policy.default_version_id:
raise InvalidRequestException(
"Cannot delete the default version of a policy"
)
for i, v in enumerate(policy.versions):
if v.version_id == version_id:
del policy.versions[i]
return
raise ResourceNotFoundException()
def _get_principal(self, principal_arn): def _get_principal(self, principal_arn):
""" """
raise ResourceNotFoundException raise ResourceNotFoundException
@ -574,14 +750,6 @@ class IoTBackend(BaseBackend):
pass pass
raise ResourceNotFoundException() raise ResourceNotFoundException()
def attach_policy(self, policy_name, target):
principal = self._get_principal(target)
policy = self.get_policy(policy_name)
k = (target, policy_name)
if k in self.principal_policies:
return
self.principal_policies[k] = (principal, policy)
def attach_principal_policy(self, policy_name, principal_arn): def attach_principal_policy(self, policy_name, principal_arn):
principal = self._get_principal(principal_arn) principal = self._get_principal(principal_arn)
policy = self.get_policy(policy_name) policy = self.get_policy(policy_name)
@ -590,15 +758,6 @@ class IoTBackend(BaseBackend):
return return
self.principal_policies[k] = (principal, policy) self.principal_policies[k] = (principal, policy)
def detach_policy(self, policy_name, target):
# this may raises ResourceNotFoundException
self._get_principal(target)
self.get_policy(policy_name)
k = (target, policy_name)
if k not in self.principal_policies:
raise ResourceNotFoundException()
del self.principal_policies[k]
def detach_principal_policy(self, policy_name, principal_arn): def detach_principal_policy(self, policy_name, principal_arn):
# this may raises ResourceNotFoundException # this may raises ResourceNotFoundException
self._get_principal(principal_arn) self._get_principal(principal_arn)
@ -819,11 +978,187 @@ class IoTBackend(BaseBackend):
self.region_name, self.region_name,
) )
self.jobs[job_id] = job self.jobs[job_id] = job
for thing_arn in targets:
thing_name = thing_arn.split(":")[-1].split("/")[-1]
job_execution = FakeJobExecution(job_id, thing_arn)
self.job_executions[(job_id, thing_name)] = job_execution
return job.job_arn, job_id, description return job.job_arn, job_id, description
def describe_job(self, job_id): def describe_job(self, job_id):
jobs = [_ for _ in self.jobs.values() if _.job_id == job_id]
if len(jobs) == 0:
raise ResourceNotFoundException()
return jobs[0]
def delete_job(self, job_id, force):
job = self.jobs[job_id]
if job.status == "IN_PROGRESS" and force:
del self.jobs[job_id]
elif job.status != "IN_PROGRESS":
del self.jobs[job_id]
else:
raise InvalidStateTransitionException()
def cancel_job(self, job_id, reason_code, comment, force):
job = self.jobs[job_id]
job.reason_code = reason_code if reason_code is not None else job.reason_code
job.comment = comment if comment is not None else job.comment
job.force = force if force is not None and force != job.force else job.force
job.status = "CANCELED"
if job.status == "IN_PROGRESS" and force:
self.jobs[job_id] = job
elif job.status != "IN_PROGRESS":
self.jobs[job_id] = job
else:
raise InvalidStateTransitionException()
return job
def get_job_document(self, job_id):
return self.jobs[job_id] return self.jobs[job_id]
def list_jobs(
self,
status,
target_selection,
max_results,
token,
thing_group_name,
thing_group_id,
):
# TODO: implement filters
all_jobs = [_.to_dict() for _ in self.jobs.values()]
filtered_jobs = all_jobs
if token is None:
jobs = filtered_jobs[0:max_results]
next_token = str(max_results) if len(filtered_jobs) > max_results else None
else:
token = int(token)
jobs = filtered_jobs[token : token + max_results]
next_token = (
str(token + max_results)
if len(filtered_jobs) > token + max_results
else None
)
return jobs, next_token
def describe_job_execution(self, job_id, thing_name, execution_number):
try:
job_execution = self.job_executions[(job_id, thing_name)]
except KeyError:
raise ResourceNotFoundException()
if job_execution is None or (
execution_number is not None
and job_execution.execution_number != execution_number
):
raise ResourceNotFoundException()
return job_execution
def cancel_job_execution(
self, job_id, thing_name, force, expected_version, status_details
):
job_execution = self.job_executions[(job_id, thing_name)]
if job_execution is None:
raise ResourceNotFoundException()
job_execution.force_canceled = (
force if force is not None else job_execution.force_canceled
)
# TODO: implement expected_version and status_details (at most 10 can be specified)
if job_execution.status == "IN_PROGRESS" and force:
job_execution.status = "CANCELED"
self.job_executions[(job_id, thing_name)] = job_execution
elif job_execution.status != "IN_PROGRESS":
job_execution.status = "CANCELED"
self.job_executions[(job_id, thing_name)] = job_execution
else:
raise InvalidStateTransitionException()
def delete_job_execution(self, job_id, thing_name, execution_number, force):
job_execution = self.job_executions[(job_id, thing_name)]
if job_execution.execution_number != execution_number:
raise ResourceNotFoundException()
if job_execution.status == "IN_PROGRESS" and force:
del self.job_executions[(job_id, thing_name)]
elif job_execution.status != "IN_PROGRESS":
del self.job_executions[(job_id, thing_name)]
else:
raise InvalidStateTransitionException()
def list_job_executions_for_job(self, job_id, status, max_results, next_token):
job_executions = [
self.job_executions[je].to_dict()
for je in self.job_executions
if je[0] == job_id
]
if status is not None:
job_executions = list(
filter(
lambda elem: status in elem["status"] and elem["status"] == status,
job_executions,
)
)
token = next_token
if token is None:
job_executions = job_executions[0:max_results]
next_token = str(max_results) if len(job_executions) > max_results else None
else:
token = int(token)
job_executions = job_executions[token : token + max_results]
next_token = (
str(token + max_results)
if len(job_executions) > token + max_results
else None
)
return job_executions, next_token
def list_job_executions_for_thing(
self, thing_name, status, max_results, next_token
):
job_executions = [
self.job_executions[je].to_dict()
for je in self.job_executions
if je[1] == thing_name
]
if status is not None:
job_executions = list(
filter(
lambda elem: status in elem["status"] and elem["status"] == status,
job_executions,
)
)
token = next_token
if token is None:
job_executions = job_executions[0:max_results]
next_token = str(max_results) if len(job_executions) > max_results else None
else:
token = int(token)
job_executions = job_executions[token : token + max_results]
next_token = (
str(token + max_results)
if len(job_executions) > token + max_results
else None
)
return job_executions, next_token
iot_backends = {} iot_backends = {}
for region in Session().get_available_regions("iot"): for region in Session().get_available_regions("iot"):

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import json import json
from six.moves.urllib.parse import unquote
from moto.core.responses import BaseResponse from moto.core.responses import BaseResponse
from .models import iot_backends from .models import iot_backends
@ -141,6 +142,8 @@ class IoTResponse(BaseResponse):
createdAt=job.created_at, createdAt=job.created_at,
description=job.description, description=job.description,
documentParameters=job.document_parameters, documentParameters=job.document_parameters,
forceCanceled=job.force,
reasonCode=job.reason_code,
jobArn=job.job_arn, jobArn=job.job_arn,
jobExecutionsRolloutConfig=job.job_executions_rollout_config, jobExecutionsRolloutConfig=job.job_executions_rollout_config,
jobId=job.job_id, jobId=job.job_id,
@ -154,6 +157,127 @@ class IoTResponse(BaseResponse):
) )
) )
def delete_job(self):
job_id = self._get_param("jobId")
force = self._get_bool_param("force")
self.iot_backend.delete_job(job_id=job_id, force=force)
return json.dumps(dict())
def cancel_job(self):
job_id = self._get_param("jobId")
reason_code = self._get_param("reasonCode")
comment = self._get_param("comment")
force = self._get_bool_param("force")
job = self.iot_backend.cancel_job(
job_id=job_id, reason_code=reason_code, comment=comment, force=force
)
return json.dumps(job.to_dict())
def get_job_document(self):
job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
if job.document is not None:
return json.dumps({"document": job.document})
else:
# job.document_source is not None:
# TODO: needs to be implemented to get document_source's content from S3
return json.dumps({"document": ""})
def list_jobs(self):
status = (self._get_param("status"),)
target_selection = (self._get_param("targetSelection"),)
max_results = self._get_int_param(
"maxResults", 50
) # not the default, but makes testing easier
previous_next_token = self._get_param("nextToken")
thing_group_name = (self._get_param("thingGroupName"),)
thing_group_id = self._get_param("thingGroupId")
jobs, next_token = self.iot_backend.list_jobs(
status=status,
target_selection=target_selection,
max_results=max_results,
token=previous_next_token,
thing_group_name=thing_group_name,
thing_group_id=thing_group_id,
)
return json.dumps(dict(jobs=jobs, nextToken=next_token))
def describe_job_execution(self):
job_id = self._get_param("jobId")
thing_name = self._get_param("thingName")
execution_number = self._get_int_param("executionNumber")
job_execution = self.iot_backend.describe_job_execution(
job_id=job_id, thing_name=thing_name, execution_number=execution_number
)
return json.dumps(dict(execution=job_execution.to_get_dict()))
def cancel_job_execution(self):
job_id = self._get_param("jobId")
thing_name = self._get_param("thingName")
force = self._get_bool_param("force")
expected_version = self._get_int_param("expectedVersion")
status_details = self._get_param("statusDetails")
self.iot_backend.cancel_job_execution(
job_id=job_id,
thing_name=thing_name,
force=force,
expected_version=expected_version,
status_details=status_details,
)
return json.dumps(dict())
def delete_job_execution(self):
job_id = self._get_param("jobId")
thing_name = self._get_param("thingName")
execution_number = self._get_int_param("executionNumber")
force = self._get_bool_param("force")
self.iot_backend.delete_job_execution(
job_id=job_id,
thing_name=thing_name,
execution_number=execution_number,
force=force,
)
return json.dumps(dict())
def list_job_executions_for_job(self):
job_id = self._get_param("jobId")
status = self._get_param("status")
max_results = self._get_int_param(
"maxResults", 50
) # not the default, but makes testing easier
next_token = self._get_param("nextToken")
job_executions, next_token = self.iot_backend.list_job_executions_for_job(
job_id=job_id, status=status, max_results=max_results, next_token=next_token
)
return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))
def list_job_executions_for_thing(self):
thing_name = self._get_param("thingName")
status = self._get_param("status")
max_results = self._get_int_param(
"maxResults", 50
) # not the default, but makes testing easier
next_token = self._get_param("nextToken")
job_executions, next_token = self.iot_backend.list_job_executions_for_thing(
thing_name=thing_name,
status=status,
max_results=max_results,
next_token=next_token,
)
return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))
def create_keys_and_certificate(self): def create_keys_and_certificate(self):
set_as_active = self._get_bool_param("setAsActive") set_as_active = self._get_bool_param("setAsActive")
cert, key_pair = self.iot_backend.create_keys_and_certificate( cert, key_pair = self.iot_backend.create_keys_and_certificate(
@ -241,12 +365,61 @@ class IoTResponse(BaseResponse):
self.iot_backend.delete_policy(policy_name=policy_name) self.iot_backend.delete_policy(policy_name=policy_name)
return json.dumps(dict()) return json.dumps(dict())
def create_policy_version(self):
policy_name = self._get_param("policyName")
policy_document = self._get_param("policyDocument")
set_as_default = self._get_bool_param("setAsDefault")
policy_version = self.iot_backend.create_policy_version(
policy_name, policy_document, set_as_default
)
return json.dumps(dict(policy_version.to_dict_at_creation()))
def set_default_policy_version(self):
policy_name = self._get_param("policyName")
version_id = self._get_param("policyVersionId")
self.iot_backend.set_default_policy_version(policy_name, version_id)
return json.dumps(dict())
def get_policy_version(self):
policy_name = self._get_param("policyName")
version_id = self._get_param("policyVersionId")
policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
return json.dumps(dict(policy_version.to_get_dict()))
def list_policy_versions(self):
policy_name = self._get_param("policyName")
policiy_versions = self.iot_backend.list_policy_versions(
policy_name=policy_name
)
return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions]))
def delete_policy_version(self):
policy_name = self._get_param("policyName")
version_id = self._get_param("policyVersionId")
self.iot_backend.delete_policy_version(policy_name, version_id)
return json.dumps(dict())
def attach_policy(self): def attach_policy(self):
policy_name = self._get_param("policyName") policy_name = self._get_param("policyName")
target = self._get_param("target") target = self._get_param("target")
self.iot_backend.attach_policy(policy_name=policy_name, target=target) self.iot_backend.attach_policy(policy_name=policy_name, target=target)
return json.dumps(dict()) return json.dumps(dict())
def list_attached_policies(self):
principal = unquote(self._get_param("target"))
# marker = self._get_param("marker")
# page_size = self._get_int_param("pageSize")
policies = self.iot_backend.list_attached_policies(target=principal)
# TODO: implement pagination in the future
next_marker = None
return json.dumps(
dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)
)
def attach_principal_policy(self): def attach_principal_policy(self):
policy_name = self._get_param("policyName") policy_name = self._get_param("policyName")
principal = self.headers.get("x-amzn-iot-principal") principal = self.headers.get("x-amzn-iot-principal")

View File

@ -8,7 +8,8 @@ from boto3 import Session
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time from moto.core.utils import unix_time
from moto.utilities.tagging_service import TaggingService
from moto.core.exceptions import JsonRESTError
from moto.iam.models import ACCOUNT_ID from moto.iam.models import ACCOUNT_ID
from .utils import decrypt, encrypt, generate_key_id, generate_master_key from .utils import decrypt, encrypt, generate_key_id, generate_master_key
@ -16,7 +17,7 @@ from .utils import decrypt, encrypt, generate_key_id, generate_master_key
class Key(BaseModel): class Key(BaseModel):
def __init__( def __init__(
self, policy, key_usage, customer_master_key_spec, description, tags, region self, policy, key_usage, customer_master_key_spec, description, region
): ):
self.id = generate_key_id() self.id = generate_key_id()
self.creation_date = unix_time() self.creation_date = unix_time()
@ -29,7 +30,6 @@ class Key(BaseModel):
self.account_id = ACCOUNT_ID self.account_id = ACCOUNT_ID
self.key_rotation_status = False self.key_rotation_status = False
self.deletion_date = None self.deletion_date = None
self.tags = tags or {}
self.key_material = generate_master_key() self.key_material = generate_master_key()
self.origin = "AWS_KMS" self.origin = "AWS_KMS"
self.key_manager = "CUSTOMER" self.key_manager = "CUSTOMER"
@ -111,11 +111,12 @@ class Key(BaseModel):
key_usage="ENCRYPT_DECRYPT", key_usage="ENCRYPT_DECRYPT",
customer_master_key_spec="SYMMETRIC_DEFAULT", customer_master_key_spec="SYMMETRIC_DEFAULT",
description=properties["Description"], description=properties["Description"],
tags=properties.get("Tags"), tags=properties.get("Tags", []),
region=region_name, region=region_name,
) )
key.key_rotation_status = properties["EnableKeyRotation"] key.key_rotation_status = properties["EnableKeyRotation"]
key.enabled = properties["Enabled"] key.enabled = properties["Enabled"]
return key return key
def get_cfn_attribute(self, attribute_name): def get_cfn_attribute(self, attribute_name):
@ -130,32 +131,26 @@ class KmsBackend(BaseBackend):
def __init__(self): def __init__(self):
self.keys = {} self.keys = {}
self.key_to_aliases = defaultdict(set) self.key_to_aliases = defaultdict(set)
self.tagger = TaggingService(keyName="TagKey", valueName="TagValue")
def create_key( def create_key(
self, policy, key_usage, customer_master_key_spec, description, tags, region self, policy, key_usage, customer_master_key_spec, description, tags, region
): ):
key = Key( key = Key(policy, key_usage, customer_master_key_spec, description, region)
policy, key_usage, customer_master_key_spec, description, tags, region
)
self.keys[key.id] = key self.keys[key.id] = key
if tags is not None and len(tags) > 0:
self.tag_resource(key.id, tags)
return key return key
def update_key_description(self, key_id, description): def update_key_description(self, key_id, description):
key = self.keys[self.get_key_id(key_id)] key = self.keys[self.get_key_id(key_id)]
key.description = description key.description = description
def tag_resource(self, key_id, tags):
key = self.keys[self.get_key_id(key_id)]
key.tags = tags
def list_resource_tags(self, key_id):
key = self.keys[self.get_key_id(key_id)]
return key.tags
def delete_key(self, key_id): def delete_key(self, key_id):
if key_id in self.keys: if key_id in self.keys:
if key_id in self.key_to_aliases: if key_id in self.key_to_aliases:
self.key_to_aliases.pop(key_id) self.key_to_aliases.pop(key_id)
self.tagger.delete_all_tags_for_resource(key_id)
return self.keys.pop(key_id) return self.keys.pop(key_id)
@ -325,6 +320,32 @@ class KmsBackend(BaseBackend):
return plaintext, ciphertext_blob, arn return plaintext, ciphertext_blob, arn
def list_resource_tags(self, key_id):
if key_id in self.keys:
return self.tagger.list_tags_for_resource(key_id)
raise JsonRESTError(
"NotFoundException",
"The request was rejected because the specified entity or resource could not be found.",
)
def tag_resource(self, key_id, tags):
if key_id in self.keys:
self.tagger.tag_resource(key_id, tags)
return {}
raise JsonRESTError(
"NotFoundException",
"The request was rejected because the specified entity or resource could not be found.",
)
def untag_resource(self, key_id, tag_names):
if key_id in self.keys:
self.tagger.untag_resource_using_names(key_id, tag_names)
return {}
raise JsonRESTError(
"NotFoundException",
"The request was rejected because the specified entity or resource could not be found.",
)
kms_backends = {} kms_backends = {}
for region in Session().get_available_regions("kms"): for region in Session().get_available_regions("kms"):

View File

@ -144,17 +144,27 @@ class KmsResponse(BaseResponse):
self._validate_cmk_id(key_id) self._validate_cmk_id(key_id)
self.kms_backend.tag_resource(key_id, tags) result = self.kms_backend.tag_resource(key_id, tags)
return json.dumps({}) return json.dumps(result)
def untag_resource(self):
"""https://docs.aws.amazon.com/kms/latest/APIReference/API_UntagResource.html"""
key_id = self.parameters.get("KeyId")
tag_names = self.parameters.get("TagKeys")
self._validate_cmk_id(key_id)
result = self.kms_backend.untag_resource(key_id, tag_names)
return json.dumps(result)
def list_resource_tags(self): def list_resource_tags(self):
"""https://docs.aws.amazon.com/kms/latest/APIReference/API_ListResourceTags.html""" """https://docs.aws.amazon.com/kms/latest/APIReference/API_ListResourceTags.html"""
key_id = self.parameters.get("KeyId") key_id = self.parameters.get("KeyId")
self._validate_cmk_id(key_id) self._validate_cmk_id(key_id)
tags = self.kms_backend.list_resource_tags(key_id) tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({"Tags": tags, "NextMarker": None, "Truncated": False}) tags.update({"NextMarker": None, "Truncated": False})
return json.dumps(tags)
def describe_key(self): def describe_key(self):
"""https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html""" """https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html"""

View File

@ -318,7 +318,7 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
# KMS # KMS
def get_kms_tags(kms_key_id): def get_kms_tags(kms_key_id):
result = [] result = []
for tag in self.kms_backend.list_resource_tags(kms_key_id): for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags", []):
result.append({"Key": tag["TagKey"], "Value": tag["TagValue"]}) result.append({"Key": tag["TagKey"], "Value": tag["TagValue"]})
return result return result

View File

@ -1,8 +1,13 @@
import datetime
import json import json
import time
from boto3 import Session
from moto.core.exceptions import InvalidNextTokenException from moto.core.exceptions import InvalidNextTokenException
from moto.core.models import ConfigQueryModel from moto.core.models import ConfigQueryModel
from moto.s3 import s3_backends from moto.s3 import s3_backends
from moto.s3.models import get_moto_s3_account_id
class S3ConfigQuery(ConfigQueryModel): class S3ConfigQuery(ConfigQueryModel):
@ -118,4 +123,146 @@ class S3ConfigQuery(ConfigQueryModel):
return config_data return config_data
class S3AccountPublicAccessBlockConfigQuery(ConfigQueryModel):
def list_config_service_resources(
self,
resource_ids,
resource_name,
limit,
next_token,
backend_region=None,
resource_region=None,
):
# For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID
# There is no resource name -- it should be a blank string "" if provided.
# The resource name can only ever be None or an empty string:
if resource_name is not None and resource_name != "":
return [], None
pab = None
account_id = get_moto_s3_account_id()
regions = [region for region in Session().get_available_regions("config")]
# If a resource ID was passed in, then filter accordingly:
if resource_ids:
for id in resource_ids:
if account_id == id:
pab = self.backends["global"].account_public_access_block
break
# Otherwise, just grab the one from the backend:
if not resource_ids:
pab = self.backends["global"].account_public_access_block
# If it's not present, then return nothing
if not pab:
return [], None
# Filter on regions (and paginate on them as well):
if backend_region:
pab_list = [backend_region]
elif resource_region:
# Invalid region?
if resource_region not in regions:
return [], None
pab_list = [resource_region]
# Aggregated query where no regions were supplied so return them all:
else:
pab_list = regions
# Pagination logic:
sorted_regions = sorted(pab_list)
new_token = None
# Get the start:
if not next_token:
start = 0
else:
# Tokens for this moto feature is just the region-name:
# For OTHER non-global resource types, it's the region concatenated with the resource ID.
if next_token not in sorted_regions:
raise InvalidNextTokenException()
start = sorted_regions.index(next_token)
# Get the list of items to collect:
pab_list = sorted_regions[start : (start + limit)]
if len(sorted_regions) > (start + limit):
new_token = sorted_regions[start + limit]
return (
[
{
"type": "AWS::S3::AccountPublicAccessBlock",
"id": account_id,
"region": region,
}
for region in pab_list
],
new_token,
)
def get_config_resource(
self, resource_id, resource_name=None, backend_region=None, resource_region=None
):
# Do we even have this defined?
if not self.backends["global"].account_public_access_block:
return None
# Resource name can only ever be "" if it's supplied:
if resource_name is not None and resource_name != "":
return None
# Are we filtering based on region?
account_id = get_moto_s3_account_id()
regions = [region for region in Session().get_available_regions("config")]
# Is the resource ID correct?:
if account_id == resource_id:
if backend_region:
pab_region = backend_region
# Invalid region?
elif resource_region not in regions:
return None
else:
pab_region = resource_region
else:
return None
# Format the PAB to the AWS Config format:
creation_time = datetime.datetime.utcnow()
config_data = {
"version": "1.3",
"accountId": account_id,
"configurationItemCaptureTime": str(creation_time),
"configurationItemStatus": "OK",
"configurationStateId": str(
int(time.mktime(creation_time.timetuple()))
), # PY2 and 3 compatible
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": account_id,
"awsRegion": pab_region,
"availabilityZone": "Not Applicable",
"configuration": self.backends[
"global"
].account_public_access_block.to_config_dict(),
"supplementaryConfiguration": {},
}
# The 'configuration' field is also a JSON string:
config_data["configuration"] = json.dumps(config_data["configuration"])
return config_data
s3_config_query = S3ConfigQuery(s3_backends) s3_config_query = S3ConfigQuery(s3_backends)
s3_account_public_access_block_query = S3AccountPublicAccessBlockConfigQuery(
s3_backends
)

View File

@ -359,3 +359,12 @@ class InvalidPublicAccessBlockConfiguration(S3ClientError):
*args, *args,
**kwargs **kwargs
) )
class WrongPublicAccessBlockAccountIdError(S3ClientError):
    """Raised when the x-amz-account-id on an s3-control request does not
    match the Moto account ID.  Real S3 responds with a plain 403."""

    code = 403

    def __init__(self):
        super(WrongPublicAccessBlockAccountIdError, self).__init__(
            "AccessDenied", "Access Denied"
        )

View File

@ -19,7 +19,7 @@ import uuid
import six import six
from bisect import insort from bisect import insort
from moto.core import BaseBackend, BaseModel from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import ( from .exceptions import (
BucketAlreadyExists, BucketAlreadyExists,
@ -37,6 +37,7 @@ from .exceptions import (
CrossLocationLoggingProhibitted, CrossLocationLoggingProhibitted,
NoSuchPublicAccessBlockConfiguration, NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration,
WrongPublicAccessBlockAccountIdError,
) )
from .utils import clean_key_name, _VersionedKeyStore from .utils import clean_key_name, _VersionedKeyStore
@ -58,6 +59,13 @@ DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()
OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a" OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
def get_moto_s3_account_id():
    """Return the AWS account ID Moto's S3 backend runs under.

    This indirection makes it easy to mock AWS Account IDs when using AWS
    Config -- simply mock.patch ACCOUNT_ID here, and Config gets it for free.
    """
    return ACCOUNT_ID
class FakeDeleteMarker(BaseModel): class FakeDeleteMarker(BaseModel):
def __init__(self, key): def __init__(self, key):
self.key = key self.key = key
@ -1163,6 +1171,7 @@ class FakeBucket(BaseModel):
class S3Backend(BaseBackend): class S3Backend(BaseBackend):
def __init__(self): def __init__(self):
self.buckets = {} self.buckets = {}
self.account_public_access_block = None
def create_bucket(self, bucket_name, region_name): def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets: if bucket_name in self.buckets:
@ -1264,6 +1273,16 @@ class S3Backend(BaseBackend):
return bucket.public_access_block return bucket.public_access_block
def get_account_public_access_block(self, account_id):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not self.account_public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return self.account_public_access_block
def set_key( def set_key(
self, bucket_name, key_name, value, storage=None, etag=None, multipart=None self, bucket_name, key_name, value, storage=None, etag=None, multipart=None
): ):
@ -1356,6 +1375,13 @@ class S3Backend(BaseBackend):
bucket = self.get_bucket(bucket_name) bucket = self.get_bucket(bucket_name)
bucket.public_access_block = None bucket.public_access_block = None
    def delete_account_public_access_block(self, account_id):
        """Remove the account-level PublicAccessBlock configuration.

        :raises WrongPublicAccessBlockAccountIdError: when *account_id* is not
            the Moto account ID.
        """
        # The account ID should equal the account id that is set for Moto:
        if account_id != ACCOUNT_ID:
            raise WrongPublicAccessBlockAccountIdError()

        self.account_public_access_block = None
def put_bucket_notification_configuration(self, bucket_name, notification_config): def put_bucket_notification_configuration(self, bucket_name, notification_config):
bucket = self.get_bucket(bucket_name) bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config) bucket.set_notification_configuration(notification_config)
@ -1384,6 +1410,21 @@ class S3Backend(BaseBackend):
pub_block_config.get("RestrictPublicBuckets"), pub_block_config.get("RestrictPublicBuckets"),
) )
def put_account_public_access_block(self, account_id, pub_block_config):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
self.account_public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def initiate_multipart(self, bucket_name, key_name, metadata): def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name) bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata) new_multipart = FakeMultipart(key_name, metadata)

View File

@ -4,6 +4,7 @@ import re
import sys import sys
import six import six
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys
from six.moves.urllib.parse import parse_qs, urlparse, unquote from six.moves.urllib.parse import parse_qs, urlparse, unquote
@ -123,6 +124,11 @@ ACTION_MAP = {
"uploadId": "PutObject", "uploadId": "PutObject",
}, },
}, },
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
} }
@ -168,7 +174,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
or host.startswith("localhost") or host.startswith("localhost")
or host.startswith("localstack") or host.startswith("localstack")
or re.match(r"^[^.]+$", host) or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local$", host) or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
): ):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or # (3) local host names that do not contain a "." (e.g., Docker container host names), or
@ -220,7 +226,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
# Depending on which calling format the client is using, we don't know # Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check # if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request): if self.subdomain_based_buckets(request):
return self.key_response(request, full_url, headers) return self.key_or_control_response(request, full_url, headers)
else: else:
# Using path-based buckets # Using path-based buckets
return self.bucket_response(request, full_url, headers) return self.bucket_response(request, full_url, headers)
@ -287,7 +293,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return self._bucket_response_post(request, body, bucket_name) return self._bucket_response_post(request, body, bucket_name)
else: else:
raise NotImplementedError( raise NotImplementedError(
"Method {0} has not been impelemented in the S3 backend yet".format( "Method {0} has not been implemented in the S3 backend yet".format(
method method
) )
) )
@ -595,6 +601,20 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
pass pass
return False return False
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put( def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring self, request, body, region_name, bucket_name, querystring
): ):
@ -673,19 +693,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
raise e raise e
elif "publicAccessBlock" in querystring: elif "publicAccessBlock" in querystring:
parsed_xml = xmltodict.parse(body) pab_config = self._parse_pab_config(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
self.backend.put_bucket_public_access_block( self.backend.put_bucket_public_access_block(
bucket_name, parsed_xml["PublicAccessBlockConfiguration"] bucket_name, pab_config["PublicAccessBlockConfiguration"]
) )
return "" return ""
@ -870,15 +880,21 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
) )
return 206, response_headers, response_content[begin : end + 1] return 206, response_headers, response_content[begin : end + 1]
def key_response(self, request, full_url, headers): def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method self.method = request.method
self.path = self._get_path(request) self.path = self._get_path(request)
self.headers = request.headers self.headers = request.headers
if "host" not in self.headers: if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc self.headers["host"] = urlparse(full_url).netloc
response_headers = {} response_headers = {}
try: try:
response = self._key_response(request, full_url, headers) # Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error: except S3ClientError as s3error:
response = s3error.code, {}, s3error.description response = s3error.code, {}, s3error.description
@ -894,6 +910,94 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
) )
return status_code, response_headers, response_content return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers): def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url) parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True) query = parse_qs(parsed_url.query, keep_blank_values=True)
@ -1098,6 +1202,10 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if mdirective is not None and mdirective == "REPLACE": if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers) metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True) new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
new_key.set_tagging(tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE) template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict) response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key) return 200, response_headers, template.render(key=new_key)

View File

@ -13,7 +13,7 @@ url_paths = {
# subdomain key of path-based bucket # subdomain key of path-based bucket
"{0}/(?P<key_or_bucket_name>[^/]+)/?$": S3ResponseInstance.ambiguous_response, "{0}/(?P<key_or_bucket_name>[^/]+)/?$": S3ResponseInstance.ambiguous_response,
# path-based bucket + key # path-based bucket + key
"{0}/(?P<bucket_name_path>[^/]+)/(?P<key_name>.+)": S3ResponseInstance.key_response, "{0}/(?P<bucket_name_path>[^/]+)/(?P<key_name>.+)": S3ResponseInstance.key_or_control_response,
# subdomain bucket + key with empty first part of path # subdomain bucket + key with empty first part of path
"{0}//(?P<key_name>.*)$": S3ResponseInstance.key_response, "{0}//(?P<key_name>.*)$": S3ResponseInstance.key_or_control_response,
} }

View File

@ -127,6 +127,10 @@ class WorkflowExecution(BaseModel):
"executionInfo": self.to_medium_dict(), "executionInfo": self.to_medium_dict(),
"executionConfiguration": {"taskList": {"name": self.task_list}}, "executionConfiguration": {"taskList": {"name": self.task_list}},
} }
# info
if self.execution_status == "CLOSED":
hsh["executionInfo"]["closeStatus"] = self.close_status
hsh["executionInfo"]["closeTimestamp"] = self.close_timestamp
# configuration # configuration
for key in self._configuration_keys: for key in self._configuration_keys:
attr = camelcase_to_underscores(key) attr = camelcase_to_underscores(key)

View File

@ -8,6 +8,8 @@ class WorkflowType(GenericType):
"defaultChildPolicy", "defaultChildPolicy",
"defaultExecutionStartToCloseTimeout", "defaultExecutionStartToCloseTimeout",
"defaultTaskStartToCloseTimeout", "defaultTaskStartToCloseTimeout",
"defaultTaskPriority",
"defaultLambdaRole",
] ]
@property @property

View File

@ -300,6 +300,8 @@ class SWFResponse(BaseResponse):
default_execution_start_to_close_timeout = self._params.get( default_execution_start_to_close_timeout = self._params.get(
"defaultExecutionStartToCloseTimeout" "defaultExecutionStartToCloseTimeout"
) )
default_task_priority = self._params.get("defaultTaskPriority")
default_lambda_role = self._params.get("defaultLambdaRole")
description = self._params.get("description") description = self._params.get("description")
self._check_string(domain) self._check_string(domain)
@ -309,10 +311,10 @@ class SWFResponse(BaseResponse):
self._check_none_or_string(default_child_policy) self._check_none_or_string(default_child_policy)
self._check_none_or_string(default_task_start_to_close_timeout) self._check_none_or_string(default_task_start_to_close_timeout)
self._check_none_or_string(default_execution_start_to_close_timeout) self._check_none_or_string(default_execution_start_to_close_timeout)
self._check_none_or_string(default_task_priority)
self._check_none_or_string(default_lambda_role)
self._check_none_or_string(description) self._check_none_or_string(description)
# TODO: add defaultTaskPriority when boto gets to support it
# TODO: add defaultLambdaRole when boto gets to support it
self.swf_backend.register_type( self.swf_backend.register_type(
"workflow", "workflow",
domain, domain,
@ -322,6 +324,8 @@ class SWFResponse(BaseResponse):
default_child_policy=default_child_policy, default_child_policy=default_child_policy,
default_task_start_to_close_timeout=default_task_start_to_close_timeout, default_task_start_to_close_timeout=default_task_start_to_close_timeout,
default_execution_start_to_close_timeout=default_execution_start_to_close_timeout, default_execution_start_to_close_timeout=default_execution_start_to_close_timeout,
default_task_priority=default_task_priority,
default_lambda_role=default_lambda_role,
description=description, description=description,
) )
return "" return ""

View File

View File

@ -0,0 +1,62 @@
class TaggingService:
    """Generic, reusable tag store keyed by resource ARN.

    Different AWS services use different wire names for the tag list and its
    key/value fields (e.g. "Tags"/"Key"/"Value" vs "TagList"/"TagKey"/
    "TagValue"), so those names are configurable per instance.
    """

    def __init__(self, tagName="Tags", keyName="Key", valueName="Value"):
        self.tagName = tagName
        self.keyName = keyName
        self.valueName = valueName
        # arn -> {tag key -> tag value (None when no value was supplied)}
        self.tags = {}

    def list_tags_for_resource(self, arn):
        """Return the tags for *arn* as {tagName: [{keyName: k, valueName: v}, ...]}.

        An untagged ARN yields an empty list.
        """
        result = [
            {self.keyName: key, self.valueName: value}
            for key, value in self.tags.get(arn, {}).items()
        ]
        return {self.tagName: result}

    def delete_all_tags_for_resource(self, arn):
        """Drop every tag for *arn*.  Raises KeyError if *arn* was never tagged."""
        del self.tags[arn]

    def has_tags(self, arn):
        """Return True when *arn* has a (possibly emptied) tag entry."""
        return arn in self.tags

    def tag_resource(self, arn, tags):
        """Merge *tags* (a list of {keyName[, valueName]} dicts) into *arn*'s tags.

        A tag without a value is stored with value None.
        """
        current = self.tags.setdefault(arn, {})
        for tag in tags:
            current[tag[self.keyName]] = tag.get(self.valueName)

    def untag_resource_using_names(self, arn, tag_names):
        """Remove the named tags from *arn*; unknown names are ignored."""
        existing = self.tags.get(arn, {})
        for name in tag_names:
            existing.pop(name, None)

    def untag_resource_using_tags(self, arn, tags):
        """Remove *tags* from *arn*.

        When a tag supplies a value, the stored value must match exactly for
        the tag to be removed; a key-only tag removes unconditionally.
        """
        existing = self.tags.get(arn, {})
        for tag in tags:
            if self.keyName not in tag:
                continue
            key = tag[self.keyName]
            if key not in existing:
                continue
            if self.valueName in tag and existing[key] != tag[self.valueName]:
                # Both key and value provided: only delete on an exact match.
                continue
            del existing[key]

    def extract_tag_names(self, tags):
        """Return the keys of *tags*, skipping entries with no key field."""
        return [tag[self.keyName] for tag in tags if self.keyName in tag]

    def flatten_tag_list(self, tags):
        """Collapse a tag list into a plain {key: value-or-None} dict."""
        return {tag[self.keyName]: tag.get(self.valueName) for tag in tags}

View File

@ -8,7 +8,7 @@ import sure # noqa
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
import responses import responses
from moto import mock_apigateway, settings from moto import mock_apigateway, mock_cognitoidp, settings
from moto.core import ACCOUNT_ID from moto.core import ACCOUNT_ID
from nose.tools import assert_raises from nose.tools import assert_raises
@ -204,12 +204,7 @@ def test_create_resource():
root_resource["ResponseMetadata"].pop("HTTPHeaders", None) root_resource["ResponseMetadata"].pop("HTTPHeaders", None)
root_resource["ResponseMetadata"].pop("RetryAttempts", None) root_resource["ResponseMetadata"].pop("RetryAttempts", None)
root_resource.should.equal( root_resource.should.equal(
{ {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},}
"path": "/",
"id": root_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
"resourceMethods": {"GET": {}},
}
) )
client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users") client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users")
@ -257,7 +252,6 @@ def test_child_resource():
"parentId": users_id, "parentId": users_id,
"id": tags_id, "id": tags_id,
"ResponseMetadata": {"HTTPStatusCode": 200}, "ResponseMetadata": {"HTTPStatusCode": 200},
"resourceMethods": {"GET": {}},
} }
) )
@ -582,6 +576,254 @@ def test_integration_response():
response["methodIntegration"]["integrationResponses"].should.equal({}) response["methodIntegration"]["integrationResponses"].should.equal({})
@mock_apigateway
@mock_cognitoidp
def test_update_authorizer_configuration():
    """An authorizer's type can be patched; unsupported patch paths must raise."""
    client = boto3.client("apigateway", region_name="us-west-2")
    authorizer_name = "my_authorizer"
    response = client.create_rest_api(name="my_api", description="this is my api")
    api_id = response["id"]

    # A COGNITO_USER_POOLS authorizer references a real user-pool ARN.
    cognito_client = boto3.client("cognito-idp", region_name="us-west-2")
    user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[
        "UserPool"
    ]["Arn"]

    response = client.create_authorizer(
        restApiId=api_id,
        name=authorizer_name,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id = response["id"]

    response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
    # createdDate and retry metadata vary per run; drop them before comparing.
    response.pop("createdDate", None)
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)
    response.should.equal(
        {
            "id": authorizer_id,
            "name": authorizer_name,
            "type": "COGNITO_USER_POOLS",
            "providerARNs": [user_pool_arn],
            "identitySource": "method.request.header.Authorization",
            "authorizerResultTtlInSeconds": 300,
            "ResponseMetadata": {"HTTPStatusCode": 200},
        }
    )

    client.update_authorizer(
        restApiId=api_id,
        authorizerId=authorizer_id,
        patchOperations=[{"op": "replace", "path": "/type", "value": "TOKEN"}],
    )

    authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
    authorizer.should.have.key("type").which.should.equal("TOKEN")

    client.update_authorizer(
        restApiId=api_id,
        authorizerId=authorizer_id,
        patchOperations=[{"op": "replace", "path": "/type", "value": "REQUEST"}],
    )

    authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
    authorizer.should.have.key("type").which.should.equal("REQUEST")

    # TODO: implement multi-update tests

    # Patching an unsupported path must fail.
    with assert_raises(Exception):
        client.update_authorizer(
            restApiId=api_id,
            authorizerId=authorizer_id,
            patchOperations=[
                {"op": "add", "path": "/notasetting", "value": "eu-west-1"}
            ],
        )
@mock_apigateway
def test_non_existent_authorizer():
    """get_authorizer with an unknown authorizer id must raise a ClientError."""
    client = boto3.client("apigateway", region_name="us-west-2")
    response = client.create_rest_api(name="my_api", description="this is my api")
    api_id = response["id"]

    client.get_authorizer.when.called_with(
        restApiId=api_id, authorizerId="xxx"
    ).should.throw(ClientError)
@mock_apigateway
@mock_cognitoidp
def test_create_authorizer():
    """Create several COGNITO_USER_POOLS authorizers and read them back
    individually and via get_authorizers."""
    client = boto3.client("apigateway", region_name="us-west-2")
    authorizer_name = "my_authorizer"
    response = client.create_rest_api(name="my_api", description="this is my api")
    api_id = response["id"]
    # A COGNITO_USER_POOLS authorizer references a real user-pool ARN.
    cognito_client = boto3.client("cognito-idp", region_name="us-west-2")
    user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[
        "UserPool"
    ]["Arn"]
    response = client.create_authorizer(
        restApiId=api_id,
        name=authorizer_name,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id = response["id"]
    response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
    # createdDate is hard to match against, remove it
    response.pop("createdDate", None)
    # this is hard to match against, so remove it
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)
    response.should.equal(
        {
            "id": authorizer_id,
            "name": authorizer_name,
            "type": "COGNITO_USER_POOLS",
            "providerARNs": [user_pool_arn],
            "identitySource": "method.request.header.Authorization",
            "authorizerResultTtlInSeconds": 300,
            "ResponseMetadata": {"HTTPStatusCode": 200},
        }
    )
    authorizer_name2 = "my_authorizer2"
    response = client.create_authorizer(
        restApiId=api_id,
        name=authorizer_name2,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id2 = response["id"]
    response = client.get_authorizers(restApiId=api_id)
    # this is hard to match against, so remove it
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)
    # Listing order is not guaranteed; each slot may hold either authorizer.
    response["items"][0]["id"].should.match(
        r"{0}|{1}".format(authorizer_id2, authorizer_id)
    )
    response["items"][1]["id"].should.match(
        r"{0}|{1}".format(authorizer_id2, authorizer_id)
    )
    new_authorizer_name_with_vars = "authorizer_with_vars"
    response = client.create_authorizer(
        restApiId=api_id,
        name=new_authorizer_name_with_vars,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id3 = response["id"]
    # this is hard to match against, so remove it
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)
    response.should.equal(
        {
            "name": new_authorizer_name_with_vars,
            "id": authorizer_id3,
            "type": "COGNITO_USER_POOLS",
            "providerARNs": [user_pool_arn],
            "identitySource": "method.request.header.Authorization",
            "authorizerResultTtlInSeconds": 300,
            "ResponseMetadata": {"HTTPStatusCode": 200},
        }
    )
    stage = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id3)
    stage["name"].should.equal(new_authorizer_name_with_vars)
    stage["id"].should.equal(authorizer_id3)
    stage["type"].should.equal("COGNITO_USER_POOLS")
    stage["providerARNs"].should.equal([user_pool_arn])
    stage["identitySource"].should.equal("method.request.header.Authorization")
    stage["authorizerResultTtlInSeconds"].should.equal(300)
@mock_apigateway
@mock_cognitoidp
def test_delete_authorizer():
    """Deleting one authorizer must leave the other authorizers in place."""
    client = boto3.client("apigateway", region_name="us-west-2")
    authorizer_name = "my_authorizer"
    response = client.create_rest_api(name="my_api", description="this is my api")
    api_id = response["id"]
    # A COGNITO_USER_POOLS authorizer references a real user-pool ARN.
    cognito_client = boto3.client("cognito-idp", region_name="us-west-2")
    user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[
        "UserPool"
    ]["Arn"]
    response = client.create_authorizer(
        restApiId=api_id,
        name=authorizer_name,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id = response["id"]
    response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id)
    # createdDate is hard to match against, remove it
    response.pop("createdDate", None)
    # this is hard to match against, so remove it
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)
    response.should.equal(
        {
            "id": authorizer_id,
            "name": authorizer_name,
            "type": "COGNITO_USER_POOLS",
            "providerARNs": [user_pool_arn],
            "identitySource": "method.request.header.Authorization",
            "authorizerResultTtlInSeconds": 300,
            "ResponseMetadata": {"HTTPStatusCode": 200},
        }
    )
    authorizer_name2 = "my_authorizer2"
    response = client.create_authorizer(
        restApiId=api_id,
        name=authorizer_name2,
        type="COGNITO_USER_POOLS",
        providerARNs=[user_pool_arn],
        identitySource="method.request.header.Authorization",
    )
    authorizer_id2 = response["id"]
    authorizers = client.get_authorizers(restApiId=api_id)["items"]
    sorted([authorizer["name"] for authorizer in authorizers]).should.equal(
        sorted([authorizer_name2, authorizer_name])
    )
    # delete the second authorizer (comment originally said "stage")
    response = client.delete_authorizer(restApiId=api_id, authorizerId=authorizer_id2)
    response["ResponseMetadata"]["HTTPStatusCode"].should.equal(202)
    # verify the other authorizer still exists
    authorizers = client.get_authorizers(restApiId=api_id)["items"]
    sorted([authorizer["name"] for authorizer in authorizers]).should.equal(
        sorted([authorizer_name])
    )
@mock_apigateway @mock_apigateway
def test_update_stage_configuration(): def test_update_stage_configuration():
client = boto3.client("apigateway", region_name="us-west-2") client = boto3.client("apigateway", region_name="us-west-2")

View File

@ -150,7 +150,7 @@ def test_invoke_requestresponse_function_with_arn():
Payload=json.dumps(in_data), Payload=json.dumps(in_data),
) )
success_result["StatusCode"].should.equal(202) success_result["StatusCode"].should.equal(200)
result_obj = json.loads( result_obj = json.loads(
base64.b64decode(success_result["LogResult"]).decode("utf-8") base64.b64decode(success_result["LogResult"]).decode("utf-8")
) )
@ -1161,7 +1161,7 @@ def test_invoke_function_from_sqs():
@mock_logs @mock_logs
@mock_lambda @mock_lambda
@mock_dynamodb2 @mock_dynamodb2
def test_invoke_function_from_dynamodb(): def test_invoke_function_from_dynamodb_put():
logs_conn = boto3.client("logs", region_name="us-east-1") logs_conn = boto3.client("logs", region_name="us-east-1")
dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb = boto3.client("dynamodb", region_name="us-east-1")
table_name = "table_with_stream" table_name = "table_with_stream"
@ -1218,6 +1218,72 @@ def test_invoke_function_from_dynamodb():
assert False, "Test Failed" assert False, "Test Failed"
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_update():
    """An update_item on a stream-enabled DynamoDB table should trigger the
    Lambda function wired up via create_event_source_mapping."""
    logs_conn = boto3.client("logs", region_name="us-east-1")
    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
    table_name = "table_with_stream"
    table = dynamodb.create_table(
        TableName=table_name,
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        StreamSpecification={
            "StreamEnabled": True,
            "StreamViewType": "NEW_AND_OLD_IMAGES",
        },
    )
    # Seed one item so there is something to update later.
    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function executed after a DynamoDB table is updated",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=table["TableDescription"]["LatestStreamArn"],
        FunctionName=func["FunctionArn"],
    )
    assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"]
    assert response["State"] == "Enabled"
    dynamodb.update_item(
        TableName=table_name,
        Key={"id": {"S": "item 1"}},
        UpdateExpression="set #attr = :val",
        ExpressionAttributeNames={"#attr": "new_attr"},
        ExpressionAttributeValues={":val": {"S": "new_val"}},
    )
    # Poll CloudWatch Logs for up to 30s for the success marker printed by
    # the zip's handler; the Lambda runs asynchronously in a container.
    start = time.time()
    while (time.time() - start) < 30:
        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
        log_streams = result.get("logStreams")
        if not log_streams:
            time.sleep(1)
            continue

        assert len(log_streams) == 1
        result = logs_conn.get_log_events(
            logGroupName="/aws/lambda/testFunction",
            logStreamName=log_streams[0]["logStreamName"],
        )
        for event in result.get("events"):
            if event["message"] == "get_test_zip_file3 success":
                return
        time.sleep(1)

    assert False, "Test Failed"
@mock_logs @mock_logs
@mock_lambda @mock_lambda
@mock_sqs @mock_sqs

View File

@ -27,6 +27,11 @@ def test_create_user_pool():
result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should_not.be.none
result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+")
result["UserPool"]["Arn"].should.equal(
"arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format(
ACCOUNT_ID, result["UserPool"]["Id"]
)
)
result["UserPool"]["Name"].should.equal(name) result["UserPool"]["Name"].should.equal(name)
result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value)
@ -911,6 +916,55 @@ def test_admin_create_existing_user():
caught.should.be.true caught.should.be.true
@mock_cognitoidp
def test_admin_resend_invitation_existing_user():
    """Resending an invitation (MessageAction=RESEND) to an existing user
    must not raise UsernameExistsException."""
    client = boto3.client("cognito-idp", "us-west-2")
    user_name = str(uuid.uuid4())
    attr_value = str(uuid.uuid4())
    pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
    attributes = [{"Name": "thing", "Value": attr_value}]
    client.admin_create_user(
        UserPoolId=pool_id, Username=user_name, UserAttributes=attributes
    )
    # A second create with MessageAction=RESEND is the documented way to
    # re-send the invitation; it must succeed for an existing username.
    raised = False
    try:
        client.admin_create_user(
            UserPoolId=pool_id,
            Username=user_name,
            UserAttributes=attributes,
            MessageAction="RESEND",
        )
    except client.exceptions.UsernameExistsException:
        raised = True
    raised.should.be.false
@mock_cognitoidp
def test_admin_resend_invitation_missing_user():
    """MessageAction=RESEND for an unknown username raises
    UserNotFoundException."""
    client = boto3.client("cognito-idp", "us-west-2")
    user_name = str(uuid.uuid4())
    attr_value = str(uuid.uuid4())
    pool_id = client.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
    # No user is created first, so the resend must fail.
    raised = False
    try:
        client.admin_create_user(
            UserPoolId=pool_id,
            Username=user_name,
            UserAttributes=[{"Name": "thing", "Value": attr_value}],
            MessageAction="RESEND",
        )
    except client.exceptions.UserNotFoundException:
        raised = True
    raised.should.be.true
@mock_cognitoidp @mock_cognitoidp
def test_admin_get_user(): def test_admin_get_user():
conn = boto3.client("cognito-idp", "us-west-2") conn = boto3.client("cognito-idp", "us-west-2")

View File

@ -46,4 +46,4 @@ def test_domain_dispatched_with_service():
dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"})
keys = set(backend_app.view_functions.keys()) keys = set(backend_app.view_functions.keys())
keys.should.contain("ResponseObject.key_response") keys.should.contain("ResponseObject.key_or_control_response")

View File

@ -752,7 +752,9 @@ def test_steps():
# StateChangeReason # StateChangeReason
x["Status"]["Timeline"]["CreationDateTime"].should.be.a("datetime.datetime") x["Status"]["Timeline"]["CreationDateTime"].should.be.a("datetime.datetime")
# x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') # Only the first step will have started - we don't know anything about when it finishes, so the second step never starts
if x["Name"] == "My wordcount example":
x["Status"]["Timeline"]["StartDateTime"].should.be.a("datetime.datetime")
x = client.describe_step(ClusterId=cluster_id, StepId=x["Id"])["Step"] x = client.describe_step(ClusterId=cluster_id, StepId=x["Id"])["Step"]
x["ActionOnFailure"].should.equal("TERMINATE_CLUSTER") x["ActionOnFailure"].should.equal("TERMINATE_CLUSTER")

View File

@ -1,11 +1,14 @@
import random from moto.events.models import EventsBackend
import boto3
import json
import sure # noqa
from moto.events import mock_events from moto.events import mock_events
import json
import random
import unittest
import boto3
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from moto.core.exceptions import JsonRESTError
from nose.tools import assert_raises from nose.tools import assert_raises
from moto.core import ACCOUNT_ID from moto.core import ACCOUNT_ID
RULES = [ RULES = [
@ -136,14 +139,6 @@ def test_list_rule_names_by_target():
assert rule in test_2_target["Rules"] assert rule in test_2_target["Rules"]
@mock_events
def test_list_rules():
client = generate_environment()
rules = client.list_rules()
assert len(rules["Rules"]) == len(RULES)
@mock_events @mock_events
def test_delete_rule(): def test_delete_rule():
client = generate_environment() client = generate_environment()
@ -461,3 +456,50 @@ def test_delete_event_bus_errors():
client.delete_event_bus.when.called_with(Name="default").should.throw( client.delete_event_bus.when.called_with(Name="default").should.throw(
ClientError, "Cannot delete event bus default." ClientError, "Cannot delete event bus default."
) )
@mock_events
def test_rule_tagging_happy():
    """Tags can be added to, listed for, and removed from an events rule.

    Replaces the previous throwaway ``unittest.TestCase("__init__")`` trick
    (used only to get an order-insensitive comparison) with a plain sorted
    comparison, which is clearer and framework-free.
    """
    client = generate_environment()
    rule_name = get_random_rule()["Name"]
    rule_arn = client.describe_rule(Name=rule_name).get("Arn")

    tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}]
    client.tag_resource(ResourceARN=rule_arn, Tags=tags)

    actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags")
    # The API does not guarantee tag order, so compare order-insensitively.
    assert sorted(tags, key=lambda t: t["Key"]) == sorted(
        actual, key=lambda t: t["Key"]
    )

    client.untag_resource(ResourceARN=rule_arn, TagKeys=["key1"])
    actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags")
    assert [{"Key": "key2", "Value": "value2"}] == actual
@mock_events
def test_rule_tagging_sad():
    """Tag operations on an unknown ResourceARN must raise JsonRESTError.

    Bug fix: the original used ``raise "message"`` — raising a plain string
    is a TypeError on Python 3, so a missing exception would have surfaced
    as a confusing TypeError instead of a clear test failure. Raise
    AssertionError instead.
    """
    back_end = EventsBackend("us-west-2")
    try:
        back_end.tag_resource("unknown", [])
        raise AssertionError("tag_resource should fail if ResourceARN is not known")
    except JsonRESTError:
        pass
    try:
        back_end.untag_resource("unknown", [])
        raise AssertionError("untag_resource should fail if ResourceARN is not known")
    except JsonRESTError:
        pass
    try:
        back_end.list_tags_for_resource("unknown")
        raise AssertionError(
            "list_tags_for_resource should fail if ResourceARN is not known"
        )
    except JsonRESTError:
        pass

View File

@ -9,6 +9,173 @@ from botocore.exceptions import ClientError
from nose.tools import assert_raises from nose.tools import assert_raises
@mock_iot
def test_attach_policy():
    """An attached policy shows up in list_attached_policies for the target."""
    client = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    certificate = client.create_keys_and_certificate(setAsActive=True)
    target_arn = certificate["certificateArn"]
    client.create_policy(policyName=policy_name, policyDocument="{}")
    client.attach_policy(policyName=policy_name, target=target_arn)
    attached = client.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.have.length_of(1)
    attached["policies"][0]["policyName"].should.equal("my-policy")
@mock_iot
def test_detach_policy():
    """Detaching a policy removes it from the target's attached-policy list."""
    client = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    certificate = client.create_keys_and_certificate(setAsActive=True)
    target_arn = certificate["certificateArn"]
    client.create_policy(policyName=policy_name, policyDocument="{}")
    client.attach_policy(policyName=policy_name, target=target_arn)
    # Sanity-check the attach before exercising detach.
    attached = client.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.have.length_of(1)
    attached["policies"][0]["policyName"].should.equal("my-policy")
    client.detach_policy(policyName=policy_name, target=target_arn)
    attached = client.list_attached_policies(target=target_arn)
    attached.should.have.key("policies").which.should.be.empty
@mock_iot
def test_list_attached_policies():
    """A freshly created certificate has no attached policies."""
    client = boto3.client("iot", region_name="ap-northeast-1")
    certificate = client.create_keys_and_certificate(setAsActive=True)
    listed = client.list_attached_policies(target=certificate["certificateArn"])
    listed["policies"].should.be.empty
@mock_iot
def test_policy_versions():
    """End-to-end check of IoT policy versioning: create extra versions,
    switch the default version, and delete versions — the current default
    version must not be deletable via delete_policy_version."""
    client = boto3.client("iot", region_name="ap-northeast-1")
    policy_name = "my-policy"
    doc = "{}"
    # Version "1" is created implicitly together with the policy itself.
    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
    policy.should.have.key("policyVersionId").which.should.equal("1")
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy["defaultVersionId"]
    )
    # Version "2" becomes the new default (setAsDefault=True).
    policy1 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_1"}),
        setAsDefault=True,
    )
    policy1.should.have.key("policyArn").which.should_not.be.none
    policy1.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy1.should.have.key("policyVersionId").which.should.equal("2")
    policy1.should.have.key("isDefaultVersion").which.should.equal(True)
    # Version "3" is created but NOT made default.
    policy2 = client.create_policy_version(
        policyName=policy_name,
        policyDocument=json.dumps({"version": "version_2"}),
        setAsDefault=False,
    )
    policy2.should.have.key("policyArn").which.should_not.be.none
    policy2.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_2"})
    )
    policy2.should.have.key("policyVersionId").which.should.equal("3")
    policy2.should.have.key("isDefaultVersion").which.should.equal(False)
    # get_policy serves the default version's document (version "2").
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy1["policyVersionId"]
    )
    # Exactly one of the three versions may be flagged as default.
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(3)
    list(
        map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    ).count(True).should.equal(1)
    default_policy = list(
        filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    )
    default_policy[0].should.have.key("versionId").should.equal(
        policy1["policyVersionId"]
    )
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_1"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy1["policyVersionId"]
    )
    # Switch the default to version "3" and re-check the bookkeeping.
    client.set_default_policy_version(
        policyName=policy_name, policyVersionId=policy2["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(3)
    list(
        map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    ).count(True).should.equal(1)
    default_policy = list(
        filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
    )
    default_policy[0].should.have.key("versionId").should.equal(
        policy2["policyVersionId"]
    )
    policy = client.get_policy(policyName=policy_name)
    policy.should.have.key("policyName").which.should.equal(policy_name)
    policy.should.have.key("policyArn").which.should_not.be.none
    policy.should.have.key("policyDocument").which.should.equal(
        json.dumps({"version": "version_2"})
    )
    policy.should.have.key("defaultVersionId").which.should.equal(
        policy2["policyVersionId"]
    )
    # Non-default versions can be deleted freely.
    client.delete_policy_version(policyName=policy_name, policyVersionId="1")
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(2)
    client.delete_policy_version(
        policyName=policy_name, policyVersionId=policy1["policyVersionId"]
    )
    policy_versions = client.list_policy_versions(policyName=policy_name)
    policy_versions.should.have.key("policyVersions").which.should.have.length_of(1)
    # should fail as it"s the default policy. Should use delete_policy instead
    try:
        client.delete_policy_version(
            policyName=policy_name, policyVersionId=policy2["policyVersionId"]
        )
        assert False, "Should have failed in previous call"
    except Exception as exception:
        exception.response["Error"]["Message"].should.equal(
            "Cannot delete the default version of a policy"
        )
@mock_iot @mock_iot
def test_things(): def test_things():
client = boto3.client("iot", region_name="ap-northeast-1") client = boto3.client("iot", region_name="ap-northeast-1")
@ -994,7 +1161,10 @@ def test_create_job():
client = boto3.client("iot", region_name="eu-west-1") client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing" name = "my-thing"
job_id = "TestJob" job_id = "TestJob"
# thing # thing# job document
# job_document = {
# "field": "value"
# }
thing = client.create_thing(thingName=name) thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name) thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn") thing.should.have.key("thingArn")
@ -1020,6 +1190,63 @@ def test_create_job():
job.should.have.key("description") job.should.have.key("description")
@mock_iot
def test_list_jobs():
    """list_jobs returns every created job, without a pagination token."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")

    # Both jobs share everything except the job id.
    common_kwargs = dict(
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    job1 = client.create_job(jobId=job_id, **common_kwargs)
    job1.should.have.key("jobId").which.should.equal(job_id)
    job1.should.have.key("jobArn")
    job1.should.have.key("description")

    job2 = client.create_job(jobId=job_id + "1", **common_kwargs)
    job2.should.have.key("jobId").which.should.equal(job_id + "1")
    job2.should.have.key("jobArn")
    job2.should.have.key("description")

    jobs = client.list_jobs()
    jobs.should.have.key("jobs")
    jobs.should_not.have.key("nextToken")
    jobs["jobs"][0].should.have.key("jobId").which.should.equal(job_id)
    jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id + "1")
@mock_iot @mock_iot
def test_describe_job(): def test_describe_job():
client = boto3.client("iot", region_name="eu-west-1") client = boto3.client("iot", region_name="eu-west-1")
@ -1124,3 +1351,387 @@ def test_describe_job_1():
job.should.have.key("job").which.should.have.key( job.should.have.key("job").which.should.have.key(
"jobExecutionsRolloutConfig" "jobExecutionsRolloutConfig"
).which.should.have.key("maximumPerMinute").which.should.equal(10) ).which.should.have.key("maximumPerMinute").which.should.equal(10)
@mock_iot
def test_delete_job():
    """A deleted job no longer appears in list_jobs."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")

    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")

    described = client.describe_job(jobId=job_id)
    described.should.have.key("job")
    described.should.have.key("job").which.should.have.key("jobId").which.should.equal(
        job_id
    )

    client.delete_job(jobId=job_id)
    client.list_jobs()["jobs"].should.have.length_of(0)
@mock_iot
def test_cancel_job():
    """Cancelling a job records status CANCELED plus reason code and comment."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")

    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")

    described = client.describe_job(jobId=job_id)
    described.should.have.key("job")
    described.should.have.key("job").which.should.have.key("jobId").which.should.equal(
        job_id
    )

    canceled = client.cancel_job(jobId=job_id, reasonCode="Because", comment="You are")
    canceled.should.have.key("jobId").which.should.equal(job_id)
    canceled.should.have.key("jobArn")

    described = client.describe_job(jobId=job_id)
    described.should.have.key("job")
    described.should.have.key("job").which.should.have.key("jobId").which.should.equal(
        job_id
    )
    described.should.have.key("job").which.should.have.key("status").which.should.equal(
        "CANCELED"
    )
    described.should.have.key("job").which.should.have.key(
        "forceCanceled"
    ).which.should.equal(False)
    described.should.have.key("job").which.should.have.key(
        "reasonCode"
    ).which.should.equal("Because")
    described.should.have.key("job").which.should.have.key(
        "comment"
    ).which.should.equal("You are")
@mock_iot
def test_get_job_document_with_document_source():
    """get_job_document returns an empty document when the job was created
    from a documentSource URL (the document itself is not stored)."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    response = client.get_job_document(jobId=job_id)
    response.should.have.key("document").which.should.equal("")
@mock_iot
def test_get_job_document_with_document():
    """get_job_document returns the inline document the job was created with."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    response = client.get_job_document(jobId=job_id)
    response.should.have.key("document").which.should.equal('{"field": "value"}')
@mock_iot
def test_describe_job_execution():
    """describe_job_execution returns execution details, honours the optional
    executionNumber argument, and raises ResourceNotFoundException for an
    unknown execution number.

    The two previously duplicated 15-line assertion blocks are factored into
    a local helper so the expected shape is stated once.
    """
    client = boto3.client("iot", region_name="eu-west-1")
    name = "my-thing"
    job_id = "TestJob"
    # thing
    thing = client.create_thing(thingName=name)
    thing.should.have.key("thingName").which.should.equal(name)
    thing.should.have.key("thingArn")
    # job document
    job_document = {"field": "value"}
    job = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps(job_document),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    job.should.have.key("jobId").which.should.equal(job_id)
    job.should.have.key("jobArn")
    job.should.have.key("description")

    def assert_queued_execution(job_execution):
        # Shared assertions for a freshly queued job execution.
        job_execution.should.have.key("execution")
        execution = job_execution["execution"]
        execution.should.have.key("jobId").which.should.equal(job_id)
        execution.should.have.key("status").which.should.equal("QUEUED")
        execution.should.have.key("forceCanceled").which.should.equal(False)
        execution.should.have.key("statusDetails").which.should.equal(
            {"detailsMap": {}}
        )
        execution.should.have.key("thingArn").which.should.equal(thing["thingArn"])
        execution.should.have.key("queuedAt")
        execution.should.have.key("startedAt")
        execution.should.have.key("lastUpdatedAt")
        execution.should.have.key("executionNumber").which.should.equal(123)
        execution.should.have.key("versionNumber").which.should.equal(123)
        execution.should.have.key(
            "approximateSecondsBeforeTimedOut"
        ).which.should.equal(123)

    # Without and with an explicit (matching) executionNumber.
    assert_queued_execution(client.describe_job_execution(jobId=job_id, thingName=name))
    assert_queued_execution(
        client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123)
    )

    # An unknown execution number must raise ResourceNotFoundException.
    try:
        client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456)
    except ClientError as exc:
        error_code = exc.response["Error"]["Code"]
        error_code.should.equal("ResourceNotFoundException")
    else:
        raise Exception("Should have raised error")
@mock_iot
def test_cancel_job_execution():
    """Cancelling a job execution sets its status to CANCELED."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    created.should.have.key("description")
    client.cancel_job_execution(jobId=job_id, thingName=thing_name)
    execution = client.describe_job_execution(jobId=job_id, thingName=thing_name)
    execution.should.have.key("execution")
    execution["execution"].should.have.key("status").which.should.equal("CANCELED")
@mock_iot
def test_delete_job_execution():
    """A deleted job execution can no longer be described."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    created.should.have.key("description")
    client.delete_job_execution(jobId=job_id, thingName=thing_name, executionNumber=123)
    # Describing the deleted execution must now fail.
    try:
        client.describe_job_execution(
            jobId=job_id, thingName=thing_name, executionNumber=123
        )
    except ClientError as exc:
        exc.response["Error"]["Code"].should.equal("ResourceNotFoundException")
    else:
        raise Exception("Should have raised error")
@mock_iot
def test_list_job_executions_for_job():
    """list_job_executions_for_job returns a summary for the targeted thing."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    created.should.have.key("description")
    executions = client.list_job_executions_for_job(jobId=job_id)
    executions.should.have.key("executionSummaries")
    executions["executionSummaries"][0].should.have.key(
        "thingArn"
    ).which.should.equal(thing["thingArn"])
@mock_iot
def test_list_job_executions_for_thing():
    """list_job_executions_for_thing returns a summary for the job."""
    client = boto3.client("iot", region_name="eu-west-1")
    thing_name = "my-thing"
    job_id = "TestJob"
    thing = client.create_thing(thingName=thing_name)
    thing.should.have.key("thingName").which.should.equal(thing_name)
    thing.should.have.key("thingArn")
    created = client.create_job(
        jobId=job_id,
        targets=[thing["thingArn"]],
        document=json.dumps({"field": "value"}),
        description="Description",
        presignedUrlConfig={
            "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
            "expiresInSec": 123,
        },
        targetSelection="CONTINUOUS",
        jobExecutionsRolloutConfig={"maximumPerMinute": 10},
    )
    created.should.have.key("jobId").which.should.equal(job_id)
    created.should.have.key("jobArn")
    created.should.have.key("description")
    executions = client.list_job_executions_for_thing(thingName=thing_name)
    executions.should.have.key("executionSummaries")
    executions["executionSummaries"][0].should.have.key("jobId").which.should.equal(
        job_id
    )

View File

@ -4,15 +4,17 @@ import base64
import re import re
import boto.kms import boto.kms
import boto3
import six import six
import sure # noqa import sure # noqa
from boto.exception import JSONResponseError from boto.exception import JSONResponseError
from boto.kms.exceptions import AlreadyExistsException, NotFoundException from boto.kms.exceptions import AlreadyExistsException, NotFoundException
from nose.tools import assert_raises from nose.tools import assert_raises
from parameterized import parameterized from parameterized import parameterized
from moto.core.exceptions import JsonRESTError
from moto.kms.models import KmsBackend
from moto.kms.exceptions import NotFoundException as MotoNotFoundException from moto.kms.exceptions import NotFoundException as MotoNotFoundException
from moto import mock_kms_deprecated from moto import mock_kms_deprecated, mock_kms
PLAINTEXT_VECTORS = ( PLAINTEXT_VECTORS = (
(b"some encodeable plaintext",), (b"some encodeable plaintext",),
@ -679,3 +681,77 @@ def test__assert_default_policy():
_assert_default_policy.when.called_with("default").should_not.throw( _assert_default_policy.when.called_with("default").should_not.throw(
MotoNotFoundException MotoNotFoundException
) )
# Order-insensitive sort helper for lists of tag dicts, used by the tagging
# tests below. Python 2 can order dicts directly; Python 3 needs an explicit
# sort key (dict-keys views compare as sets).
if six.PY2:
    sort = sorted
else:
    def sort(tag_list):
        return sorted(tag_list, key=lambda d: d.keys())
@mock_kms
def test_key_tag_on_create_key_happy():
    """Tags supplied to create_key are listed and can be removed."""
    client = boto3.client("kms", region_name="us-east-1")
    tags = [
        {"TagKey": "key1", "TagValue": "value1"},
        {"TagKey": "key2", "TagValue": "value2"},
    ]
    key_id = client.create_key(Description="test-key-tagging", Tags=tags)[
        "KeyMetadata"
    ]["KeyId"]
    listed = client.list_resource_tags(KeyId=key_id).get("Tags", [])
    assert sort(tags) == sort(listed)
    client.untag_resource(KeyId=key_id, TagKeys=["key1"])
    remaining = client.list_resource_tags(KeyId=key_id).get("Tags", [])
    assert sort([{"TagKey": "key2", "TagValue": "value2"}]) == sort(remaining)
@mock_kms
def test_key_tag_added_happy():
    """Tags added via tag_resource are listed and can be removed."""
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="test-key-tagging")["KeyMetadata"]["KeyId"]
    tags = [
        {"TagKey": "key1", "TagValue": "value1"},
        {"TagKey": "key2", "TagValue": "value2"},
    ]
    client.tag_resource(KeyId=key_id, Tags=tags)
    listed = client.list_resource_tags(KeyId=key_id).get("Tags", [])
    assert sort(tags) == sort(listed)
    client.untag_resource(KeyId=key_id, TagKeys=["key1"])
    remaining = client.list_resource_tags(KeyId=key_id).get("Tags", [])
    assert sort([{"TagKey": "key2", "TagValue": "value2"}]) == sort(remaining)
@mock_kms_deprecated
def test_key_tagging_sad():
    """Tag operations on an unknown KeyId must raise JsonRESTError.

    Bug fix: the original used ``raise "message"`` — raising a plain string
    is a TypeError on Python 3, so a missing exception would have surfaced
    as a confusing TypeError instead of a clear test failure. Raise
    AssertionError instead.
    """
    b = KmsBackend()
    try:
        b.tag_resource("unknown", [])
        raise AssertionError("tag_resource should fail if KeyId is not known")
    except JsonRESTError:
        pass
    try:
        b.untag_resource("unknown", [])
        raise AssertionError("untag_resource should fail if KeyId is not known")
    except JsonRESTError:
        pass
    try:
        b.list_resource_tags("unknown")
        raise AssertionError("list_resource_tags should fail if KeyId is not known")
    except JsonRESTError:
        pass

View File

@ -102,7 +102,7 @@ def test_deserialize_ciphertext_blob(raw, serialized):
@parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) @parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS))
def test_encrypt_decrypt_cycle(encryption_context): def test_encrypt_decrypt_cycle(encryption_context):
plaintext = b"some secret plaintext" plaintext = b"some secret plaintext"
master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key = Key("nop", "nop", "nop", "nop", "nop")
master_key_map = {master_key.id: master_key} master_key_map = {master_key.id: master_key}
ciphertext_blob = encrypt( ciphertext_blob = encrypt(
@ -133,7 +133,7 @@ def test_encrypt_unknown_key_id():
def test_decrypt_invalid_ciphertext_format(): def test_decrypt_invalid_ciphertext_format():
master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key = Key("nop", "nop", "nop", "nop", "nop")
master_key_map = {master_key.id: master_key} master_key_map = {master_key.id: master_key}
with assert_raises(InvalidCiphertextException): with assert_raises(InvalidCiphertextException):
@ -153,7 +153,7 @@ def test_decrypt_unknwown_key_id():
def test_decrypt_invalid_ciphertext(): def test_decrypt_invalid_ciphertext():
master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key = Key("nop", "nop", "nop", "nop", "nop")
master_key_map = {master_key.id: master_key} master_key_map = {master_key.id: master_key}
ciphertext_blob = ( ciphertext_blob = (
master_key.id.encode("utf-8") + b"123456789012" master_key.id.encode("utf-8") + b"123456789012"
@ -171,7 +171,7 @@ def test_decrypt_invalid_ciphertext():
def test_decrypt_invalid_encryption_context(): def test_decrypt_invalid_encryption_context():
plaintext = b"some secret plaintext" plaintext = b"some secret plaintext"
master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key = Key("nop", "nop", "nop", "nop", "nop")
master_key_map = {master_key.id: master_key} master_key_map = {master_key.id: master_key}
ciphertext_blob = encrypt( ciphertext_blob = encrypt(

View File

@ -5,6 +5,7 @@ import datetime
import os import os
import sys import sys
from boto3 import Session
from six.moves.urllib.request import urlopen from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError from six.moves.urllib.error import HTTPError
from functools import wraps from functools import wraps
@ -1135,6 +1136,380 @@ if not settings.TEST_SERVER_MODE:
"The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to."
) )
# All tests for s3-control cannot be run under the server without a modification of the
# hosts file on your system. This is due to the fact that the URL to the host is in the form of:
# ACCOUNT_ID.s3-control.amazonaws.com <-- That Account ID part is the problem. If you want to
# make use of the moto server, update your hosts file for `THE_ACCOUNT_ID_FOR_MOTO.localhost`
# and this will work fine.
@mock_s3
def test_get_public_access_block_for_account():
    """Exercise the full lifecycle of an account-level Public Access Block.

    Covers: AccessDenied on a foreign account ID, the dedicated error for a
    missing PAB, validation of an empty configuration, a successful put that
    is visible from every s3control region, and deletion.
    """
    from moto.s3.models import ACCOUNT_ID

    client = boto3.client("s3control", region_name="us-west-2")

    # Get with an invalid account ID:
    with assert_raises(ClientError) as ce:
        client.get_public_access_block(AccountId="111111111111")
    assert ce.exception.response["Error"]["Code"] == "AccessDenied"

    # Get when no PAB has been defined yet:
    with assert_raises(ClientError) as ce:
        client.get_public_access_block(AccountId=ACCOUNT_ID)
    assert (
        ce.exception.response["Error"]["Code"]
        == "NoSuchPublicAccessBlockConfiguration"
    )

    # Put with an invalid account ID:
    with assert_raises(ClientError) as ce:
        client.put_public_access_block(
            AccountId="111111111111",
            PublicAccessBlockConfiguration={"BlockPublicAcls": True},
        )
    assert ce.exception.response["Error"]["Code"] == "AccessDenied"

    # Put with an invalid (empty) PAB:
    with assert_raises(ClientError) as ce:
        client.put_public_access_block(
            AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={}
        )
    assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
    assert (
        "Must specify at least one configuration."
        in ce.exception.response["Error"]["Message"]
    )

    # Correct PAB:
    client.put_public_access_block(
        AccountId=ACCOUNT_ID,
        PublicAccessBlockConfiguration={
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        },
    )

    # Get the correct PAB (for all regions) -- the PAB is account-wide, so
    # every regional endpoint must return the same configuration:
    for region in Session().get_available_regions("s3control"):
        region_client = boto3.client("s3control", region_name=region)
        assert region_client.get_public_access_block(AccountId=ACCOUNT_ID)[
            "PublicAccessBlockConfiguration"
        ] == {
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        }

    # Delete with an invalid account ID:
    with assert_raises(ClientError) as ce:
        client.delete_public_access_block(AccountId="111111111111")
    assert ce.exception.response["Error"]["Code"] == "AccessDenied"

    # Delete successfully:
    client.delete_public_access_block(AccountId=ACCOUNT_ID)

    # Confirm that it's deleted:
    with assert_raises(ClientError) as ce:
        client.get_public_access_block(AccountId=ACCOUNT_ID)
    assert (
        ce.exception.response["Error"]["Code"]
        == "NoSuchPublicAccessBlockConfiguration"
    )
@mock_s3
@mock_config
def test_config_list_account_pab():
    """AWS Config can list the account-level Public Access Block resource.

    Covers both the non-aggregated (``list_discovered_resources``) and the
    aggregated (``list_aggregate_discovered_resources``) listing paths,
    including ID/name/region filters, pagination over regions, and queries
    that should match nothing.
    """
    from moto.s3.models import ACCOUNT_ID

    client = boto3.client("s3control", region_name="us-west-2")
    config_client = boto3.client("config", region_name="us-west-2")

    # Create the aggregator:
    account_aggregation_source = {
        "AccountIds": [ACCOUNT_ID],
        "AllAwsRegions": True,
    }
    config_client.put_configuration_aggregator(
        ConfigurationAggregatorName="testing",
        AccountAggregationSources=[account_aggregation_source],
    )

    # Without a PAB in place both listing paths must come back empty:
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock"
    )
    assert not result["resourceIdentifiers"]
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
    )
    assert not result["ResourceIdentifiers"]

    # Create a PAB:
    client.put_public_access_block(
        AccountId=ACCOUNT_ID,
        PublicAccessBlockConfiguration={
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        },
    )

    # Test that successful queries work (non-aggregated):
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock"
    )
    assert result["resourceIdentifiers"] == [
        {
            "resourceType": "AWS::S3::AccountPublicAccessBlock",
            "resourceId": ACCOUNT_ID,
        }
    ]
    # Unknown IDs in the resourceIds filter are ignored, not errors:
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock",
        resourceIds=[ACCOUNT_ID, "nope"],
    )
    assert result["resourceIdentifiers"] == [
        {
            "resourceType": "AWS::S3::AccountPublicAccessBlock",
            "resourceId": ACCOUNT_ID,
        }
    ]
    # An empty resourceName filter still matches the PAB:
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock", resourceName=""
    )
    assert result["resourceIdentifiers"] == [
        {
            "resourceType": "AWS::S3::AccountPublicAccessBlock",
            "resourceId": ACCOUNT_ID,
        }
    ]

    # Test that successful queries work (aggregated):
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
    )
    # One identifier per Config region, each with a distinct SourceRegion
    # (`regions.remove` would raise if a region appeared twice):
    regions = {region for region in Session().get_available_regions("config")}
    for r in result["ResourceIdentifiers"]:
        regions.remove(r.pop("SourceRegion"))
        assert r == {
            "ResourceType": "AWS::S3::AccountPublicAccessBlock",
            "SourceAccountId": ACCOUNT_ID,
            "ResourceId": ACCOUNT_ID,
        }

    # Just check that the len is the same -- this should be reasonable
    regions = {region for region in Session().get_available_regions("config")}
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={"ResourceName": ""},
    )
    assert len(regions) == len(result["ResourceIdentifiers"])
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID},
    )
    assert len(regions) == len(result["ResourceIdentifiers"])
    # Adding a Region filter narrows the result to exactly that region:
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={
            "ResourceName": "",
            "ResourceId": ACCOUNT_ID,
            "Region": "us-west-2",
        },
    )
    assert (
        result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2"
        and len(result["ResourceIdentifiers"]) == 1
    )

    # Test aggregator pagination:
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Limit=1,
    )
    regions = sorted(
        [region for region in Session().get_available_regions("config")]
    )
    assert result["ResourceIdentifiers"][0] == {
        "ResourceType": "AWS::S3::AccountPublicAccessBlock",
        "SourceAccountId": ACCOUNT_ID,
        "ResourceId": ACCOUNT_ID,
        "SourceRegion": regions[0],
    }
    # The NextToken is the next region in sorted order:
    assert result["NextToken"] == regions[1]

    # Get the next region:
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Limit=1,
        NextToken=regions[1],
    )
    assert result["ResourceIdentifiers"][0] == {
        "ResourceType": "AWS::S3::AccountPublicAccessBlock",
        "SourceAccountId": ACCOUNT_ID,
        "ResourceId": ACCOUNT_ID,
        "SourceRegion": regions[1],
    }

    # Non-aggregated with incorrect info:
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope"
    )
    assert not result["resourceIdentifiers"]
    result = config_client.list_discovered_resources(
        resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"]
    )
    assert not result["resourceIdentifiers"]

    # Aggregated with incorrect info:
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={"ResourceName": "nope"},
    )
    assert not result["ResourceIdentifiers"]
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={"ResourceId": "nope"},
    )
    assert not result["ResourceIdentifiers"]
    result = config_client.list_aggregate_discovered_resources(
        ResourceType="AWS::S3::AccountPublicAccessBlock",
        ConfigurationAggregatorName="testing",
        Filters={"Region": "Nope"},
    )
    assert not result["ResourceIdentifiers"]
@mock_s3
@mock_config
def test_config_get_account_pab():
    """AWS Config can fetch the configuration of the account-level PAB.

    Checks ``get_resource_config_history``, ``batch_get_resource_config``
    and ``batch_get_aggregate_resource_config`` both before the PAB exists
    (not-found / empty results) and after it is created (full config
    returned, and from every s3control region via the aggregator).
    """
    from moto.s3.models import ACCOUNT_ID

    client = boto3.client("s3control", region_name="us-west-2")
    config_client = boto3.client("config", region_name="us-west-2")

    # Create the aggregator:
    account_aggregation_source = {
        "AccountIds": [ACCOUNT_ID],
        "AllAwsRegions": True,
    }
    config_client.put_configuration_aggregator(
        ConfigurationAggregatorName="testing",
        AccountAggregationSources=[account_aggregation_source],
    )

    # Without a PAB in place:
    with assert_raises(ClientError) as ce:
        config_client.get_resource_config_history(
            resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
        )
    assert (
        ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException"
    )
    # Batch (non-aggregated) lookup returns empty rather than raising:
    result = config_client.batch_get_resource_config(
        resourceKeys=[
            {
                "resourceType": "AWS::S3::AccountPublicAccessBlock",
                # BUGFIX: this previously passed the literal string
                # "ACCOUNT_ID" instead of the account-ID variable, so the
                # lookup could never have matched even with a PAB present.
                "resourceId": ACCOUNT_ID,
            }
        ]
    )
    assert not result["baseConfigurationItems"]
    # Aggregated batch lookup is likewise empty:
    result = config_client.batch_get_aggregate_resource_config(
        ConfigurationAggregatorName="testing",
        ResourceIdentifiers=[
            {
                "SourceAccountId": ACCOUNT_ID,
                "SourceRegion": "us-west-2",
                "ResourceId": ACCOUNT_ID,
                "ResourceType": "AWS::S3::AccountPublicAccessBlock",
                "ResourceName": "",
            }
        ],
    )
    assert not result["BaseConfigurationItems"]

    # Create a PAB:
    client.put_public_access_block(
        AccountId=ACCOUNT_ID,
        PublicAccessBlockConfiguration={
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        },
    )

    # The configuration body Config should report (camelCase keys):
    proper_config = {
        "blockPublicAcls": True,
        "ignorePublicAcls": True,
        "blockPublicPolicy": True,
        "restrictPublicBuckets": True,
    }
    result = config_client.get_resource_config_history(
        resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
    )
    # The configuration payload is a JSON string:
    assert (
        json.loads(result["configurationItems"][0]["configuration"])
        == proper_config
    )
    # For an account-level resource the resource ID is the account ID:
    assert (
        result["configurationItems"][0]["accountId"]
        == result["configurationItems"][0]["resourceId"]
        == ACCOUNT_ID
    )
    result = config_client.batch_get_resource_config(
        resourceKeys=[
            {
                "resourceType": "AWS::S3::AccountPublicAccessBlock",
                "resourceId": ACCOUNT_ID,
            }
        ]
    )
    assert len(result["baseConfigurationItems"]) == 1
    assert (
        json.loads(result["baseConfigurationItems"][0]["configuration"])
        == proper_config
    )
    assert (
        result["baseConfigurationItems"][0]["accountId"]
        == result["baseConfigurationItems"][0]["resourceId"]
        == ACCOUNT_ID
    )

    # The aggregator can resolve the PAB from every s3control region:
    for region in Session().get_available_regions("s3control"):
        result = config_client.batch_get_aggregate_resource_config(
            ConfigurationAggregatorName="testing",
            ResourceIdentifiers=[
                {
                    "SourceAccountId": ACCOUNT_ID,
                    "SourceRegion": region,
                    "ResourceId": ACCOUNT_ID,
                    "ResourceType": "AWS::S3::AccountPublicAccessBlock",
                    "ResourceName": "",
                }
            ],
        )
        assert len(result["BaseConfigurationItems"]) == 1
        assert (
            json.loads(result["BaseConfigurationItems"][0]["configuration"])
            == proper_config
        )
@mock_s3_deprecated @mock_s3_deprecated
def test_ranged_get(): def test_ranged_get():
@ -1768,6 +2143,34 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
obj2_version_new.should_not.equal(None) obj2_version_new.should_not.equal(None)
@mock_s3
def test_boto3_copy_object_with_replacement_tagging():
    """CopyObject honours TaggingDirective: REPLACE installs the new tag
    set, while COPY carries over the source object's tags."""
    s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
    s3.create_bucket(Bucket="mybucket")
    s3.put_object(
        Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old"
    )

    source = {"Bucket": "mybucket", "Key": "original"}
    # REPLACE: ignore the source tags and apply the supplied ones.
    s3.copy_object(
        CopySource=source,
        Bucket="mybucket",
        Key="copy1",
        TaggingDirective="REPLACE",
        Tagging="tag=new",
    )
    # COPY: inherit the source object's tags.
    s3.copy_object(
        CopySource=source,
        Bucket="mybucket",
        Key="copy2",
        TaggingDirective="COPY",
    )

    replaced_tags = s3.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"]
    replaced_tags.should.equal([{"Key": "tag", "Value": "new"}])

    copied_tags = s3.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"]
    copied_tags.should.equal([{"Key": "tag", "Value": "old"}])
@mock_s3 @mock_s3
def test_boto3_deleted_versionings_list(): def test_boto3_deleted_versionings_list():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)

View File

@ -148,6 +148,39 @@ def test_workflow_execution_full_dict_representation():
) )
def test_closed_workflow_execution_full_dict_representation():
    """A CLOSED execution's full dict carries the close status/timestamp in
    executionInfo, zeroed open counts, and the type's default config."""
    workflow_type = WorkflowType(
        "test-workflow",
        "v1.0",
        task_list="queue",
        default_child_policy="ABANDON",
        default_execution_start_to_close_timeout="300",
        default_task_start_to_close_timeout="300",
    )
    execution = WorkflowExecution(get_basic_domain(), workflow_type, "ab1234")
    execution.execution_status = "CLOSED"
    execution.close_status = "CANCELED"
    execution.close_timestamp = 1420066801.123

    full = execution.to_full_dict()

    # executionInfo is the medium dict plus the close details:
    expected_info = execution.to_medium_dict()
    expected_info["closeStatus"] = "CANCELED"
    expected_info["closeTimestamp"] = 1420066801.123
    full["executionInfo"].should.equal(expected_info)

    for counter in ("openTimers", "openDecisionTasks", "openActivityTasks"):
        full["openCounts"][counter].should.equal(0)

    full["executionConfiguration"].should.equal(
        {
            "childPolicy": "ABANDON",
            "executionStartToCloseTimeout": "300",
            "taskList": {"name": "queue"},
            "taskStartToCloseTimeout": "300",
        }
    )
def test_workflow_execution_list_dict_representation(): def test_workflow_execution_list_dict_representation():
domain = get_basic_domain() domain = get_basic_domain()
wf_type = WorkflowType( wf_type = WorkflowType(

View File

@ -1,7 +1,9 @@
import sure import sure
import boto import boto
import boto3
from moto import mock_swf_deprecated from moto import mock_swf_deprecated
from moto import mock_swf
from boto.swf.exceptions import SWFResponseError from boto.swf.exceptions import SWFResponseError
@ -133,6 +135,41 @@ def test_describe_workflow_type():
infos["status"].should.equal("REGISTERED") infos["status"].should.equal("REGISTERED")
@mock_swf
def test_describe_workflow_type_full_boto3():
    """Register a workflow type with every optional default set and verify
    DescribeWorkflowType echoes each one back.

    Uses boto3 because the legacy boto client does not support all of
    these registration arguments.
    """
    client = boto3.client("swf", region_name="us-east-1")
    client.register_domain(
        name="test-domain", workflowExecutionRetentionPeriodInDays="2"
    )
    client.register_workflow_type(
        domain="test-domain",
        name="test-workflow",
        version="v1.0",
        description="Test workflow.",
        defaultTaskStartToCloseTimeout="20",
        defaultExecutionStartToCloseTimeout="60",
        defaultTaskList={"name": "foo"},
        defaultTaskPriority="-2",
        defaultChildPolicy="ABANDON",
        defaultLambdaRole="arn:bar",
    )

    resp = client.describe_workflow_type(
        domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"}
    )

    type_info = resp["typeInfo"]
    type_info["workflowType"]["name"].should.equal("test-workflow")
    type_info["workflowType"]["version"].should.equal("v1.0")
    type_info["status"].should.equal("REGISTERED")
    type_info["description"].should.equal("Test workflow.")

    # Every default supplied at registration must round-trip:
    configuration = resp["configuration"]
    for key, expected in (
        ("defaultTaskStartToCloseTimeout", "20"),
        ("defaultExecutionStartToCloseTimeout", "60"),
        ("defaultTaskPriority", "-2"),
        ("defaultChildPolicy", "ABANDON"),
        ("defaultLambdaRole", "arn:bar"),
    ):
        configuration[key].should.equal(expected)
    configuration["defaultTaskList"]["name"].should.equal("foo")
@mock_swf_deprecated @mock_swf_deprecated
def test_describe_non_existent_workflow_type(): def test_describe_non_existent_workflow_type():
conn = boto.connect_swf("the_key", "the_secret") conn = boto.connect_swf("the_key", "the_secret")

View File

@ -0,0 +1,79 @@
import sure
from moto.utilities.tagging_service import TaggingService
def test_list_empty():
    """A resource that has never been tagged lists an empty tag set."""
    service = TaggingService()
    service.list_tags_for_resource("test").should.equal({"Tags": []})
def test_create_tag():
    """Custom container/key/value names are honoured when tags are listed."""
    service = TaggingService("TheTags", "TagKey", "TagValue")
    service.tag_resource("arn", [{"TagKey": "key_key", "TagValue": "value_value"}])
    listed = service.list_tags_for_resource("arn")
    listed.should.equal({"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]})
def test_create_tag_without_value():
    """A tag supplied without a value is stored with Value=None."""
    service = TaggingService()
    service.tag_resource("arn", [{"Key": "key_key"}])
    listed = service.list_tags_for_resource("arn")
    listed.should.equal({"Tags": [{"Key": "key_key", "Value": None}]})
def test_delete_tag_using_names():
    """Untagging by key name removes the matching tag."""
    service = TaggingService()
    service.tag_resource("arn", [{"Key": "key_key", "Value": "value_value"}])
    service.untag_resource_using_names("arn", ["key_key"])
    service.list_tags_for_resource("arn").should.equal({"Tags": []})
def test_delete_all_tags_for_resource():
    """delete_all_tags_for_resource wipes tags accumulated over multiple
    tag_resource calls."""
    service = TaggingService()
    service.tag_resource("arn", [{"Key": "key_key", "Value": "value_value"}])
    service.tag_resource("arn", [{"Key": "key_key2", "Value": "value_value2"}])
    service.delete_all_tags_for_resource("arn")
    service.list_tags_for_resource("arn").should.equal({"Tags": []})
def test_list_empty_delete():
    """Untagging a resource that has no tags is a harmless no-op."""
    service = TaggingService()
    service.untag_resource_using_names("arn", ["key_key"])
    service.list_tags_for_resource("arn").should.equal({"Tags": []})
def test_delete_tag_using_tags():
    """Untagging with full tag dicts removes the matching tags."""
    service = TaggingService()
    tag_set = [{"Key": "key_key", "Value": "value_value"}]
    service.tag_resource("arn", tag_set)
    service.untag_resource_using_tags("arn", tag_set)
    service.list_tags_for_resource("arn").should.equal({"Tags": []})
def test_extract_tag_names():
    """extract_tag_names returns just the key names, preserving order."""
    service = TaggingService()
    tag_set = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}]
    service.extract_tag_names(tag_set).should.equal(["key1", "key2"])