commit c7d3e1f8b6
Merge remote-tracking branch 'upstream/master' into feat/cognitoidp-add-update-identity-provider
3  .gitignore  (vendored)
@@ -16,4 +16,5 @@ python_env
 .pytest_cache/
 venv/
 .python-version
 .vscode/
+tests/file.tmp
IMPLEMENTATION_COVERAGE.md

@@ -477,12 +477,12 @@
 - [X] create_stack
 - [ ] create_stack_instances
 - [ ] create_stack_set
-- [ ] delete_change_set
+- [X] delete_change_set
 - [X] delete_stack
 - [ ] delete_stack_instances
 - [ ] delete_stack_set
 - [ ] describe_account_limits
-- [ ] describe_change_set
+- [X] describe_change_set
 - [ ] describe_stack_events
 - [ ] describe_stack_instance
 - [ ] describe_stack_resource

@@ -495,7 +495,7 @@
 - [ ] get_stack_policy
 - [ ] get_template
 - [ ] get_template_summary
-- [ ] list_change_sets
+- [X] list_change_sets
 - [X] list_exports
 - [ ] list_imports
 - [ ] list_stack_instances
@@ -2208,7 +2208,7 @@
 - [ ] describe_event_types
 - [ ] describe_events
 
-## iam - 48% implemented
+## iam - 62% implemented
 - [ ] add_client_id_to_open_id_connect_provider
 - [X] add_role_to_instance_profile
 - [X] add_user_to_group

@@ -2247,7 +2247,7 @@
 - [X] delete_server_certificate
 - [ ] delete_service_linked_role
 - [ ] delete_service_specific_credential
-- [ ] delete_signing_certificate
+- [X] delete_signing_certificate
 - [ ] delete_ssh_public_key
 - [X] delete_user
 - [X] delete_user_policy

@@ -2279,7 +2279,7 @@
 - [ ] get_ssh_public_key
 - [X] get_user
 - [X] get_user_policy
-- [ ] list_access_keys
+- [X] list_access_keys
 - [X] list_account_aliases
 - [X] list_attached_group_policies
 - [X] list_attached_role_policies

@@ -2287,19 +2287,21 @@
 - [ ] list_entities_for_policy
 - [X] list_group_policies
 - [X] list_groups
-- [ ] list_groups_for_user
-- [ ] list_instance_profiles
-- [ ] list_instance_profiles_for_role
+- [X] list_groups_for_user
+- [X] list_instance_profiles
+- [X] list_instance_profiles_for_role
 - [X] list_mfa_devices
 - [ ] list_open_id_connect_providers
 - [X] list_policies
 - [X] list_policy_versions
 - [X] list_role_policies
-- [ ] list_roles
+- [X] list_roles
+- [X] list_role_tags
+- [ ] list_user_tags
 - [X] list_saml_providers
-- [ ] list_server_certificates
+- [X] list_server_certificates
 - [ ] list_service_specific_credentials
-- [ ] list_signing_certificates
+- [X] list_signing_certificates
 - [ ] list_ssh_public_keys
 - [X] list_user_policies
 - [X] list_users

@@ -2315,6 +2317,10 @@
 - [ ] set_default_policy_version
 - [ ] simulate_custom_policy
 - [ ] simulate_principal_policy
+- [X] tag_role
+- [ ] tag_user
+- [X] untag_role
+- [ ] untag_user
 - [X] update_access_key
 - [ ] update_account_password_policy
 - [ ] update_assume_role_policy

@@ -2326,11 +2332,11 @@
 - [X] update_saml_provider
 - [ ] update_server_certificate
 - [ ] update_service_specific_credential
-- [ ] update_signing_certificate
+- [X] update_signing_certificate
 - [ ] update_ssh_public_key
 - [ ] update_user
-- [ ] upload_server_certificate
-- [ ] upload_signing_certificate
+- [X] upload_server_certificate
+- [X] upload_signing_certificate
 - [ ] upload_ssh_public_key
 
 ## importexport - 0% implemented
@@ -3542,7 +3548,7 @@
 - [ ] get_bucket_inventory_configuration
 - [ ] get_bucket_lifecycle
 - [ ] get_bucket_lifecycle_configuration
-- [ ] get_bucket_location
+- [X] get_bucket_location
 - [ ] get_bucket_logging
 - [ ] get_bucket_metrics_configuration
 - [ ] get_bucket_notification
@@ -3913,7 +3919,7 @@
 - [ ] delete_message_batch
 - [X] delete_queue
 - [ ] get_queue_attributes
-- [ ] get_queue_url
+- [X] get_queue_url
 - [X] list_dead_letter_source_queues
 - [ ] list_queue_tags
 - [X] list_queues
README.md

@@ -70,10 +70,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | CloudwatchEvents | @mock_events | all endpoints done |
 |------------------------------------------------------------------------------|
 | Cognito Identity | @mock_cognitoidentity| basic endpoints done |
 |------------------------------------------------------------------------------|
 | Cognito Identity Provider | @mock_cognitoidp| basic endpoints done |
 |------------------------------------------------------------------------------|
+| Config | @mock_config | basic endpoints done |
+|------------------------------------------------------------------------------|
 | Data Pipeline | @mock_datapipeline| basic endpoints done |
 |------------------------------------------------------------------------------|
 | DynamoDB | @mock_dynamodb | core endpoints done |
moto/__init__.py

@@ -13,6 +13,7 @@ from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated
 from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated  # flake8: noqa
 from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated  # flake8: noqa
 from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated  # flake8: noqa
+from .config import mock_config  # flake8: noqa
 from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated  # flake8: noqa
 from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated  # flake8: noqa
 from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated  # flake8: noqa
moto/acm/models.py

@@ -243,7 +243,7 @@ class CertBundle(BaseModel):
             'KeyAlgorithm': key_algo,
             'NotAfter': datetime_to_epoch(self._cert.not_valid_after),
             'NotBefore': datetime_to_epoch(self._cert.not_valid_before),
-            'Serial': self._cert.serial,
+            'Serial': self._cert.serial_number,
             'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''),
             'Status': self.status,  # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
             'Subject': 'CN={0}'.format(self.common_name),
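The `Serial` change above tracks a rename in the `cryptography` package: `x509.Certificate.serial` was deprecated in favour of `serial_number`. A minimal sketch of the renamed accessor (the file name is illustrative):

```python
# Reading the fields CertBundle.describe() uses, via the cryptography package.
# 'cert.pem' is a placeholder for any PEM-encoded certificate.
from cryptography import x509
from cryptography.hazmat.backends import default_backend

with open('cert.pem', 'rb') as f:
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

print(cert.serial_number)    # integer serial, replaces the deprecated cert.serial
print(cert.not_valid_after)  # datetime fed to datetime_to_epoch() above
```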
moto/awslambda/models.py

@@ -500,6 +500,11 @@ class LambdaStorage(object):
         except ValueError:
             return self._functions[name]['latest']
 
+    def list_versions_by_function(self, name):
+        if name not in self._functions:
+            return None
+        return [self._functions[name]['latest']]
+
     def get_arn(self, arn):
         return self._arns.get(arn, None)

@@ -607,6 +612,9 @@ class LambdaBackend(BaseBackend):
     def get_function(self, function_name, qualifier=None):
         return self._lambdas.get_function(function_name, qualifier)
 
+    def list_versions_by_function(self, function_name):
+        return self._lambdas.list_versions_by_function(function_name)
+
     def get_function_by_arn(self, function_arn):
         return self._lambdas.get_arn(function_arn)
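With the storage and backend halves in place, the new route can be exercised through boto3. A hedged sketch (the role ARN, function name, and runtime are placeholders; this storage only tracks `$LATEST`, so one version comes back):

```python
import io
import zipfile

import boto3
from moto import mock_lambda


def _zip_bytes():
    # Minimal deployment package so create_function has something to store.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('lambda_function.py', 'def handler(event, context):\n    return event\n')
    return buf.getvalue()


@mock_lambda
def test_list_versions_by_function():
    client = boto3.client('lambda', region_name='us-east-1')
    client.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='arn:aws:iam::123456789012:role/test-role',
        Handler='lambda_function.handler',
        Code={'ZipFile': _zip_bytes()},
    )
    versions = client.list_versions_by_function(FunctionName='testFunction')['Versions']
    assert len(versions) == 1  # only the '$LATEST' configuration is returned
```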
moto/awslambda/responses.py

@@ -52,7 +52,11 @@ class LambdaResponse(BaseResponse):
         self.setup_class(request, full_url, headers)
         if request.method == 'GET':
             # This is ListVersionsByFunction
-            raise ValueError("Cannot handle request")
+            path = request.path if hasattr(request, 'path') else path_url(request.url)
+            function_name = path.split('/')[-2]
+            return self._list_versions_by_function(function_name)
+
         elif request.method == 'POST':
             return self._publish_function(request, full_url, headers)
         else:

@@ -151,6 +155,19 @@ class LambdaResponse(BaseResponse):
 
         return 200, {}, json.dumps(result)
 
+    def _list_versions_by_function(self, function_name):
+        result = {
+            'Versions': []
+        }
+
+        functions = self.lambda_backend.list_versions_by_function(function_name)
+        if functions:
+            for fn in functions:
+                json_data = fn.get_configuration()
+                result['Versions'].append(json_data)
+
+        return 200, {}, json.dumps(result)
+
     def _create_function(self, request, full_url, headers):
         try:
             fn = self.lambda_backend.create_function(self.json_body)
moto/backends.py

@@ -46,7 +46,7 @@ from moto.iot import iot_backends
 from moto.iotdata import iotdata_backends
 from moto.batch import batch_backends
 from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends
-
+from moto.config import config_backends
 
 BACKENDS = {
     'acm': acm_backends,

@@ -57,6 +57,7 @@ BACKENDS = {
     'cloudwatch': cloudwatch_backends,
     'cognito-identity': cognitoidentity_backends,
     'cognito-idp': cognitoidp_backends,
+    'config': config_backends,
     'datapipeline': datapipeline_backends,
     'dynamodb': dynamodb_backends,
     'dynamodb2': dynamodb_backends2,
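Each value in `BACKENDS` is itself a mapping from region name to a backend instance, so service dispatch is a two-level lookup. An illustrative probe, assuming the layout registered above:

```python
# Two-level lookup: service name -> region -> backend instance.
from moto.backends import BACKENDS

backend = BACKENDS['config']['us-east-1']
print(type(backend).__name__)  # ConfigBackend
```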
moto/cloudformation/models.py

@@ -127,6 +127,49 @@ class FakeStack(BaseModel):
         self.status = "DELETE_COMPLETE"
 
 
+class FakeChange(BaseModel):
+
+    def __init__(self, action, logical_resource_id, resource_type):
+        self.action = action
+        self.logical_resource_id = logical_resource_id
+        self.resource_type = resource_type
+
+
+class FakeChangeSet(FakeStack):
+
+    def __init__(self, stack_id, stack_name, stack_template, change_set_id, change_set_name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None):
+        super(FakeChangeSet, self).__init__(
+            stack_id,
+            stack_name,
+            stack_template,
+            parameters,
+            region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=cross_stack_resources,
+            create_change_set=True,
+        )
+        self.stack_name = stack_name
+        self.change_set_id = change_set_id
+        self.change_set_name = change_set_name
+        self.changes = self.diff(template=template, parameters=parameters)
+
+    def diff(self, template, parameters=None):
+        self.template = template
+        self._parse_template()
+        changes = []
+        resources_by_action = self.resource_map.diff(self.template_dict, parameters)
+        for action, resources in resources_by_action.items():
+            for resource_name, resource in resources.items():
+                changes.append(FakeChange(
+                    action=action,
+                    logical_resource_id=resource_name,
+                    resource_type=resource['ResourceType'],
+                ))
+        return changes
+
+
 class FakeEvent(BaseModel):
 
     def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):

@@ -171,24 +214,62 @@ class CloudFormationBackend(BaseBackend):
         return new_stack
 
+    def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
+        stack_id = None
+        stack_template = None
+        if change_set_type == 'UPDATE':
+            stacks = self.stacks.values()
+            stack = None
+            for s in stacks:
+                if s.name == stack_name:
+                    stack = s
+                    stack_id = stack.stack_id
+                    stack_template = stack.template
+            if stack is None:
+                raise ValidationError(stack_name)
+        else:
-            stack = self.create_stack(stack_name, template, parameters,
-                                      region_name, notification_arns, tags,
-                                      role_arn, create_change_set=True)
+            stack_id = generate_stack_id(stack_name)
+            stack_template = template
 
         change_set_id = generate_changeset_id(change_set_name, region_name)
-        self.stacks[change_set_name] = {'Id': change_set_id,
-                                        'StackId': stack.stack_id}
-        self.change_sets[change_set_id] = stack
-        return change_set_id, stack.stack_id
+        new_change_set = FakeChangeSet(
+            stack_id=stack_id,
+            stack_name=stack_name,
+            stack_template=stack_template,
+            change_set_id=change_set_id,
+            change_set_name=change_set_name,
+            template=template,
+            parameters=parameters,
+            region_name=region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=self.exports
+        )
+        self.change_sets[change_set_id] = new_change_set
+        self.stacks[stack_id] = new_change_set
+        return change_set_id, stack_id
 
+    def delete_change_set(self, change_set_name, stack_name=None):
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            del self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    del self.change_sets[cs]
+
+    def describe_change_set(self, change_set_name, stack_name=None):
+        change_set = None
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            change_set = self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    change_set = self.change_sets[cs]
+        if change_set is None:
+            raise ValidationError(change_set_name)
+        return change_set
+
     def execute_change_set(self, change_set_name, stack_name=None):
         stack = None

@@ -197,7 +278,7 @@ class CloudFormationBackend(BaseBackend):
             stack = self.change_sets[change_set_name]
         else:
             for cs in self.change_sets:
-                if self.change_sets[cs].name == change_set_name:
+                if self.change_sets[cs].change_set_name == change_set_name:
                     stack = self.change_sets[cs]
         if stack is None:
             raise ValidationError(stack_name)

@@ -223,6 +304,9 @@ class CloudFormationBackend(BaseBackend):
         else:
             return list(stacks)
 
+    def list_change_sets(self):
+        return self.change_sets.values()
+
     def list_stacks(self):
         return [
             v for v in self.stacks.values()
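Together with the response plumbing further below, the backend now covers the full change-set lifecycle. A hedged end-to-end sketch under `@mock_cloudformation` (template, stack, and change-set names are placeholders):

```python
import json

import boto3
from moto import mock_cloudformation

TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {"Type": "AWS::SQS::Queue", "Properties": {"QueueName": "my-queue"}},
    },
})


@mock_cloudformation
def test_change_set_lifecycle():
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_change_set(
        StackName='test-stack',
        ChangeSetName='test-change-set',
        TemplateBody=TEMPLATE,
        ChangeSetType='CREATE',
    )

    described = cf.describe_change_set(ChangeSetName='test-change-set', StackName='test-stack')
    assert described['ChangeSetName'] == 'test-change-set'

    summaries = cf.list_change_sets(StackName='test-stack')['Summaries']
    assert summaries[0]['ChangeSetId'] == described['ChangeSetId']

    # Deleting by ARN takes the direct-lookup branch in delete_change_set().
    cf.delete_change_set(ChangeSetName=described['ChangeSetId'])
```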
moto/cloudformation/parsing.py

@@ -465,36 +465,70 @@ class ResourceMap(collections.Mapping):
             ec2_models.ec2_backends[self._region_name].create_tags(
                 [self[resource].physical_resource_id], self.tags)
 
-    def update(self, template, parameters=None):
+    def diff(self, template, parameters=None):
         if parameters:
             self.input_parameters = parameters
         self.load_mapping()
         self.load_parameters()
         self.load_conditions()
 
         old_template = self._resource_json_map
         new_template = template['Resources']
+
+        resource_names_by_action = {
+            'Add': set(new_template) - set(old_template),
+            'Modify': set(name for name in new_template if name in old_template and new_template[
+                name] != old_template[name]),
+            'Remove': set(old_template) - set(new_template)
+        }
+        resources_by_action = {
+            'Add': {},
+            'Modify': {},
+            'Remove': {},
+        }
+
+        for resource_name in resource_names_by_action['Add']:
+            resources_by_action['Add'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': new_template[resource_name]['Type']
+            }
+
+        for resource_name in resource_names_by_action['Modify']:
+            resources_by_action['Modify'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': new_template[resource_name]['Type']
+            }
+
+        for resource_name in resource_names_by_action['Remove']:
+            resources_by_action['Remove'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': old_template[resource_name]['Type']
+            }
+
+        return resources_by_action
+
+    def update(self, template, parameters=None):
+        resources_by_action = self.diff(template, parameters)
+
+        old_template = self._resource_json_map
+        new_template = template['Resources']
         self._resource_json_map = new_template
 
-        new_resource_names = set(new_template) - set(old_template)
-        for resource_name in new_resource_names:
+        for resource_name, resource in resources_by_action['Add'].items():
             resource_json = new_template[resource_name]
             new_resource = parse_and_create_resource(
                 resource_name, resource_json, self, self._region_name)
             self._parsed_resources[resource_name] = new_resource
 
-        removed_resource_names = set(old_template) - set(new_template)
-        for resource_name in removed_resource_names:
+        for resource_name, resource in resources_by_action['Remove'].items():
             resource_json = old_template[resource_name]
             parse_and_delete_resource(
                 resource_name, resource_json, self, self._region_name)
             self._parsed_resources.pop(resource_name)
 
-        resources_to_update = set(name for name in new_template if name in old_template and new_template[
-            name] != old_template[name])
         tries = 1
-        while resources_to_update and tries < 5:
-            for resource_name in resources_to_update.copy():
+        while resources_by_action['Modify'] and tries < 5:
+            for resource_name, resource in resources_by_action['Modify'].copy().items():
                 resource_json = new_template[resource_name]
                 try:
                     changed_resource = parse_and_update_resource(

@@ -505,7 +539,7 @@ class ResourceMap(collections.Mapping):
                     last_exception = e
                 else:
                     self._parsed_resources[resource_name] = changed_resource
-                    resources_to_update.remove(resource_name)
+                    del resources_by_action['Modify'][resource_name]
             tries += 1
         if tries == 5:
             raise last_exception
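The classification in `diff()` is plain set arithmetic over the two `Resources` maps. A standalone illustration of the rule it applies:

```python
# Names only in the new template are 'Add', names in both with different
# bodies are 'Modify', names only in the old template are 'Remove'.
old = {
    'Queue': {'Type': 'AWS::SQS::Queue', 'Properties': {'QueueName': 'a'}},
    'Topic': {'Type': 'AWS::SNS::Topic'},
}
new = {
    'Queue': {'Type': 'AWS::SQS::Queue', 'Properties': {'QueueName': 'b'}},
    'Table': {'Type': 'AWS::DynamoDB::Table'},
}

print(set(new) - set(old))                                # {'Table'} -> Add
print({n for n in new if n in old and new[n] != old[n]})  # {'Queue'} -> Modify
print(set(old) - set(new))                                # {'Topic'} -> Remove
```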
moto/cloudformation/responses.py

@@ -120,6 +120,31 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE)
         return template.render(stack_id=stack_id, change_set_id=change_set_id)
 
+    def delete_change_set(self):
+        stack_name = self._get_param('StackName')
+        change_set_name = self._get_param('ChangeSetName')
+
+        self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name)
+        if self.request_json:
+            return json.dumps({
+                'DeleteChangeSetResponse': {
+                    'DeleteChangeSetResult': {},
+                }
+            })
+        else:
+            template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE)
+            return template.render()
+
+    def describe_change_set(self):
+        stack_name = self._get_param('StackName')
+        change_set_name = self._get_param('ChangeSetName')
+        change_set = self.cloudformation_backend.describe_change_set(
+            change_set_name=change_set_name,
+            stack_name=stack_name,
+        )
+        template = self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE)
+        return template.render(change_set=change_set)
+
     @amzn_request_id
     def execute_change_set(self):
         stack_name = self._get_param('StackName')

@@ -187,6 +212,11 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE)
         return template.render(stack=stack)
 
+    def list_change_sets(self):
+        change_sets = self.cloudformation_backend.list_change_sets()
+        template = self.response_template(LIST_CHANGE_SETS_RESPONSE)
+        return template.render(change_sets=change_sets)
+
     def list_stacks(self):
         stacks = self.cloudformation_backend.list_stacks()
         template = self.response_template(LIST_STACKS_RESPONSE)

@@ -354,6 +384,66 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """<CreateStackResponse>
 </CreateStackResponse>
 """
 
+DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """<DeleteChangeSetResponse>
+  <DeleteChangeSetResult>
+  </DeleteChangeSetResult>
+  <ResponseMetadata>
+    <RequestId>3d3200a1-810e-3023-6cc3-example</RequestId>
+  </ResponseMetadata>
+</DeleteChangeSetResponse>
+"""
+
+DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """<DescribeChangeSetResponse>
+  <DescribeChangeSetResult>
+    <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
+    <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
+    <StackId>{{ change_set.stack_id }}</StackId>
+    <StackName>{{ change_set.stack_name }}</StackName>
+    <Description>{{ change_set.description }}</Description>
+    <Parameters>
+      {% for param_name, param_value in change_set.stack_parameters.items() %}
+      <member>
+        <ParameterKey>{{ param_name }}</ParameterKey>
+        <ParameterValue>{{ param_value }}</ParameterValue>
+      </member>
+      {% endfor %}
+    </Parameters>
+    <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+    <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
+    <Status>{{ change_set.status }}</Status>
+    <StatusReason>{{ change_set.status_reason }}</StatusReason>
+    {% if change_set.notification_arns %}
+    <NotificationARNs>
+      {% for notification_arn in change_set.notification_arns %}
+      <member>{{ notification_arn }}</member>
+      {% endfor %}
+    </NotificationARNs>
+    {% else %}
+    <NotificationARNs/>
+    {% endif %}
+    {% if change_set.role_arn %}
+    <RoleARN>{{ change_set.role_arn }}</RoleARN>
+    {% endif %}
+    {% if change_set.changes %}
+    <Changes>
+      {% for change in change_set.changes %}
+      <member>
+        <Type>Resource</Type>
+        <ResourceChange>
+          <Action>{{ change.action }}</Action>
+          <LogicalResourceId>{{ change.logical_resource_id }}</LogicalResourceId>
+          <ResourceType>{{ change.resource_type }}</ResourceType>
+        </ResourceChange>
+      </member>
+      {% endfor %}
+    </Changes>
+    {% endif %}
+    {% if next_token %}
+    <NextToken>{{ next_token }}</NextToken>
+    {% endif %}
+  </DescribeChangeSetResult>
+</DescribeChangeSetResponse>"""
 
 EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """<ExecuteChangeSetResponse>
   <ExecuteChangeSetResult>
     <ExecuteChangeSetResult/>

@@ -479,6 +569,27 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """<DescribeStackEventsResponse xmlns="http://c
 </DescribeStackEventsResponse>"""
 
 
+LIST_CHANGE_SETS_RESPONSE = """<ListChangeSetsResponse>
+  <ListChangeSetsResult>
+    <Summaries>
+      {% for change_set in change_sets %}
+      <member>
+        <StackId>{{ change_set.stack_id }}</StackId>
+        <StackName>{{ change_set.stack_name }}</StackName>
+        <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
+        <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
+        <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
+        <Status>{{ change_set.status }}</Status>
+        <StatusReason>{{ change_set.status_reason }}</StatusReason>
+        <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+        <Description>{{ change_set.description }}</Description>
+      </member>
+      {% endfor %}
+    </Summaries>
+  </ListChangeSetsResult>
+</ListChangeSetsResponse>"""
 
 
 LIST_STACKS_RESPONSE = """<ListStacksResponse>
   <ListStacksResult>
     <StackSummaries>
4  moto/config/__init__.py  (new file)

@@ -0,0 +1,4 @@
from .models import config_backends
from ..core.models import base_decorator

mock_config = base_decorator(config_backends)
149  moto/config/exceptions.py  (new file)

@@ -0,0 +1,149 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class NameTooLongException(JsonRESTError):
    code = 400

    def __init__(self, name, location):
        message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \
                  ' constraint: Member must have length less than or equal to 256'.format(name=name, location=location)
        super(NameTooLongException, self).__init__("ValidationException", message)


class InvalidConfigurationRecorderNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException",
                                                                        message)


class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \
                  'configuration recorders: 1 is reached.'.format(name=name)
        super(MaxNumberOfConfigurationRecordersExceededException, self).__init__(
            "MaxNumberOfConfigurationRecordersExceededException", message)


class InvalidRecordingGroupException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The recording group provided is not valid'
        super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message)


class InvalidResourceTypeException(JsonRESTError):
    code = 400

    def __init__(self, bad_list, good_list):
        message = '{num} validation error detected: Value \'{bad_list}\' at ' \
                  '\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \
                  'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format(
                      num=len(bad_list), bad_list=bad_list, good_list=good_list)
        # For PY2:
        message = str(message)

        super(InvalidResourceTypeException, self).__init__("ValidationException", message)


class NoSuchConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name)
        super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message)


class InvalidDeliveryChannelNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException",
                                                                  message)


class NoSuchBucketException(JsonRESTError):
    """We are *only* validating that there is value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'Cannot find a S3 bucket with an empty bucket name.'
        super(NoSuchBucketException, self).__init__("NoSuchBucketException", message)


class InvalidS3KeyPrefixException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.'
        super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message)


class InvalidSNSTopicARNException(JsonRESTError):
    """We are *only* validating that there is value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'The sns topic arn \'\' is not valid.'
        super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message)


class InvalidDeliveryFrequency(JsonRESTError):
    code = 400

    def __init__(self, value, good_list):
        message = '1 validation error detected: Value \'{value}\' at ' \
                  '\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \
                  'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list)
        super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message)


class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \
                  'delivery channels: 1 is reached.'.format(name=name)
        super(MaxNumberOfDeliveryChannelsExceededException, self).__init__(
            "MaxNumberOfDeliveryChannelsExceededException", message)


class NoSuchDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name)
        super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message)


class NoAvailableConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Configuration recorder is not available to put delivery channel.'
        super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException",
                                                                        message)


class NoAvailableDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Delivery channel is not available to start configuration recorder.'
        super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message)


class LastDeliveryChannelDeleteFailedException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to delete last specified delivery channel with name \'{name}\' because there ' \
                  'is a running configuration recorder.'.format(name=name)
        super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message)
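Each of these `JsonRESTError` subclasses surfaces to a boto3 caller as a `ClientError` whose error code is the first argument passed to `__init__`. A hedged sketch of the error path (region and payload are illustrative):

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_config


@mock_config
def test_blank_recorder_name_is_rejected():
    client = boto3.client('config', region_name='us-east-1')
    try:
        client.put_configuration_recorder(ConfigurationRecorder={'name': ''})
    except ClientError as exc:
        # The error code mirrors the first __init__ argument above.
        assert exc.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException'
    else:
        raise AssertionError('expected a ClientError')
```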
335  moto/config/models.py  (new file)

@@ -0,0 +1,335 @@
import json
import time
import pkg_resources

from datetime import datetime

from boto3 import Session

from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \
    InvalidConfigurationRecorderNameException, NameTooLongException, \
    MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \
    NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \
    InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \
    InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
    NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException

from moto.core import BaseBackend, BaseModel

DEFAULT_ACCOUNT_ID = 123456789012


def datetime2int(date):
    return int(time.mktime(date.timetuple()))


def snake_to_camels(original):
    parts = original.split('_')

    camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:])
    camel_cased = camel_cased.replace('Arn', 'ARN')  # Config uses 'ARN' instead of 'Arn'

    return camel_cased


class ConfigEmptyDictable(BaseModel):
    """Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON."""

    def to_dict(self):
        data = {}
        for item, value in self.__dict__.items():
            if value is not None:
                if isinstance(value, ConfigEmptyDictable):
                    data[snake_to_camels(item)] = value.to_dict()
                else:
                    data[snake_to_camels(item)] = value

        return data


class ConfigRecorderStatus(ConfigEmptyDictable):

    def __init__(self, name):
        self.name = name

        self.recording = False
        self.last_start_time = None
        self.last_stop_time = None
        self.last_status = None
        self.last_error_code = None
        self.last_error_message = None
        self.last_status_change_time = None

    def start(self):
        self.recording = True
        self.last_status = 'PENDING'
        self.last_start_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())

    def stop(self):
        self.recording = False
        self.last_stop_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())


class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):

    def __init__(self, delivery_frequency):
        self.delivery_frequency = delivery_frequency


class ConfigDeliveryChannel(ConfigEmptyDictable):

    def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None):
        self.name = name
        self.s3_bucket_name = s3_bucket_name
        self.s3_key_prefix = prefix
        self.sns_topic_arn = sns_arn
        self.config_snapshot_delivery_properties = snapshot_properties


class RecordingGroup(ConfigEmptyDictable):

    def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None):
        self.all_supported = all_supported
        self.include_global_resource_types = include_global_resource_types
        self.resource_types = resource_types


class ConfigRecorder(ConfigEmptyDictable):

    def __init__(self, role_arn, recording_group, name='default', status=None):
        self.name = name
        self.role_arn = role_arn
        self.recording_group = recording_group

        if not status:
            self.status = ConfigRecorderStatus(name)
        else:
            self.status = status


class ConfigBackend(BaseBackend):

    def __init__(self):
        self.recorders = {}
        self.delivery_channels = {}

    @staticmethod
    def _validate_resource_types(resource_list):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that each entry exists in the supported list:
        bad_list = []
        for resource in resource_list:
            # For PY2:
            r_str = str(resource)

            if r_str not in config_schema['shapes']['ResourceType']['enum']:
                bad_list.append(r_str)

        if bad_list:
            raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum'])

    @staticmethod
    def _validate_delivery_snapshot_properties(properties):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that the deliveryFrequency is set to an acceptable value:
        if properties.get('deliveryFrequency', None) not in \
                config_schema['shapes']['MaximumExecutionFrequency']['enum']:
            raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None),
                                           config_schema['shapes']['MaximumExecutionFrequency']['enum'])

    def put_configuration_recorder(self, config_recorder):
        # Validate the name:
        if not config_recorder.get('name'):
            raise InvalidConfigurationRecorderNameException(config_recorder.get('name'))
        if len(config_recorder.get('name')) > 256:
            raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name')

        # We're going to assume that the passed in Role ARN is correct.

        # Config currently only allows 1 configuration recorder for an account:
        if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']):
            raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name'])

        # Is this updating an existing one?
        recorder_status = None
        if self.recorders.get(config_recorder['name']):
            recorder_status = self.recorders[config_recorder['name']].status

        # Validate the Recording Group:
        if config_recorder.get('recordingGroup') is None:
            recording_group = RecordingGroup()
        else:
            rg = config_recorder['recordingGroup']

            # If an empty dict is passed in, then bad:
            if not rg:
                raise InvalidRecordingGroupException()

            # Can't have both the resource types specified and the other flags as True.
            if rg.get('resourceTypes') and (
                    rg.get('allSupported', False) or
                    rg.get('includeGlobalResourceTypes', False)):
                raise InvalidRecordingGroupException()

            # Must supply resourceTypes if 'allSupported' is not supplied:
            if not rg.get('allSupported') and not rg.get('resourceTypes'):
                raise InvalidRecordingGroupException()

            # Validate that the list provided is correct:
            self._validate_resource_types(rg.get('resourceTypes', []))

            recording_group = RecordingGroup(
                all_supported=rg.get('allSupported', True),
                include_global_resource_types=rg.get('includeGlobalResourceTypes', False),
                resource_types=rg.get('resourceTypes', [])
            )

        self.recorders[config_recorder['name']] = \
            ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'],
                           status=recorder_status)

    def describe_configuration_recorders(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.to_dict())

        return recorders

    def describe_configuration_recorder_status(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].status.to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.status.to_dict())

        return recorders

    def put_delivery_channel(self, delivery_channel):
        # Must have a configuration recorder:
        if not self.recorders:
            raise NoAvailableConfigurationRecorderException()

        # Validate the name:
        if not delivery_channel.get('name'):
            raise InvalidDeliveryChannelNameException(delivery_channel.get('name'))
        if len(delivery_channel.get('name')) > 256:
            raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name')

        # We are going to assume that the bucket exists -- but will verify if the bucket provided is blank:
        if not delivery_channel.get('s3BucketName'):
            raise NoSuchBucketException()

        # We are going to assume that the bucket has the correct policy attached to it. We are only going to verify
        # if the prefix provided is not an empty string:
        if delivery_channel.get('s3KeyPrefix', None) == '':
            raise InvalidS3KeyPrefixException()

        # Ditto for SNS -- Only going to assume that the ARN provided is not an empty string:
        if delivery_channel.get('snsTopicARN', None) == '':
            raise InvalidSNSTopicARNException()

        # Config currently only allows 1 delivery channel for an account:
        if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']):
            raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name'])

        if not delivery_channel.get('configSnapshotDeliveryProperties'):
            dp = None

        else:
            # Validate the config snapshot delivery properties:
            self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties'])

            dp = ConfigDeliverySnapshotProperties(
                delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency'])

        self.delivery_channels[delivery_channel['name']] = \
            ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'],
                                  prefix=delivery_channel.get('s3KeyPrefix', None),
                                  sns_arn=delivery_channel.get('snsTopicARN', None),
                                  snapshot_properties=dp)

    def describe_delivery_channels(self, channel_names):
        channels = []

        if channel_names:
            for cn in channel_names:
                if not self.delivery_channels.get(cn):
                    raise NoSuchDeliveryChannelException(cn)

                # Format the delivery channel:
                channels.append(self.delivery_channels[cn].to_dict())

        else:
            for channel in self.delivery_channels.values():
                channels.append(channel.to_dict())

        return channels

    def start_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Must have a delivery channel available as well:
        if not self.delivery_channels:
            raise NoAvailableDeliveryChannelException()

        # Start recording:
        self.recorders[recorder_name].status.start()

    def stop_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Stop recording:
        self.recorders[recorder_name].status.stop()

    def delete_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        del self.recorders[recorder_name]

    def delete_delivery_channel(self, channel_name):
        if not self.delivery_channels.get(channel_name):
            raise NoSuchDeliveryChannelException(channel_name)

        # Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
        for recorder in self.recorders.values():
            if recorder.status.recording:
                raise LastDeliveryChannelDeleteFailedException(channel_name)

        del self.delivery_channels[channel_name]


config_backends = {}
boto3_session = Session()
for region in boto3_session.get_available_regions('config'):
    config_backends[region] = ConfigBackend()
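A hedged happy-path sketch for the new `@mock_config` decorator; the role ARN and bucket name are placeholders, matching the "assume the role/bucket exist" comments in the backend:

```python
import boto3
from moto import mock_config


@mock_config
def test_recorder_lifecycle():
    client = boto3.client('config', region_name='us-east-1')
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'default',
        'roleARN': 'arn:aws:iam::123456789012:role/config-role',
        'recordingGroup': {'allSupported': True, 'includeGlobalResourceTypes': True},
    })
    # A delivery channel must exist before the recorder may start.
    client.put_delivery_channel(DeliveryChannel={
        'name': 'default',
        's3BucketName': 'my-config-bucket',
    })
    client.start_configuration_recorder(ConfigurationRecorderName='default')

    status = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'][0]
    assert status['recording'] is True
```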
53  moto/config/responses.py  (new file)

@@ -0,0 +1,53 @@
import json
from moto.core.responses import BaseResponse
from .models import config_backends


class ConfigResponse(BaseResponse):

    @property
    def config_backend(self):
        return config_backends[self.region]

    def put_configuration_recorder(self):
        self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
        return ""

    def describe_configuration_recorders(self):
        recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecorders': recorders}
        return json.dumps(schema)

    def describe_configuration_recorder_status(self):
        recorder_statuses = self.config_backend.describe_configuration_recorder_status(
            self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecordersStatus': recorder_statuses}
        return json.dumps(schema)

    def put_delivery_channel(self):
        self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
        return ""

    def describe_delivery_channels(self):
        delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
        schema = {'DeliveryChannels': delivery_channels}
        return json.dumps(schema)

    def describe_delivery_channel_status(self):
        raise NotImplementedError()

    def delete_delivery_channel(self):
        self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
        return ""

    def delete_configuration_recorder(self):
        self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def start_configuration_recorder(self):
        self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def stop_configuration_recorder(self):
        self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""
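`ConfigResponse.dispatch` relies on moto's JSON-protocol routing: the `X-Amz-Target` header names the action, which `BaseResponse` converts to the snake_case method names defined above. An illustrative conversion; the `StarlingDoveService` prefix and the exact regexes mirror what AWS Config and moto use on the wire, but treat the details as assumptions:

```python
import re

target = 'StarlingDoveService.DescribeConfigurationRecorderStatus'
action = target.split('.')[-1]
# Camel-case to snake_case, the usual moto helper pattern.
method_name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', action)
method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', method_name).lower()
print(method_name)  # describe_configuration_recorder_status
```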
10  moto/config/urls.py  (new file)

@@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import ConfigResponse

url_bases = [
    "https?://config.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': ConfigResponse.dispatch,
}
moto/dynamodb2/models.py

@@ -66,6 +66,8 @@ class DynamoType(object):
                 return int(self.value)
             except ValueError:
                 return float(self.value)
+        elif self.is_set():
+            return set(self.value)
         else:
             return self.value

@@ -509,15 +511,12 @@ class Table(BaseModel):
                 elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
                     raise ValueError("The conditional request failed")
                 elif 'ComparisonOperator' in val:
-                    comparison_func = get_comparison_func(
-                        val['ComparisonOperator'])
                     dynamo_types = [
                         DynamoType(ele) for ele in
                         val.get("AttributeValueList", [])
                     ]
-                    for t in dynamo_types:
-                        if not comparison_func(current_attr[key].value, t.value):
-                            raise ValueError('The conditional request failed')
+                    if not current_attr[key].compare(val['ComparisonOperator'], dynamo_types):
+                        raise ValueError('The conditional request failed')
         if range_value:
             self.items[hash_value][range_value] = item
         else:

@@ -946,15 +945,12 @@ class DynamoDBBackend(BaseBackend):
                 elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
                     raise ValueError("The conditional request failed")
                 elif 'ComparisonOperator' in val:
-                    comparison_func = get_comparison_func(
-                        val['ComparisonOperator'])
                     dynamo_types = [
                         DynamoType(ele) for ele in
                         val.get("AttributeValueList", [])
                     ]
-                    for t in dynamo_types:
-                        if not comparison_func(item_attr[key].value, t.value):
-                            raise ValueError('The conditional request failed')
+                    if not item_attr[key].compare(val['ComparisonOperator'], dynamo_types):
+                        raise ValueError('The conditional request failed')
 
         # Update does not fail on new items, so create one
         if item is None:
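Both call sites now delegate to `DynamoType.compare(...)`, a helper that is not part of this hunk. A sketch of the contract the call sites assume, reusing the module's `get_comparison_func` table; treat the body as illustrative:

```python
# Sketch of the DynamoType.compare contract relied on above.
def compare(self, range_comparison, range_objs):
    # Unpack the DynamoType operands (BETWEEN receives two, EQ/GT/etc. one)
    # and apply the shared comparison table used elsewhere in the module.
    range_values = [obj.value for obj in range_objs]
    comparison_func = get_comparison_func(range_comparison)
    return comparison_func(self.value, *range_values)
```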
moto/dynamodb2/responses.py

@@ -31,6 +31,67 @@ def get_empty_str_error():
         ))
 
 
+def condition_expression_to_expected(condition_expression, expression_attribute_names, expression_attribute_values):
+    """
+    Limited condition expression syntax parsing.
+    Supports Global Negation ex: NOT(inner expressions).
+    Supports simple AND conditions ex: cond_a AND cond_b and cond_c.
+    Atomic expressions supported are attribute_exists(key), attribute_not_exists(key) and #key = :value.
+    """
+    expected = {}
+    if condition_expression and 'OR' not in condition_expression:
+        reverse_re = re.compile('^NOT\s*\((.*)\)$')
+        reverse_m = reverse_re.match(condition_expression.strip())
+
+        reverse = False
+        if reverse_m:
+            reverse = True
+            condition_expression = reverse_m.group(1)
+
+        cond_items = [c.strip() for c in condition_expression.split('AND')]
+        if cond_items:
+            exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
+            not_exists_re = re.compile(
+                '^attribute_not_exists\s*\((.*)\)$')
+            equals_re = re.compile('^(#?\w+)\s*=\s*(\:?\w+)')
+
+            for cond in cond_items:
+                exists_m = exists_re.match(cond)
+                not_exists_m = not_exists_re.match(cond)
+                equals_m = equals_re.match(cond)
+
+                if exists_m:
+                    attribute_name = expression_attribute_names_lookup(exists_m.group(1), expression_attribute_names)
+                    expected[attribute_name] = {'Exists': True if not reverse else False}
+                elif not_exists_m:
+                    attribute_name = expression_attribute_names_lookup(not_exists_m.group(1), expression_attribute_names)
+                    expected[attribute_name] = {'Exists': False if not reverse else True}
+                elif equals_m:
+                    attribute_name = expression_attribute_names_lookup(equals_m.group(1), expression_attribute_names)
+                    attribute_value = expression_attribute_values_lookup(equals_m.group(2), expression_attribute_values)
+                    expected[attribute_name] = {
+                        'AttributeValueList': [attribute_value],
+                        'ComparisonOperator': 'EQ' if not reverse else 'NEQ'}
+
+    return expected
+
+
+def expression_attribute_names_lookup(attribute_name, expression_attribute_names):
+    if attribute_name.startswith('#') and attribute_name in expression_attribute_names:
+        return expression_attribute_names[attribute_name]
+    else:
+        return attribute_name
+
+
+def expression_attribute_values_lookup(attribute_value, expression_attribute_values):
+    if isinstance(attribute_value, six.string_types) and \
+            attribute_value.startswith(':') and \
+            attribute_value in expression_attribute_values:
+        return expression_attribute_values[attribute_value]
+    else:
+        return attribute_value
+
+
 class DynamoHandler(BaseResponse):
 
     def get_endpoint_name(self, headers):
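A worked example of the parser on a compound condition; the attribute names and values are illustrative. Note that `OR` is unsupported (the function returns an empty dict) and that a leading `NOT(...)` simply flips each atom rather than applying De Morgan's laws:

```python
expected = condition_expression_to_expected(
    'attribute_exists(#owner) AND #status = :active',
    {'#owner': 'owner', '#status': 'status'},
    {':active': {'S': 'ACTIVE'}},
)
# expected == {
#     'owner': {'Exists': True},
#     'status': {'AttributeValueList': [{'S': 'ACTIVE'}],
#                'ComparisonOperator': 'EQ'},
# }
```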
@@ -220,24 +281,13 @@ class DynamoHandler(BaseResponse):
             # expression
             if not expected:
                 condition_expression = self.body.get('ConditionExpression')
-                if condition_expression and 'OR' not in condition_expression:
-                    cond_items = [c.strip()
-                                  for c in condition_expression.split('AND')]
-
-                    if cond_items:
-                        expected = {}
-                        overwrite = False
-                        exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
-                        not_exists_re = re.compile(
-                            '^attribute_not_exists\s*\((.*)\)$')
-
-                        for cond in cond_items:
-                            exists_m = exists_re.match(cond)
-                            not_exists_m = not_exists_re.match(cond)
-                            if exists_m:
-                                expected[exists_m.group(1)] = {'Exists': True}
-                            elif not_exists_m:
-                                expected[not_exists_m.group(1)] = {'Exists': False}
+                expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
+                expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
+                expected = condition_expression_to_expected(condition_expression,
+                                                            expression_attribute_names,
+                                                            expression_attribute_values)
+                if expected:
+                    overwrite = False
 
             try:
                 result = self.dynamodb_backend.put_item(name, item, expected, overwrite)

@@ -590,23 +640,11 @@ class DynamoHandler(BaseResponse):
             # expression
             if not expected:
                 condition_expression = self.body.get('ConditionExpression')
-                if condition_expression and 'OR' not in condition_expression:
-                    cond_items = [c.strip()
-                                  for c in condition_expression.split('AND')]
-
-                    if cond_items:
-                        expected = {}
-                        exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
-                        not_exists_re = re.compile(
-                            '^attribute_not_exists\s*\((.*)\)$')
-
-                        for cond in cond_items:
-                            exists_m = exists_re.match(cond)
-                            not_exists_m = not_exists_re.match(cond)
-                            if exists_m:
-                                expected[exists_m.group(1)] = {'Exists': True}
-                            elif not_exists_m:
-                                expected[not_exists_m.group(1)] = {'Exists': False}
+                expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
+                expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
+                expected = condition_expression_to_expected(condition_expression,
+                                                            expression_attribute_names,
+                                                            expression_attribute_values)
 
         # Support spaces between operators in an update expression
         # E.g. `a = b + c` -> `a=b+c`
57
moto/ec2/models.py
Executable file → Normal file
57
moto/ec2/models.py
Executable file → Normal file
@ -388,6 +388,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
|
||||
self.ebs_optimized = kwargs.get("ebs_optimized", False)
|
||||
self.source_dest_check = "true"
|
||||
self.launch_time = utc_date_and_time()
|
||||
self.ami_launch_index = kwargs.get("ami_launch_index", 0)
|
||||
self.disable_api_termination = kwargs.get("disable_api_termination", False)
|
||||
self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
|
||||
associate_public_ip = kwargs.get("associate_public_ip", False)
|
||||
@ -719,6 +720,7 @@ class InstanceBackend(object):
|
||||
instance_tags = tags.get('instance', {})
|
||||
|
||||
for index in range(count):
|
||||
kwargs["ami_launch_index"] = index
|
||||
new_instance = Instance(
|
||||
self,
|
||||
image_id,
|
||||
@ -2464,7 +2466,7 @@ class SubnetBackend(object):
|
||||
default_for_az, map_public_ip_on_launch)
|
||||
|
||||
# AWS associates a new subnet with the default Network ACL
|
||||
self.associate_default_network_acl_with_subnet(subnet_id)
|
||||
self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
|
||||
self.subnets[availability_zone][subnet_id] = subnet
|
||||
return subnet
|
||||
|
||||
@@ -2879,7 +2881,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
valid_from, valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data, instance_type, placement,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, tags, spot_fleet_id,
**kwargs):
super(SpotInstanceRequest, self).__init__(**kwargs)
ls = LaunchSpecification()
@@ -2903,6 +2905,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
ls.monitored = monitoring_enabled
ls.subnet_id = subnet_id
self.spot_fleet_id = spot_fleet_id
self.tags = tags

if security_groups:
for group_name in security_groups:
@@ -2936,6 +2939,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
security_group_names=[],
security_group_ids=self.launch_specification.groups,
spot_fleet_id=self.spot_fleet_id,
tags=self.tags,
)
instance = reservation.instances[0]
return instance
@@ -2951,15 +2955,16 @@ class SpotRequestBackend(object):
valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data,
instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id=None):
monitoring_enabled, subnet_id, tags=None, spot_fleet_id=None):
requests = []
tags = tags or {}
for _ in range(count):
spot_request_id = random_spot_request_id()
request = SpotInstanceRequest(self,
spot_request_id, price, image_id, type, valid_from, valid_until,
launch_group, availability_zone_group, key_name, security_groups,
user_data, instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id)
monitoring_enabled, subnet_id, tags, spot_fleet_id)
self.spot_instance_requests[spot_request_id] = request
requests.append(request)
return requests
@@ -2979,8 +2984,8 @@ class SpotRequestBackend(object):

class SpotFleetLaunchSpec(object):
def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
weighted_capacity):
instance_type, key_name, monitoring, spot_price, subnet_id, tag_specifications,
user_data, weighted_capacity):
self.ebs_optimized = ebs_optimized
self.group_set = group_set
self.iam_instance_profile = iam_instance_profile
@@ -2990,6 +2995,7 @@ class SpotFleetLaunchSpec(object):
self.monitoring = monitoring
self.spot_price = spot_price
self.subnet_id = subnet_id
self.tag_specifications = tag_specifications
self.user_data = user_data
self.weighted_capacity = float(weighted_capacity)

@@ -3020,6 +3026,7 @@ class SpotFleetRequest(TaggedEC2Resource):
monitoring=spec.get('monitoring._enabled'),
spot_price=spec.get('spot_price', self.spot_price),
subnet_id=spec['subnet_id'],
tag_specifications=self._parse_tag_specifications(spec),
user_data=spec.get('user_data'),
weighted_capacity=spec['weighted_capacity'],
)
@@ -3102,6 +3109,7 @@ class SpotFleetRequest(TaggedEC2Resource):
monitoring_enabled=launch_spec.monitoring,
subnet_id=launch_spec.subnet_id,
spot_fleet_id=self.id,
tags=launch_spec.tag_specifications,
)
self.spot_requests.extend(requests)
self.fulfilled_capacity += added_weight
@@ -3124,6 +3132,25 @@ class SpotFleetRequest(TaggedEC2Resource):
self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
self.ec2_backend.terminate_instances(instance_ids)

def _parse_tag_specifications(self, spec):
try:
tag_spec_num = max([int(key.split('.')[1]) for key in spec if key.startswith("tag_specification_set")])
except ValueError:  # no tag specifications
return {}

tag_specifications = {}
for si in range(1, tag_spec_num + 1):
resource_type = spec["tag_specification_set.{si}._resource_type".format(si=si)]

tags = [key for key in spec if key.startswith("tag_specification_set.{si}._tag".format(si=si))]
tag_num = max([int(key.split('.')[3]) for key in tags])
tag_specifications[resource_type] = dict((
spec["tag_specification_set.{si}._tag.{ti}._key".format(si=si, ti=ti)],
spec["tag_specification_set.{si}._tag.{ti}._value".format(si=si, ti=ti)],
) for ti in range(1, tag_num + 1))

return tag_specifications


class SpotFleetBackend(object):
def __init__(self):
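For reference, a minimal sketch of the flattened querystring shape that _parse_tag_specifications consumes and the dict it returns (the key names come straight from the code above; the values are made up):

# Hypothetical flattened launch spec, as moto's querystring parsing would hand it over:
spec = {
    'tag_specification_set.1._resource_type': 'instance',
    'tag_specification_set.1._tag.1._key': 'Project',
    'tag_specification_set.1._tag.1._value': 'demo',
    'tag_specification_set.1._tag.2._key': 'Owner',
    'tag_specification_set.1._tag.2._value': 'alice',
}
# _parse_tag_specifications(spec) then yields:
# {'instance': {'Project': 'demo', 'Owner': 'alice'}}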
@@ -3560,8 +3587,22 @@ class NetworkAclBackend(object):
self.get_vpc(vpc_id)
network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
self.network_acls[network_acl_id] = network_acl
if default:
self.add_default_entries(network_acl_id)
return network_acl

def add_default_entries(self, network_acl_id):
default_acl_entries = [
{'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'},
{'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'},
{'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'},
{'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'}
]
for entry in default_acl_entries:
self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
rule_action=entry['rule_action'], egress=entry['egress'], cidr_block='0.0.0.0/0',
icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None)

def get_all_network_acls(self, network_acl_ids=None, filters=None):
network_acls = self.network_acls.values()

@@ -3636,9 +3677,9 @@ class NetworkAclBackend(object):
new_acl.associations[new_assoc_id] = association
return association

def associate_default_network_acl_with_subnet(self, subnet_id):
def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id):
association_id = random_network_acl_subnet_association_id()
acl = next(acl for acl in self.network_acls.values() if acl.default)
acl = next(acl for acl in self.network_acls.values() if acl.default and acl.vpc_id == vpc_id)
acl.associations[association_id] = NetworkAclAssociation(self, association_id,
subnet_id, acl.id)

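A minimal sketch of the behaviour the NetworkAcl hunks above implement, written against moto's public decorators (the CIDRs are illustrative, and the response field names follow the EC2 API shape):

import boto3
from moto import mock_ec2

@mock_ec2
def new_subnet_gets_default_acl_of_its_vpc():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.0.0/24')['Subnet']['SubnetId']
    acls = ec2.describe_network_acls()['NetworkAcls']
    # The default ACL of *this* VPC (not just any default ACL) now owns the
    # association, and it carries the four default allow/deny entries added above.
    default = [a for a in acls if a['VpcId'] == vpc_id and a['IsDefault']][0]
    assert any(assoc['SubnetId'] == subnet_id for assoc in default['Associations'])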
@@ -150,16 +150,18 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>creating</status>
<createTime>{{ volume.create_time}}</createTime>
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</CreateVolumeResponse>"""

@@ -191,16 +193,18 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
</item>
{% endif %}
</attachmentSet>
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</item>
{% endfor %}

@@ -244,7 +244,7 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
<reason/>
<keyName>{{ instance.key_name }}</keyName>
<ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
<amiLaunchIndex>0</amiLaunchIndex>
<amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
@@ -384,7 +384,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
<reason>{{ instance._reason }}</reason>
<keyName>{{ instance.key_name }}</keyName>
<ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
<amiLaunchIndex>0</amiLaunchIndex>
<amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
<productCodes/>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>

@@ -107,6 +107,21 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
</item>
{% endfor %}
</groupSet>
<tagSpecificationSet>
{% for resource_type in launch_spec.tag_specifications %}
<item>
<resourceType>{{ resource_type }}</resourceType>
<tag>
{% for key, value in launch_spec.tag_specifications[resource_type].items() %}
<item>
<key>{{ key }}</key>
<value>{{ value }}</value>
</item>
{% endfor %}
</tag>
</item>
{% endfor %}
</tagSpecificationSet>
</item>
{% endfor %}
</launchSpecifications>

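A sketch of how the tag plumbing above is exercised from boto3 (the role ARN, AMI id and subnet id are placeholders, not values from this commit):

import boto3
from moto import mock_ec2

@mock_ec2
def spot_fleet_with_tag_specifications():
    conn = boto3.client('ec2', region_name='us-east-1')
    conn.request_spot_fleet(SpotFleetRequestConfig={
        'SpotPrice': '0.12',
        'TargetCapacity': 1,
        'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',  # placeholder ARN
        'LaunchSpecifications': [{
            'ImageId': 'ami-123',          # placeholder
            'InstanceType': 't2.micro',
            'SubnetId': 'subnet-1234',     # placeholder; normally created first
            'WeightedCapacity': 1.0,
            'TagSpecifications': [{
                'ResourceType': 'instance',
                'Tags': [{'Key': 'Project', 'Value': 'demo'}],
            }],
        }],
    })
    # DescribeSpotFleetRequests responses now render the <tagSpecificationSet>
    # block added above, and the tags flow through to the launched instances.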
@@ -3,6 +3,7 @@ from .responses import ECRResponse

url_bases = [
"https?://ecr.(.+).amazonaws.com",
"https?://api.ecr.(.+).amazonaws.com",
]

url_paths = {

@@ -32,3 +32,48 @@ class MalformedCertificate(RESTError):
def __init__(self, cert):
super(MalformedCertificate, self).__init__(
'MalformedCertificate', 'Certificate {cert} is malformed'.format(cert=cert))


class DuplicateTags(RESTError):
code = 400

def __init__(self):
super(DuplicateTags, self).__init__(
'InvalidInput', 'Duplicate tag keys found. Please note that Tag keys are case insensitive.')


class TagKeyTooBig(RESTError):
code = 400

def __init__(self, tag, param='tags.X.member.key'):
super(TagKeyTooBig, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
"constraint: Member must have length less than or equal to 128.".format(tag, param))


class TagValueTooBig(RESTError):
code = 400

def __init__(self, tag):
super(TagValueTooBig, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at 'tags.X.member.value' failed to satisfy "
"constraint: Member must have length less than or equal to 256.".format(tag))


class InvalidTagCharacters(RESTError):
code = 400

def __init__(self, tag, param='tags.X.member.key'):
message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(tag, param)
message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"

super(InvalidTagCharacters, self).__init__('ValidationError', message)


class TooManyTags(RESTError):
code = 400

def __init__(self, tags, param='tags'):
super(TooManyTags, self).__init__(
'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
"constraint: Member must have length less than or equal to 50.".format(tags, param))

@@ -3,6 +3,7 @@ import base64
import sys
from datetime import datetime
import json
import re

from cryptography import x509
from cryptography.hazmat.backends import default_backend
@@ -12,7 +13,8 @@ from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds

from .aws_managed_policies import aws_managed_policies_data
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, MalformedCertificate
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, MalformedCertificate, \
DuplicateTags, TagKeyTooBig, InvalidTagCharacters, TooManyTags, TagValueTooBig
from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id

ACCOUNT_ID = 123456789012
@@ -32,7 +34,6 @@ class MFADevice(object):


class Policy(BaseModel):

is_attachable = False

def __init__(self,
@@ -132,6 +133,8 @@ class Role(BaseModel):
self.policies = {}
self.managed_policies = {}
self.create_date = datetime.now(pytz.utc)
self.tags = {}
self.description = ""

@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -175,6 +178,9 @@ class Role(BaseModel):
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()

def get_tags(self):
return [self.tags[tag] for tag in self.tags]


class InstanceProfile(BaseModel):

@@ -468,6 +474,16 @@ class IAMBackend(BaseBackend):
policy = arns[policy_arn]
policy.attach_to(self.get_role(role_name))

def update_role_description(self, role_name, role_description):
role = self.get_role(role_name)
role.description = role_description
return role

def update_role(self, role_name, role_description):
role = self.get_role(role_name)
role.description = role_description
return role

def detach_role_policy(self, policy_arn, role_name):
arns = dict((p.arn, p) for p in self.managed_policies.values())
try:
@@ -614,6 +630,86 @@ class IAMBackend(BaseBackend):
role = self.get_role(role_name)
return role.policies.keys()

def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
"""Validates the tag key.

:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
"""
# Validate that the key length is correct:
if len(tag_key) > 128:
raise TagKeyTooBig(tag_key, param=exception_param)

# Validate that the tag key fits the proper Regex:
# [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
# Kudos if you can come up with a better way of doing a global search :)
if not len(match) or len(match[0]) < len(tag_key):
raise InvalidTagCharacters(tag_key, param=exception_param)

def _check_tag_duplicate(self, all_tags, tag_key):
"""Validates that a tag key is not a duplicate

:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:return:
"""
if tag_key in all_tags:
raise DuplicateTags()

def list_role_tags(self, role_name, marker, max_items=100):
role = self.get_role(role_name)

max_items = int(max_items)
tag_index = sorted(role.tags)
start_idx = int(marker) if marker else 0

tag_index = tag_index[start_idx:start_idx + max_items]

if len(role.tags) <= (start_idx + max_items):
marker = None
else:
marker = str(start_idx + max_items)

# Make the tag list of dicts:
tags = [role.tags[tag] for tag in tag_index]

return tags, marker

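The Marker returned above is just a stringified start index, so two pages chain like this (plain illustration, not moto code):

# Suppose a role has 120 tags and max_items is left at 100:
# page 1: marker=None  -> returns tags[0:100],  marker='100'
# page 2: marker='100' -> returns tags[100:120], marker=None (list exhausted)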
def tag_role(self, role_name, tags):
if len(tags) > 50:
raise TooManyTags(tags)

role = self.get_role(role_name)

tag_keys = {}
for tag in tags:
# Need to index by the lowercase tag key since the keys are case insensitive, but their case is retained.
ref_key = tag['Key'].lower()
self._check_tag_duplicate(tag_keys, ref_key)
self._validate_tag_key(tag['Key'])
if len(tag['Value']) > 256:
raise TagValueTooBig(tag['Value'])

tag_keys[ref_key] = tag

role.tags.update(tag_keys)

def untag_role(self, role_name, tag_keys):
if len(tag_keys) > 50:
raise TooManyTags(tag_keys, param='tagKeys')

role = self.get_role(role_name)

for key in tag_keys:
ref_key = key.lower()
self._validate_tag_key(key, exception_param='tagKeys')

role.tags.pop(ref_key, None)

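A minimal usage sketch of the new tag APIs through boto3 (role name and tag values are arbitrary):

import boto3
from moto import mock_iam

@mock_iam
def tag_and_untag_a_role():
    iam = boto3.client('iam', region_name='us-east-1')
    iam.create_role(RoleName='my-role', AssumeRolePolicyDocument='{}')
    iam.tag_role(RoleName='my-role', Tags=[{'Key': 'Env', 'Value': 'dev'}])
    assert iam.list_role_tags(RoleName='my-role')['Tags'] == [{'Key': 'Env', 'Value': 'dev'}]
    # Keys are matched case-insensitively, per the lower-casing above:
    iam.untag_role(RoleName='my-role', TagKeys=['ENV'])
    assert iam.list_role_tags(RoleName='my-role')['Tags'] == []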
def create_policy_version(self, policy_arn, policy_document, set_as_default):
policy = self.get_policy(policy_arn)
if not policy:
@@ -796,6 +892,16 @@ class IAMBackend(BaseBackend):

return users

def list_roles(self, path_prefix, marker, max_items):
roles = None
try:
roles = self.roles.values()
except KeyError:
raise IAMNotFoundException(
"Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items))

return roles

def upload_signing_certificate(self, user_name, body):
user = self.get_user(user_name)
cert_id = random_resource_id(size=32)

@@ -107,6 +107,69 @@ class IamResponse(BaseResponse):
template = self.response_template(LIST_POLICIES_TEMPLATE)
return template.render(policies=policies, marker=marker)

def list_entities_for_policy(self):
policy_arn = self._get_param('PolicyArn')

# Options: 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy'
entity = self._get_param('EntityFilter')
path_prefix = self._get_param('PathPrefix')
# policy_usage_filter = self._get_param('PolicyUsageFilter')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems')

entity_roles = []
entity_groups = []
entity_users = []

if entity == 'User':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)

elif entity == 'Role':
roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)

elif entity == 'Group':
groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)

elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy':
users = iam_backend.list_users(path_prefix, marker, max_items)
if users:
for user in users:
for p in user.managed_policies:
if p == policy_arn:
entity_users.append(user.name)

roles = iam_backend.list_roles(path_prefix, marker, max_items)
if roles:
for role in roles:
for p in role.managed_policies:
if p == policy_arn:
entity_roles.append(role.name)

groups = iam_backend.list_groups()
if groups:
for group in groups:
for p in group.managed_policies:
if p == policy_arn:
entity_groups.append(group.name)

template = self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE)
return template.render(roles=entity_roles, users=entity_users, groups=entity_groups)

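A short sketch of the new endpoint via boto3 (the names and the empty policy document are placeholders):

import boto3
from moto import mock_iam

@mock_iam
def list_entities_for_a_policy():
    iam = boto3.client('iam', region_name='us-east-1')
    iam.create_role(RoleName='my-role', AssumeRolePolicyDocument='{}')
    arn = iam.create_policy(
        PolicyName='my-policy',
        PolicyDocument='{"Version": "2012-10-17", "Statement": []}')['Policy']['Arn']
    iam.attach_role_policy(RoleName='my-role', PolicyArn=arn)
    roles = iam.list_entities_for_policy(PolicyArn=arn, EntityFilter='Role')['PolicyRoles']
    assert roles == [{'RoleName': 'my-role'}]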
def create_role(self):
role_name = self._get_param('RoleName')
path = self._get_param('Path')
@@ -169,6 +232,20 @@ class IamResponse(BaseResponse):
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name="UpdateAssumeRolePolicyResponse")

def update_role_description(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role_description(role_name, description)
template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE)
return template.render(role=role)

def update_role(self):
role_name = self._get_param('RoleName')
description = self._get_param('Description')
role = iam_backend.update_role(role_name, description)
template = self.response_template(UPDATE_ROLE_TEMPLATE)
return template.render(role=role)

def create_policy_version(self):
policy_arn = self._get_param('PolicyArn')
policy_document = self._get_param('PolicyDocument')
@@ -554,7 +631,8 @@ class IamResponse(BaseResponse):
policies=account_details['managed_policies'],
users=account_details['users'],
groups=account_details['groups'],
roles=account_details['roles']
roles=account_details['roles'],
get_groups_for_user=iam_backend.get_groups_for_user
)

def create_saml_provider(self):
@@ -625,6 +703,65 @@ class IamResponse(BaseResponse):
template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE)
return template.render(user_name=user_name, certificates=certs)

def list_role_tags(self):
role_name = self._get_param('RoleName')
marker = self._get_param('Marker')
max_items = self._get_param('MaxItems', 100)

tags, marker = iam_backend.list_role_tags(role_name, marker, max_items)

template = self.response_template(LIST_ROLE_TAG_TEMPLATE)
return template.render(tags=tags, marker=marker)

def tag_role(self):
role_name = self._get_param('RoleName')
tags = self._get_multi_param('Tags.member')

iam_backend.tag_role(role_name, tags)

template = self.response_template(TAG_ROLE_TEMPLATE)
return template.render()

def untag_role(self):
role_name = self._get_param('RoleName')
tag_keys = self._get_multi_param('TagKeys.member')

iam_backend.untag_role(role_name, tag_keys)

template = self.response_template(UNTAG_ROLE_TEMPLATE)
return template.render()


LIST_ENTITIES_FOR_POLICY_TEMPLATE = """<ListEntitiesForPolicyResponse>
<ListEntitiesForPolicyResult>
<PolicyRoles>
{% for role in roles %}
<member>
<RoleName>{{ role }}</RoleName>
</member>
{% endfor %}
</PolicyRoles>
<PolicyGroups>
{% for group in groups %}
<member>
<GroupName>{{ group }}</GroupName>
</member>
{% endfor %}
</PolicyGroups>
<IsTruncated>false</IsTruncated>
<PolicyUsers>
{% for user in users %}
<member>
<UserName>{{ user }}</UserName>
</member>
{% endfor %}
</PolicyUsers>
</ListEntitiesForPolicyResult>
<ResponseMetadata>
<RequestId>eb358e22-9d1f-11e4-93eb-190ecEXAMPLE</RequestId>
</ResponseMetadata>
</ListEntitiesForPolicyResponse>"""


ATTACH_ROLE_POLICY_TEMPLATE = """<AttachRolePolicyResponse>
<ResponseMetadata>
@@ -869,6 +1006,40 @@ GET_ROLE_POLICY_TEMPLATE = """<GetRolePolicyResponse xmlns="https://iam.amazonaw
</ResponseMetadata>
</GetRolePolicyResponse>"""

UPDATE_ROLE_TEMPLATE = """<UpdateRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleResult>
</UpdateRoleResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleResponse>"""

UPDATE_ROLE_DESCRIPTION_TEMPLATE = """<UpdateRoleDescriptionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<UpdateRoleDescriptionResult>
<Role>
<Path>{{ role.path }}</Path>
<Arn>{{ role.arn }}</Arn>
<RoleName>{{ role.name }}</RoleName>
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date.isoformat() }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</UpdateRoleDescriptionResult>
<ResponseMetadata>
<RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
</ResponseMetadata>
</UpdateRoleDescriptionResponse>"""

GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<GetRoleResult>
<Role>
@@ -878,6 +1049,16 @@ GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/201
<AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
<CreateDate>{{ role.create_date }}</CreateDate>
<RoleId>{{ role.id }}</RoleId>
{% if role.tags %}
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
{% endif %}
</Role>
</GetRoleResult>
<ResponseMetadata>
@@ -1461,8 +1642,19 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<UserDetailList>
{% for user in users %}
<member>
<GroupList />
<AttachedManagedPolicies/>
<GroupList>
{% for group in get_groups_for_user(user.name) %}
<member>{{ group.name }}</member>
{% endfor %}
</GroupList>
<AttachedManagedPolicies>
{% for policy in user.managed_policies %}
<member>
<PolicyName>{{ user.managed_policies[policy].name }}</PolicyName>
<PolicyArn>{{ policy }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<UserId>{{ user.id }}</UserId>
<Path>{{ user.path }}</Path>
<UserName>{{ user.name }}</UserName>
@@ -1476,33 +1668,55 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<member>
<GroupId>{{ group.id }}</GroupId>
<AttachedManagedPolicies>
{% for policy in group.managed_policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
</member>
{% for policy_arn in group.managed_policies %}
<member>
<PolicyName>{{ group.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<GroupName>{{ group.name }}</GroupName>
<Path>{{ group.path }}</Path>
<Arn>{{ group.arn }}</Arn>
<CreateDate>{{ group.create_date }}</CreateDate>
<GroupPolicyList/>
<GroupPolicyList>
{% for policy in group.policies %}
<member>
<PolicyName>{{ policy }}</PolicyName>
<PolicyDocument>{{ group.get_policy(policy) }}</PolicyDocument>
</member>
{% endfor %}
</GroupPolicyList>
</member>
{% endfor %}
</GroupDetailList>
<RoleDetailList>
{% for role in roles %}
<member>
<RolePolicyList/>
<AttachedManagedPolicies>
{% for policy in role.managed_policies %}
<RolePolicyList>
{% for inline_policy in role.policies %}
<member>
<PolicyName>{{ policy.name }}</PolicyName>
<PolicyArn>{{ policy.arn }}</PolicyArn>
<PolicyName>{{ inline_policy }}</PolicyName>
<PolicyDocument>{{ role.policies[inline_policy] }}</PolicyDocument>
</member>
{% endfor %}
</RolePolicyList>
<AttachedManagedPolicies>
{% for policy_arn in role.managed_policies %}
<member>
<PolicyName>{{ role.managed_policies[policy_arn].name }}</PolicyName>
<PolicyArn>{{ policy_arn }}</PolicyArn>
</member>
{% endfor %}
</AttachedManagedPolicies>
<Tags>
{% for tag in role.get_tags() %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
<InstanceProfileList>
{% for profile in instance_profiles %}
<member>
@@ -1543,19 +1757,14 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
<PolicyId>{{ policy.id }}</PolicyId>
<Path>{{ policy.path }}</Path>
<PolicyVersionList>
{% for policy_version in policy.versions %}
<member>
<Document>
{"Version":"2012-10-17","Statement":{"Effect":"Allow",
"Action":["iam:CreatePolicy","iam:CreatePolicyVersion",
"iam:DeletePolicy","iam:DeletePolicyVersion","iam:GetPolicy",
"iam:GetPolicyVersion","iam:ListPolicies",
"iam:ListPolicyVersions","iam:SetDefaultPolicyVersion"],
"Resource":"*"}}
</Document>
<IsDefaultVersion>true</IsDefaultVersion>
<VersionId>v1</VersionId>
<CreateDate>2012-05-09T16:27:11Z</CreateDate>
<Document>{{ policy_version.document }}</Document>
<IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
<VersionId>{{ policy_version.version_id }}</VersionId>
<CreateDate>{{ policy_version.create_datetime }}</CreateDate>
</member>
{% endfor %}
</PolicyVersionList>
<Arn>{{ policy.arn }}</Arn>
<AttachmentCount>1</AttachmentCount>
@@ -1671,3 +1880,38 @@ LIST_SIGNING_CERTIFICATES_TEMPLATE = """<ListSigningCertificatesResponse>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</ListSigningCertificatesResponse>"""


TAG_ROLE_TEMPLATE = """<TagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</TagRoleResponse>"""


LIST_ROLE_TAG_TEMPLATE = """<ListRoleTagsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ListRoleTagsResult>
<IsTruncated>{{ 'true' if marker else 'false' }}</IsTruncated>
{% if marker %}
<Marker>{{ marker }}</Marker>
{% endif %}
<Tags>
{% for tag in tags %}
<member>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</member>
{% endfor %}
</Tags>
</ListRoleTagsResult>
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</ListRoleTagsResponse>"""


UNTAG_ROLE_TEMPLATE = """<UntagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
<ResponseMetadata>
<RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
</ResponseMetadata>
</UntagRoleResponse>"""

@@ -21,6 +21,7 @@ class Key(BaseModel):
self.account_id = "0123456789012"
self.key_rotation_status = False
self.deletion_date = None
self.tags = {}

@property
def physical_resource_id(self):
@@ -35,7 +36,7 @@ class Key(BaseModel):
"KeyMetadata": {
"AWSAccountId": self.account_id,
"Arn": self.arn,
"CreationDate": "2015-01-01 00:00:00",
"CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
"Description": self.description,
"Enabled": self.enabled,
"KeyId": self.id,
@@ -63,7 +64,6 @@ class Key(BaseModel):
)
key.key_rotation_status = properties['EnableKeyRotation']
key.enabled = properties['Enabled']

return key

def get_cfn_attribute(self, attribute_name):
@@ -84,6 +84,18 @@ class KmsBackend(BaseBackend):
self.keys[key.id] = key
return key

def update_key_description(self, key_id, description):
key = self.keys[self.get_key_id(key_id)]
key.description = description

def tag_resource(self, key_id, tags):
key = self.keys[self.get_key_id(key_id)]
key.tags = tags

def list_resource_tags(self, key_id):
key = self.keys[self.get_key_id(key_id)]
return key.tags

def delete_key(self, key_id):
if key_id in self.keys:
if key_id in self.key_to_aliases:

@@ -38,6 +38,28 @@ class KmsResponse(BaseResponse):
policy, key_usage, description, self.region)
return json.dumps(key.to_dict())

def update_key_description(self):
key_id = self.parameters.get('KeyId')
description = self.parameters.get('Description')

self.kms_backend.update_key_description(key_id, description)
return json.dumps(None)

def tag_resource(self):
key_id = self.parameters.get('KeyId')
tags = self.parameters.get('Tags')
self.kms_backend.tag_resource(key_id, tags)
return json.dumps({})

def list_resource_tags(self):
key_id = self.parameters.get('KeyId')
tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({
"Tags": tags,
"NextMarker": None,
"Truncated": False,
})

def describe_key(self):
key_id = self.parameters.get('KeyId')
try:

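A minimal sketch of the new KMS tag plumbing through boto3 (description and tag values are arbitrary):

import boto3
from moto import mock_kms

@mock_kms
def tag_a_kms_key():
    kms = boto3.client('kms', region_name='us-east-1')
    key_id = kms.create_key(Description='example')['KeyMetadata']['KeyId']
    kms.tag_resource(KeyId=key_id, Tags=[{'TagKey': 'Env', 'TagValue': 'dev'}])
    # The backend stores the tag list verbatim and returns it unchanged:
    assert kms.list_resource_tags(KeyId=key_id)['Tags'] == [{'TagKey': 'Env', 'TagValue': 'dev'}]
    kms.update_key_description(KeyId=key_id, Description='new description')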
@@ -242,7 +242,8 @@ class LogsBackend(BaseBackend):
if next_token is None:
next_token = 0

groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
groups = [group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)]
groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True)
groups_page = groups[next_token:next_token + limit]

next_token += limit

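The point of this change, in one comment-sized sketch: sorting a bare sequence of dicts raises TypeError on Python 3, so the groups are materialised first and then sorted on an explicit key.

# After creating log groups 'a' then 'b':
#   describe_log_groups() -> 'b' before 'a'   (ordered by creationTime, descending)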
@@ -1113,4 +1113,4 @@ def httprettified(test):

if isinstance(test, ClassTypes):
return decorate_class(test)
return decorate_callable(test)
return decorate_callable(test)
@@ -29,7 +29,6 @@ import re
from .compat import BaseClass
from .utils import decode_utf8


STATUSES = {
100: "Continue",
101: "Switching Protocols",

@@ -24,7 +24,7 @@ class HealthCheck(BaseModel):
self.id = health_check_id
self.ip_address = health_check_args.get("ip_address")
self.port = health_check_args.get("port", 80)
self._type = health_check_args.get("type")
self.type_ = health_check_args.get("type")
self.resource_path = health_check_args.get("resource_path")
self.fqdn = health_check_args.get("fqdn")
self.search_string = health_check_args.get("search_string")
@@ -58,7 +58,7 @@ class HealthCheck(BaseModel):
<HealthCheckConfig>
<IPAddress>{{ health_check.ip_address }}</IPAddress>
<Port>{{ health_check.port }}</Port>
<Type>{{ health_check._type }}</Type>
<Type>{{ health_check.type_ }}</Type>
<ResourcePath>{{ health_check.resource_path }}</ResourcePath>
<FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>
<RequestInterval>{{ health_check.request_interval }}</RequestInterval>
@@ -76,7 +76,7 @@ class RecordSet(BaseModel):

def __init__(self, kwargs):
self.name = kwargs.get('Name')
self._type = kwargs.get('Type')
self.type_ = kwargs.get('Type')
self.ttl = kwargs.get('TTL')
self.records = kwargs.get('ResourceRecords', [])
self.set_identifier = kwargs.get('SetIdentifier')
@@ -130,7 +130,7 @@ class RecordSet(BaseModel):
def to_xml(self):
template = Template("""<ResourceRecordSet>
<Name>{{ record_set.name }}</Name>
<Type>{{ record_set._type }}</Type>
<Type>{{ record_set.type_ }}</Type>
{% if record_set.set_identifier %}
<SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>
{% endif %}
@@ -183,7 +183,7 @@ class FakeZone(BaseModel):
def upsert_rrset(self, record_set):
new_rrset = RecordSet(record_set)
for i, rrset in enumerate(self.rrsets):
if rrset.name == new_rrset.name:
if rrset.name == new_rrset.name and rrset.type_ == new_rrset.type_:
self.rrsets[i] = new_rrset
break
else:
@@ -202,7 +202,7 @@ class FakeZone(BaseModel):
record_sets = list(self.rrsets)  # Copy the list
if start_type:
record_sets = [
record_set for record_set in record_sets if record_set._type >= start_type]
record_set for record_set in record_sets if record_set.type_ >= start_type]
if start_name:
record_sets = [
record_set for record_set in record_sets if record_set.name >= start_name]

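A minimal sketch of what the tightened UPSERT match fixes, via boto3 (zone and record values are illustrative):

import boto3
from moto import mock_route53

@mock_route53
def upsert_no_longer_clobbers_other_types():
    r53 = boto3.client('route53', region_name='us-east-1')
    zone_id = r53.create_hosted_zone(Name='example.com', CallerReference='ref')['HostedZone']['Id']
    for rtype, value in [('A', '1.2.3.4'), ('TXT', '"hello"')]:
        r53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={'Changes': [{
                'Action': 'UPSERT',
                'ResourceRecordSet': {'Name': 'www.example.com', 'Type': rtype,
                                      'TTL': 60, 'ResourceRecords': [{'Value': value}]},
            }]})
    # Both records survive: an rrset is replaced only when name *and* type match.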
@@ -10,6 +10,7 @@ import random
import string
import tempfile
import sys
import uuid

import six

@@ -35,7 +36,7 @@ class FakeDeleteMarker(BaseModel):
self.key = key
self.name = key.name
self.last_modified = datetime.datetime.utcnow()
self._version_id = key.version_id + 1
self._version_id = str(uuid.uuid4())

@property
def last_modified_ISO8601(self):
@@ -115,15 +116,16 @@ class FakeKey(BaseModel):
self.last_modified = datetime.datetime.utcnow()
self._etag = None  # must recalculate etag
if self._is_versioned:
self._version_id += 1
self._version_id = str(uuid.uuid4())
else:
self._is_versioned = 0
self._version_id = None

def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)

def increment_version(self):
self._version_id += 1
def refresh_version(self):
self._version_id = str(uuid.uuid4())
self.last_modified = datetime.datetime.utcnow()

@property
def etag(self):
@@ -716,17 +718,18 @@ class S3Backend(BaseBackend):

def get_bucket_latest_versions(self, bucket_name):
versions = self.get_bucket_versions(bucket_name)
maximum_version_per_key = {}
latest_modified_per_key = {}
latest_versions = {}

for version in versions:
name = version.name
last_modified = version.last_modified
version_id = version.version_id
maximum_version_per_key[name] = max(
version_id,
maximum_version_per_key.get(name, -1)
latest_modified_per_key[name] = max(
last_modified,
latest_modified_per_key.get(name, datetime.datetime.min)
)
if version_id == maximum_version_per_key[name]:
if last_modified == latest_modified_per_key[name]:
latest_versions[name] = version_id

return latest_versions
@@ -774,20 +777,19 @@ class S3Backend(BaseBackend):

bucket = self.get_bucket(bucket_name)

old_key = bucket.keys.get(key_name, None)
if old_key is not None and bucket.is_versioned:
new_version_id = old_key._version_id + 1
else:
new_version_id = 0

new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=new_version_id)
bucket.keys[key_name] = new_key
version_id=str(uuid.uuid4()) if bucket.is_versioned else None)

keys = [
key for key in bucket.keys.getlist(key_name, [])
if key.version_id != new_key.version_id
] + [new_key]
bucket.keys.setlist(key_name, keys)

return new_key

@@ -977,7 +979,7 @@ class S3Backend(BaseBackend):

# By this point, the destination key must exist, or KeyError
if dest_bucket.is_versioned:
dest_bucket.keys[dest_key_name].increment_version()
dest_bucket.keys[dest_key_name].refresh_version()
if storage is not None:
key.set_storage_class(storage)
if acl is not None:

@@ -19,7 +19,7 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi
MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \
FakeTag
from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
from .utils import bucket_name_from_url, clean_key_name, metadata_from_headers, parse_region_from_url
from xml.dom import minidom


@@ -733,7 +733,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
# Copy key
# you can have a quoted ?version=abc with a version Id, so work on
# we need to parse the unquoted string first
src_key = request.headers.get("x-amz-copy-source")
src_key = clean_key_name(request.headers.get("x-amz-copy-source"))
if isinstance(src_key, six.binary_type):
src_key = src_key.decode('utf-8')
src_key_parsed = urlparse(src_key)
@@ -1303,7 +1303,7 @@ S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key.version_id }}</VersionId>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>

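A short sketch of the user-visible effect of the uuid4 version ids, via boto3 (bucket and key names are arbitrary):

import boto3
from moto import mock_s3

@mock_s3
def version_ids_are_opaque_strings():
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='mybucket')
    s3.put_object(Bucket='mybucket', Key='k', Body=b'v1')  # pre-versioning put
    s3.put_bucket_versioning(Bucket='mybucket', VersioningConfiguration={'Status': 'Enabled'})
    s3.put_object(Bucket='mybucket', Key='k', Body=b'v2')
    versions = s3.list_object_versions(Bucket='mybucket')['Versions']
    # Version ids are now uuid4 strings rather than incrementing integers,
    # and the pre-versioning object's id renders as the literal "null".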
6
setup.py
@@ -21,8 +21,8 @@ def read(*parts):
install_requires = [
"Jinja2>=2.7.3",
"boto>=2.36.0",
"boto3>=1.6.16",
"botocore>=1.12.13",
"boto3>=1.9.86",
"botocore>=1.12.86",
"cryptography>=2.3.0",
"requests>=2.5",
"xmltodict",
@@ -37,7 +37,7 @@ install_requires = [
"jsondiff==1.1.2",
"aws-xray-sdk!=0.96,>=0.93",
"responses>=0.9.0",
"idna<2.8,>=2.5",
"idna<2.9,>=2.5",
"cfn-lint",
]

@@ -12,6 +12,8 @@ import sure # noqa

from freezegun import freeze_time
from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings
from nose.tools import assert_raises
from botocore.exceptions import ClientError

_lambda_region = 'us-west-2'

@@ -397,6 +399,11 @@ def test_get_function():
result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST')
result['Configuration']['Version'].should.equal('$LATEST')

# Test get function when can't find function name
with assert_raises(ClientError):
conn.get_function(FunctionName='junk', Qualifier='$LATEST')


@mock_lambda
@mock_s3
@@ -819,3 +826,87 @@ def get_function_policy():
assert isinstance(response['Policy'], str)
res = json.loads(response['Policy'])
assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction'


@mock_lambda
@mock_s3
def test_list_versions_by_function():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')

zip_content = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')

conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)

conn.publish_version(FunctionName='testFunction')

versions = conn.list_versions_by_function(FunctionName='testFunction')

assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST'


@mock_lambda
@mock_s3
def test_create_function_with_already_exists():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')

zip_content = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')

conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)

response = conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)

assert response['FunctionName'] == 'testFunction'


@mock_lambda
@mock_s3
def test_list_versions_by_function_for_nonexistent_function():
conn = boto3.client('lambda', 'us-west-2')
versions = conn.list_versions_by_function(FunctionName='testFunction')

assert len(versions['Versions']) == 0

@@ -323,6 +323,54 @@ def test_create_job_queue():
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)

# Create job queue which already exists
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)

except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')


# Create job queue with incorrect state
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue2',
state='JUNK',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)

except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')

# Create job queue with no compute env
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue3',
state='JUNK',
priority=123,
computeEnvironmentOrder=[

]
)

except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')

@mock_ec2
@mock_ecs
@@ -397,6 +445,17 @@ def test_update_job_queue():
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)

batch_client.update_job_queue(
jobQueue='test_job_queue',
priority=5
)

resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)


@mock_ec2
@mock_ecs

@@ -399,6 +399,32 @@ def test_create_change_set_from_s3_url():
assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId']


@mock_cloudformation
def test_describe_change_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)

stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
stack['ChangeSetName'].should.equal('NewChangeSet')
stack['StackName'].should.equal('NewStack')

cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_update_template_json,
ChangeSetName='NewChangeSet2',
ChangeSetType='UPDATE',
)
stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2")
stack['ChangeSetName'].should.equal('NewChangeSet2')
stack['StackName'].should.equal('NewStack')
stack['Changes'].should.have.length_of(2)


@mock_cloudformation
def test_execute_change_set_w_arn():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
@@ -420,7 +446,7 @@ def test_execute_change_set_w_name():
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)
cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack')
cf_conn.execute_change_set(ChangeSetName='NewChangeSet', StackName='NewStack')


@mock_cloudformation
@@ -489,6 +515,20 @@ def test_describe_stack_by_stack_id():
stack_by_id['StackName'].should.equal("test_stack")


@mock_cloudformation
def test_list_change_sets():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack2',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet2',
ChangeSetType='CREATE',
)
change_set = cf_conn.list_change_sets(StackName='NewStack2')['Summaries'][0]
change_set['StackName'].should.equal('NewStack2')
change_set['ChangeSetName'].should.equal('NewChangeSet2')


@mock_cloudformation
def test_list_stacks():
cf = boto3.resource('cloudformation', region_name='us-east-1')
@@ -521,6 +561,22 @@ def test_delete_stack_from_resource():
list(cf.stacks.all()).should.have.length_of(0)


@mock_cloudformation
@mock_ec2
def test_delete_change_set():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_change_set(
StackName='NewStack',
TemplateBody=dummy_template_json,
ChangeSetName='NewChangeSet',
ChangeSetType='CREATE',
)

cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(1)
cf_conn.delete_change_set(ChangeSetName='NewChangeSet', StackName='NewStack')
cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(0)


@mock_cloudformation
@mock_ec2
def test_delete_stack_by_name():

491
tests/test_config/test_config.py
Normal file
@ -0,0 +1,491 @@
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
from nose.tools import assert_raises
|
||||
|
||||
from moto.config import mock_config
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_put_configuration_recorder():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
|
||||
# Try without a name supplied:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'roleARN': 'somearn'})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException'
|
||||
assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Try with a really long name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'name': 'a' * 257, 'roleARN': 'somearn'})
|
||||
assert ce.exception.response['Error']['Code'] == 'ValidationException'
|
||||
assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']
|
||||
|
||||
# With resource types and flags set to True:
|
||||
bad_groups = [
|
||||
{'allSupported': True, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
|
||||
{'allSupported': False, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
|
||||
{'allSupported': True, 'includeGlobalResourceTypes': False, 'resourceTypes': ['item']},
|
||||
{'allSupported': False, 'includeGlobalResourceTypes': False, 'resourceTypes': []},
|
||||
{'includeGlobalResourceTypes': False, 'resourceTypes': []},
|
||||
{'includeGlobalResourceTypes': True},
|
||||
{'resourceTypes': []},
|
||||
{}
|
||||
]
|
||||
|
||||
for bg in bad_groups:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'default',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': bg
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidRecordingGroupException'
|
||||
assert ce.exception.response['Error']['Message'] == 'The recording group provided is not valid'
|
||||
|
||||
# With an invalid Resource Type:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'default',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
# 2 good, and 2 bad:
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'LOLNO', 'AWS::EC2::VPC', 'LOLSTILLNO']
|
||||
}
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'ValidationException'
|
||||
assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str(ce.exception.response['Error']['Message'])
|
||||
assert 'AWS::EC2::Instance' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Create a proper one:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert not result[0]['recordingGroup']['allSupported']
|
||||
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert len(result[0]['recordingGroup']['resourceTypes']) == 2
|
||||
assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
|
||||
and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']
|
||||
|
||||
# Now update the configuration recorder:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': True,
|
||||
'includeGlobalResourceTypes': True
|
||||
}
|
||||
})
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert result[0]['recordingGroup']['allSupported']
|
||||
assert result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert len(result[0]['recordingGroup']['resourceTypes']) == 0
|
||||
|
||||
# With a default recording group (i.e. lacking one)
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'name': 'testrecorder', 'roleARN': 'somearn'})
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert result[0]['recordingGroup']['allSupported']
|
||||
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert not result[0]['recordingGroup'].get('resourceTypes')
|
||||
|
||||
# Can currently only have exactly 1 Config Recorder in an account/region:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'someotherrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
}
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'MaxNumberOfConfigurationRecordersExceededException'
|
||||
assert "maximum number of configuration recorders: 1 is reached." in ce.exception.response['Error']['Message']
|
||||
|
||||
|


@mock_config
def test_describe_configurations():
    client = boto3.client('config', region_name='us-west-2')

    # Without any configurations:
    result = client.describe_configuration_recorders()
    assert not result['ConfigurationRecorders']

    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    result = client.describe_configuration_recorders()['ConfigurationRecorders']
    assert len(result) == 1
    assert result[0]['name'] == 'testrecorder'
    assert result[0]['roleARN'] == 'somearn'
    assert not result[0]['recordingGroup']['allSupported']
    assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
    assert len(result[0]['recordingGroup']['resourceTypes']) == 2
    assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
        and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']

    # Specify an incorrect name:
    with assert_raises(ClientError) as ce:
        client.describe_configuration_recorders(ConfigurationRecorderNames=['wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
    assert 'wrong' in ce.exception.response['Error']['Message']

    # And with both a good and wrong name:
    with assert_raises(ClientError) as ce:
        client.describe_configuration_recorders(ConfigurationRecorderNames=['testrecorder', 'wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
    assert 'wrong' in ce.exception.response['Error']['Message']


@mock_config
def test_delivery_channels():
    client = boto3.client('config', region_name='us-west-2')

    # Try without a config recorder:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={})
    assert ce.exception.response['Error']['Code'] == 'NoAvailableConfigurationRecorderException'
    assert ce.exception.response['Error']['Message'] == 'Configuration recorder is not available to ' \
                                                        'put delivery channel.'

    # Create a config recorder to continue testing:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Try without a name supplied:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={})
    assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryChannelNameException'
    assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']

    # Try with a really long name:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={'name': 'a' * 257})
    assert ce.exception.response['Error']['Code'] == 'ValidationException'
    assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']

    # Without specifying a bucket name:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={'name': 'testchannel'})
    assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
    assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'

    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': ''})
    assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
    assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'

    # With an empty string for the S3 key prefix:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={
            'name': 'testchannel', 's3BucketName': 'somebucket', 's3KeyPrefix': ''})
    assert ce.exception.response['Error']['Code'] == 'InvalidS3KeyPrefixException'
    assert 'empty s3 key prefix.' in ce.exception.response['Error']['Message']

    # With an empty string for the SNS ARN:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={
            'name': 'testchannel', 's3BucketName': 'somebucket', 'snsTopicARN': ''})
    assert ce.exception.response['Error']['Code'] == 'InvalidSNSTopicARNException'
    assert 'The sns topic arn' in ce.exception.response['Error']['Message']

    # With an invalid delivery frequency:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={
            'name': 'testchannel',
            's3BucketName': 'somebucket',
            'configSnapshotDeliveryProperties': {'deliveryFrequency': 'WRONG'}
        })
    assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryFrequency'
    assert 'WRONG' in ce.exception.response['Error']['Message']
    assert 'TwentyFour_Hours' in ce.exception.response['Error']['Message']

    # Create a proper one:
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
    result = client.describe_delivery_channels()['DeliveryChannels']
    assert len(result) == 1
    assert len(result[0].keys()) == 2
    assert result[0]['name'] == 'testchannel'
    assert result[0]['s3BucketName'] == 'somebucket'

    # Overwrite it with another proper configuration:
    client.put_delivery_channel(DeliveryChannel={
        'name': 'testchannel',
        's3BucketName': 'somebucket',
        'snsTopicARN': 'sometopicarn',
        'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
    })
    result = client.describe_delivery_channels()['DeliveryChannels']
    assert len(result) == 1
    assert len(result[0].keys()) == 4
    assert result[0]['name'] == 'testchannel'
    assert result[0]['s3BucketName'] == 'somebucket'
    assert result[0]['snsTopicARN'] == 'sometopicarn'
    assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'

    # Can only have 1:
    with assert_raises(ClientError) as ce:
        client.put_delivery_channel(DeliveryChannel={'name': 'testchannel2', 's3BucketName': 'somebucket'})
    assert ce.exception.response['Error']['Code'] == 'MaxNumberOfDeliveryChannelsExceededException'
    assert 'because the maximum number of delivery channels: 1 is reached.' in ce.exception.response['Error']['Message']


@mock_config
def test_describe_delivery_channels():
    client = boto3.client('config', region_name='us-west-2')
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Without any channels:
    result = client.describe_delivery_channels()
    assert not result['DeliveryChannels']

    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
    result = client.describe_delivery_channels()['DeliveryChannels']
    assert len(result) == 1
    assert len(result[0].keys()) == 2
    assert result[0]['name'] == 'testchannel'
    assert result[0]['s3BucketName'] == 'somebucket'

    # Overwrite it with another proper configuration:
    client.put_delivery_channel(DeliveryChannel={
        'name': 'testchannel',
        's3BucketName': 'somebucket',
        'snsTopicARN': 'sometopicarn',
        'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
    })
    result = client.describe_delivery_channels()['DeliveryChannels']
    assert len(result) == 1
    assert len(result[0].keys()) == 4
    assert result[0]['name'] == 'testchannel'
    assert result[0]['s3BucketName'] == 'somebucket'
    assert result[0]['snsTopicARN'] == 'sometopicarn'
    assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'

    # Specify an incorrect name:
    with assert_raises(ClientError) as ce:
        client.describe_delivery_channels(DeliveryChannelNames=['wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
    assert 'wrong' in ce.exception.response['Error']['Message']

    # And with both a good and wrong name:
    with assert_raises(ClientError) as ce:
        client.describe_delivery_channels(DeliveryChannelNames=['testchannel', 'wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
    assert 'wrong' in ce.exception.response['Error']['Message']


@mock_config
def test_start_configuration_recorder():
    client = boto3.client('config', region_name='us-west-2')

    # Without a config recorder:
    with assert_raises(ClientError) as ce:
        client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Without a delivery channel:
    with assert_raises(ClientError) as ce:
        client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoAvailableDeliveryChannelException'

    # Make the delivery channel:
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})

    # Start it:
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Verify it's enabled:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    lower_bound = (datetime.utcnow() - timedelta(minutes=5))
    assert result[0]['recording']
    assert result[0]['lastStatus'] == 'PENDING'
    assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()
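
# The lower_bound / datetime.utcnow() window above is a cheap way to assert
# "this timestamp was set just now" without freezing time; the five-minute
# slack is arbitrary.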


@mock_config
def test_stop_configuration_recorder():
    client = boto3.client('config', region_name='us-west-2')

    # Without a config recorder:
    with assert_raises(ClientError) as ce:
        client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Make the delivery channel for creation:
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})

    # Start it:
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
    client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Verify it's disabled:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    lower_bound = (datetime.utcnow() - timedelta(minutes=5))
    assert not result[0]['recording']
    assert result[0]['lastStatus'] == 'PENDING'
    assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStopTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()


@mock_config
def test_describe_configuration_recorder_status():
    client = boto3.client('config', region_name='us-west-2')

    # Without any:
    result = client.describe_configuration_recorder_status()
    assert not result['ConfigurationRecordersStatus']

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Without specifying a config recorder:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    assert len(result) == 1
    assert result[0]['name'] == 'testrecorder'
    assert not result[0]['recording']

    # With a proper name:
    result = client.describe_configuration_recorder_status(
        ConfigurationRecorderNames=['testrecorder'])['ConfigurationRecordersStatus']
    assert len(result) == 1
    assert result[0]['name'] == 'testrecorder'
    assert not result[0]['recording']

    # Invalid name:
    with assert_raises(ClientError) as ce:
        client.describe_configuration_recorder_status(ConfigurationRecorderNames=['testrecorder', 'wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
    assert 'wrong' in ce.exception.response['Error']['Message']


@mock_config
def test_delete_configuration_recorder():
    client = boto3.client('config', region_name='us-west-2')

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Delete it:
    client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Try again -- it should be deleted:
    with assert_raises(ClientError) as ce:
        client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'


@mock_config
def test_delete_delivery_channel():
    client = boto3.client('config', region_name='us-west-2')

    # Need a recorder to test the constraint on recording being enabled:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # With the recorder enabled:
    with assert_raises(ClientError) as ce:
        client.delete_delivery_channel(DeliveryChannelName='testchannel')
    assert ce.exception.response['Error']['Code'] == 'LastDeliveryChannelDeleteFailedException'
    assert 'because there is a running configuration recorder.' in ce.exception.response['Error']['Message']

    # Stop recording:
    client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Try again:
    client.delete_delivery_channel(DeliveryChannelName='testchannel')

    # Verify:
    with assert_raises(ClientError) as ce:
        client.delete_delivery_channel(DeliveryChannelName='testchannel')
    assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
@@ -1505,3 +1505,110 @@ def test_dynamodb_streams_2():
    assert 'LatestStreamLabel' in resp['TableDescription']
    assert 'LatestStreamArn' in resp['TableDescription']


@mock_dynamodb2
def test_condition_expressions():
    client = boto3.client('dynamodb', region_name='us-east-1')
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    # Create the DynamoDB table.
    client.create_table(
        TableName='test1',
        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
    )
    client.put_item(
        TableName='test1',
        Item={
            'client': {'S': 'client1'},
            'app': {'S': 'app1'},
            'match': {'S': 'match'},
            'existing': {'S': 'existing'},
        }
    )

    client.put_item(
        TableName='test1',
        Item={
            'client': {'S': 'client1'},
            'app': {'S': 'app1'},
            'match': {'S': 'match'},
            'existing': {'S': 'existing'},
        },
        ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match',
        ExpressionAttributeNames={
            '#existing': 'existing',
            '#nonexistent': 'nope',
            '#match': 'match',
        },
        ExpressionAttributeValues={
            ':match': {'S': 'match'}
        }
    )

    client.put_item(
        TableName='test1',
        Item={
            'client': {'S': 'client1'},
            'app': {'S': 'app1'},
            'match': {'S': 'match'},
            'existing': {'S': 'existing'},
        },
        ConditionExpression='NOT(attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2))',
        ExpressionAttributeNames={
            '#nonexistent1': 'nope',
            '#nonexistent2': 'nope2'
        }
    )

    with assert_raises(client.exceptions.ConditionalCheckFailedException):
        client.put_item(
            TableName='test1',
            Item={
                'client': {'S': 'client1'},
                'app': {'S': 'app1'},
                'match': {'S': 'match'},
                'existing': {'S': 'existing'},
            },
            ConditionExpression='attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2)',
            ExpressionAttributeNames={
                '#nonexistent1': 'nope',
                '#nonexistent2': 'nope2'
            }
        )

    with assert_raises(client.exceptions.ConditionalCheckFailedException):
        client.put_item(
            TableName='test1',
            Item={
                'client': {'S': 'client1'},
                'app': {'S': 'app1'},
                'match': {'S': 'match'},
                'existing': {'S': 'existing'},
            },
            ConditionExpression='NOT(attribute_not_exists(#nonexistent1) AND attribute_not_exists(#nonexistent2))',
            ExpressionAttributeNames={
                '#nonexistent1': 'nope',
                '#nonexistent2': 'nope2'
            }
        )

    with assert_raises(client.exceptions.ConditionalCheckFailedException):
        client.put_item(
            TableName='test1',
            Item={
                'client': {'S': 'client1'},
                'app': {'S': 'app1'},
                'match': {'S': 'match'},
                'existing': {'S': 'existing'},
            },
            ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match',
            ExpressionAttributeNames={
                '#existing': 'existing',
                '#nonexistent': 'nope',
                '#match': 'match',
            },
            ExpressionAttributeValues={
                ':match': {'S': 'match2'}
            }
        )
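
# Summary of the ConditionExpression behavior exercised above: the condition
# is evaluated against the item already in the table, and a False result
# rejects the write with ConditionalCheckFailedException rather than
# silently skipping it.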

@@ -750,6 +750,47 @@ def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_n
    returned_item = table.get_item(Key={'username': 'johndoe'})
    assert dict(returned_item)['Item']['foo'].should.equal("baz")


@mock_dynamodb2
def test_boto3_update_settype_item_with_conditions():
    class OrderedSet(set):
        """A set with predictable iteration order"""
        def __init__(self, values):
            super(OrderedSet, self).__init__(values)
            self.__ordered_values = values

        def __iter__(self):
            return iter(self.__ordered_values)

    table = _create_user_table()
    table.put_item(Item={'username': 'johndoe'})
    table.update_item(
        Key={'username': 'johndoe'},
        UpdateExpression='SET foo=:new_value',
        ExpressionAttributeValues={
            ':new_value': OrderedSet(['hello', 'world']),
        },
    )

    table.update_item(
        Key={'username': 'johndoe'},
        UpdateExpression='SET foo=:new_value',
        ExpressionAttributeValues={
            ':new_value': set(['baz']),
        },
        Expected={
            'foo': {
                'ComparisonOperator': 'EQ',
                'AttributeValueList': [
                    OrderedSet(['world', 'hello']),  # Opposite order to original
                ],
            }
        },
    )
    returned_item = table.get_item(Key={'username': 'johndoe'})
    assert dict(returned_item)['Item']['foo'].should.equal(set(['baz']))
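
# OrderedSet exists because Python sets iterate in arbitrary order; feeding
# the same members back in the opposite order checks that the Expected EQ
# comparison treats sets as unordered instead of comparing serialized
# element order.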


@mock_dynamodb2
def test_boto3_put_item_conditions_pass():
    table = _create_user_table()
@@ -589,6 +589,18 @@ def test_volume_tag_escaping():
    dict(snaps[0].tags).should.equal({'key': '</closed>'})


@mock_ec2
def test_volume_property_hidden_when_no_tags_exist():
    ec2_client = boto3.client('ec2', region_name='us-east-1')

    volume_response = ec2_client.create_volume(
        Size=10,
        AvailabilityZone='us-east-1a'
    )

    volume_response.get('Tags').should.equal(None)


@freeze_time
@mock_ec2
def test_copy_snapshot():
@@ -602,26 +614,26 @@ def test_copy_snapshot():
    create_snapshot_response = ec2_client.create_snapshot(
        VolumeId=volume_response['VolumeId']
    )

    copy_snapshot_response = dest_ec2_client.copy_snapshot(
        SourceSnapshotId=create_snapshot_response['SnapshotId'],
        SourceRegion="eu-west-1"
    )

    ec2 = boto3.resource('ec2', region_name='eu-west-1')
    dest_ec2 = boto3.resource('ec2', region_name='eu-west-2')

    source = ec2.Snapshot(create_snapshot_response['SnapshotId'])
    dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId'])

    attribs = ['data_encryption_key_id', 'encrypted',
               'kms_key_id', 'owner_alias', 'owner_id',
               'progress', 'state', 'state_message',
               'tags', 'volume_id', 'volume_size']

    for attrib in attribs:
        getattr(source, attrib).should.equal(getattr(dest, attrib))

    # Copy from non-existent source ID.
    with assert_raises(ClientError) as cm:
        create_snapshot_error = ec2_client.create_snapshot(
@@ -1254,3 +1254,18 @@ def test_create_instance_ebs_optimized():
    )
    instance.load()
    instance.ebs_optimized.should.be(False)


@mock_ec2
def test_run_multiple_instances_in_same_command():
    instance_count = 4
    client = boto3.client('ec2', region_name='us-east-1')
    client.run_instances(ImageId='ami-1234abcd',
                         MinCount=instance_count,
                         MaxCount=instance_count)
    reservations = client.describe_instances()['Reservations']

    reservations[0]['Instances'].should.have.length_of(instance_count)

    instances = reservations[0]['Instances']
    for i in range(0, instance_count):
        instances[i]['AmiLaunchIndex'].should.be(i)
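
# AmiLaunchIndex, checked above, is the zero-based position of each instance
# within its reservation, so one run_instances call numbers them 0..N-1.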

@@ -1,8 +1,9 @@
from __future__ import unicode_literals
import boto
import boto3
import sure  # noqa

from moto import mock_ec2_deprecated
from moto import mock_ec2_deprecated, mock_ec2


@mock_ec2_deprecated
@@ -173,3 +174,43 @@ def test_network_acl_tagging():
                            if na.id == network_acl.id)
    test_network_acl.tags.should.have.length_of(1)
    test_network_acl.tags["a key"].should.equal("some value")


@mock_ec2
def test_new_subnet_in_new_vpc_associates_with_default_network_acl():
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    new_vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    new_vpc.reload()

    subnet = ec2.create_subnet(VpcId=new_vpc.id, CidrBlock='10.0.0.0/24')
    subnet.reload()

    new_vpcs_default_network_acl = next(iter(new_vpc.network_acls.all()), None)
    new_vpcs_default_network_acl.reload()
    new_vpcs_default_network_acl.vpc_id.should.equal(new_vpc.id)
    new_vpcs_default_network_acl.associations.should.have.length_of(1)
    new_vpcs_default_network_acl.associations[0]['SubnetId'].should.equal(subnet.id)


@mock_ec2
def test_default_network_acl_default_entries():
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    default_network_acl = next(iter(ec2.network_acls.all()), None)
    default_network_acl.is_default.should.be.ok

    default_network_acl.entries.should.have.length_of(4)
    unique_entries = []
    for entry in default_network_acl.entries:
        entry['CidrBlock'].should.equal('0.0.0.0/0')
        entry['Protocol'].should.equal('-1')
        entry['RuleNumber'].should.be.within([100, 32767])
        entry['RuleAction'].should.be.within(['allow', 'deny'])
        assert type(entry['Egress']) is bool
        if entry['RuleAction'] == 'allow':
            entry['RuleNumber'].should.be.equal(100)
        else:
            entry['RuleNumber'].should.be.equal(32767)
        if entry not in unique_entries:
            unique_entries.append(entry)

    unique_entries.should.have.length_of(4)
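
# A default network ACL ships with one allow-all (rule 100) and one
# catch-all deny (rule 32767) entry per direction, which is why the loop
# above expects exactly four unique entries.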

@@ -54,7 +54,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"):
        },
        'EbsOptimized': False,
        'WeightedCapacity': 2.0,
        'SpotPrice': '0.13'
        'SpotPrice': '0.13',
    }, {
        'ImageId': 'ami-123',
        'KeyName': 'my-key',
@@ -148,6 +148,48 @@ def test_create_diversified_spot_fleet():
    instances[0]['InstanceId'].should.contain("i-")


@mock_ec2
def test_create_spot_fleet_request_with_tag_spec():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    tag_spec = [
        {
            'ResourceType': 'instance',
            'Tags': [
                {
                    'Key': 'tag-1',
                    'Value': 'foo',
                },
                {
                    'Key': 'tag-2',
                    'Value': 'bar',
                },
            ]
        },
    ]
    config = spot_config(subnet_id)
    config['LaunchSpecifications'][0]['TagSpecifications'] = tag_spec
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=config
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
    spot_fleet_config = spot_fleet_requests[0]['SpotFleetRequestConfig']
    spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0][
        'ResourceType'].should.equal('instance')
    for tag in tag_spec[0]['Tags']:
        spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0]['Tags'].should.contain(tag)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = conn.describe_instances(InstanceIds=[i['InstanceId'] for i in instance_res['ActiveInstances']])
    for instance in instances['Reservations'][0]['Instances']:
        for tag in tag_spec[0]['Tags']:
            instance['Tags'].should.contain(tag)
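
# Tags from the launch specification's TagSpecifications are expected to
# propagate to the instances the fleet launches, which is what the nested
# loops above verify against describe_instances.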


@mock_ec2
def test_cancel_spot_fleet_request():
    conn = boto3.client("ec2", region_name='us-west-2')
@@ -1,5 +1,4 @@
import random

import boto3
import json

@@ -7,7 +6,6 @@ from moto.events import mock_events
from botocore.exceptions import ClientError
from nose.tools import assert_raises


RULES = [
    {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
    {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
@@ -109,6 +107,13 @@ def test_enable_disable_rule():
    rule = client.describe_rule(Name=rule_name)
    assert(rule['State'] == 'ENABLED')

    # Test invalid name
    try:
        client.enable_rule(Name='junk')
    except ClientError as ce:
        assert ce.response['Error']['Code'] == 'ResourceNotFoundException'


@mock_events
def test_list_rule_names_by_target():
@@ -306,6 +306,7 @@ def test_create_policy_versions():
        PolicyDocument='{"some":"policy"}')
    version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})


@mock_iam
def test_get_policy():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -579,6 +580,7 @@ def test_get_credential_report():
        'get_credential_report_result']['content'].encode('ascii')).decode('ascii')
    report.should.match(r'.*my-user.*')


@mock_iam
def test_boto3_get_credential_report():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -757,6 +759,17 @@ def test_get_access_key_last_used():
@mock_iam
def test_get_account_authorization_details():
    import json
    test_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "s3:ListBucket",
                "Resource": "*",
                "Effect": "Allow",
            }
        ]
    })

    conn = boto3.client('iam', region_name='us-east-1')
    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
    conn.create_user(Path='/', UserName='testUser')
@@ -764,21 +777,34 @@ def test_get_account_authorization_details():
    conn.create_policy(
        PolicyName='testPolicy',
        Path='/',
        PolicyDocument=json.dumps({
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "s3:ListBucket",
                    "Resource": "*",
                    "Effect": "Allow",
                }
            ]
        }),
        PolicyDocument=test_policy,
        Description='Test Policy'
    )

    # Attach things to the user and group:
    conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy)
    conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy)

    conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')
    conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')

    conn.add_user_to_group(UserName='testUser', GroupName='testGroup')

    # Add things to the role:
    conn.create_instance_profile(InstanceProfileName='ipn')
    conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role')
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': 'somevalue'
        },
        {
            'Key': 'someotherkey',
            'Value': 'someothervalue'
        }
    ])
    conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy)
    conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')

    result = conn.get_account_authorization_details(Filter=['Role'])
    assert len(result['RoleDetailList']) == 1
@@ -786,24 +812,41 @@ def test_get_account_authorization_details():
    assert len(result['GroupDetailList']) == 0
    assert len(result['Policies']) == 0
    assert len(result['RoleDetailList'][0]['InstanceProfileList']) == 1
    assert len(result['RoleDetailList'][0]['Tags']) == 2
    assert len(result['RoleDetailList'][0]['RolePolicyList']) == 1
    assert len(result['RoleDetailList'][0]['AttachedManagedPolicies']) == 1
    assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy'
    assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \
        'arn:aws:iam::123456789012:policy/testPolicy'

    result = conn.get_account_authorization_details(Filter=['User'])
    assert len(result['RoleDetailList']) == 0
    assert len(result['UserDetailList']) == 1
    assert len(result['UserDetailList'][0]['GroupList']) == 1
    assert len(result['UserDetailList'][0]['AttachedManagedPolicies']) == 1
    assert len(result['GroupDetailList']) == 0
    assert len(result['Policies']) == 0
    assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy'
    assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \
        'arn:aws:iam::123456789012:policy/testPolicy'

    result = conn.get_account_authorization_details(Filter=['Group'])
    assert len(result['RoleDetailList']) == 0
    assert len(result['UserDetailList']) == 0
    assert len(result['GroupDetailList']) == 1
    assert len(result['GroupDetailList'][0]['GroupPolicyList']) == 1
    assert len(result['GroupDetailList'][0]['AttachedManagedPolicies']) == 1
    assert len(result['Policies']) == 0
    assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy'
    assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \
        'arn:aws:iam::123456789012:policy/testPolicy'

    result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy'])
    assert len(result['RoleDetailList']) == 0
    assert len(result['UserDetailList']) == 0
    assert len(result['GroupDetailList']) == 0
    assert len(result['Policies']) == 1
    assert len(result['Policies'][0]['PolicyVersionList']) == 1

    # Check for greater than 1 since this should always be greater than one but might change.
    # See iam/aws_managed_policies.py
@@ -872,6 +915,7 @@ def test_signing_certs():
    with assert_raises(ClientError):
        client.delete_signing_certificate(UserName='notauser', CertificateId=cert_id)


@mock_iam()
def test_create_saml_provider():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -881,6 +925,7 @@ def test_create_saml_provider():
    )
    response['SAMLProviderArn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider")


@mock_iam()
def test_get_saml_provider():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -893,6 +938,7 @@ def test_get_saml_provider():
    )
    response['SAMLMetadataDocument'].should.equal('a' * 1024)


@mock_iam()
def test_list_saml_providers():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -903,6 +949,7 @@ def test_list_saml_providers():
    response = conn.list_saml_providers()
    response['SAMLProviderList'][0]['Arn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider")


@mock_iam()
def test_delete_saml_provider():
    conn = boto3.client('iam', region_name='us-east-1')
@@ -929,3 +976,293 @@ def test_delete_saml_provider():
    # Verify that it's not in the list:
    resp = conn.list_signing_certificates(UserName='testing')
    assert not resp['Certificates']


@mock_iam()
def test_tag_role():
    """Tests both the tag_role and get_role_tags capability"""
    conn = boto3.client('iam', region_name='us-east-1')
    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}")

    # Get without tags:
    role = conn.get_role(RoleName='my-role')['Role']
    assert not role.get('Tags')

    # With proper tag values:
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': 'somevalue'
        },
        {
            'Key': 'someotherkey',
            'Value': 'someothervalue'
        }
    ])

    # Get role:
    role = conn.get_role(RoleName='my-role')['Role']
    assert len(role['Tags']) == 2
    assert role['Tags'][0]['Key'] == 'somekey'
    assert role['Tags'][0]['Value'] == 'somevalue'
    assert role['Tags'][1]['Key'] == 'someotherkey'
    assert role['Tags'][1]['Value'] == 'someothervalue'

    # Same -- but for list_role_tags:
    tags = conn.list_role_tags(RoleName='my-role')
    assert len(tags['Tags']) == 2
    assert role['Tags'][0]['Key'] == 'somekey'
    assert role['Tags'][0]['Value'] == 'somevalue'
    assert role['Tags'][1]['Key'] == 'someotherkey'
    assert role['Tags'][1]['Value'] == 'someothervalue'
    assert not tags['IsTruncated']
    assert not tags.get('Marker')

    # Test pagination:
    tags = conn.list_role_tags(RoleName='my-role', MaxItems=1)
    assert len(tags['Tags']) == 1
    assert tags['IsTruncated']
    assert tags['Tags'][0]['Key'] == 'somekey'
    assert tags['Tags'][0]['Value'] == 'somevalue'
    assert tags['Marker'] == '1'

    tags = conn.list_role_tags(RoleName='my-role', Marker=tags['Marker'])
    assert len(tags['Tags']) == 1
    assert tags['Tags'][0]['Key'] == 'someotherkey'
    assert tags['Tags'][0]['Value'] == 'someothervalue'
    assert not tags['IsTruncated']
    assert not tags.get('Marker')

    # Test updating an existing tag:
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': 'somenewvalue'
        }
    ])
    tags = conn.list_role_tags(RoleName='my-role')
    assert len(tags['Tags']) == 2
    assert tags['Tags'][0]['Key'] == 'somekey'
    assert tags['Tags'][0]['Value'] == 'somenewvalue'

    # Empty is good:
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': ''
        }
    ])
    tags = conn.list_role_tags(RoleName='my-role')
    assert len(tags['Tags']) == 2
    assert tags['Tags'][0]['Key'] == 'somekey'
    assert tags['Tags'][0]['Value'] == ''

    # Test creating tags with invalid values:
    # With more than 50 tags:
    with assert_raises(ClientError) as ce:
        too_many_tags = list(map(lambda x: {'Key': str(x), 'Value': str(x)}, range(0, 51)))
        conn.tag_role(RoleName='my-role', Tags=too_many_tags)
    assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \
        in ce.exception.response['Error']['Message']

    # With a duplicate tag:
    with assert_raises(ClientError) as ce:
        conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': ''}, {'Key': '0', 'Value': ''}])
    assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \
        in ce.exception.response['Error']['Message']

    # Duplicate tag with different casing:
    with assert_raises(ClientError) as ce:
        conn.tag_role(RoleName='my-role', Tags=[{'Key': 'a', 'Value': ''}, {'Key': 'A', 'Value': ''}])
    assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \
        in ce.exception.response['Error']['Message']

    # With a really big key:
    with assert_raises(ClientError) as ce:
        conn.tag_role(RoleName='my-role', Tags=[{'Key': '0' * 129, 'Value': ''}])
    assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message']

    # With a really big value:
    with assert_raises(ClientError) as ce:
        conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': '0' * 257}])
    assert 'Member must have length less than or equal to 256.' in ce.exception.response['Error']['Message']

    # With an invalid character:
    with assert_raises(ClientError) as ce:
        conn.tag_role(RoleName='my-role', Tags=[{'Key': 'NOWAY!', 'Value': ''}])
    assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \
        in ce.exception.response['Error']['Message']

    # With a role that doesn't exist:
    with assert_raises(ClientError):
        conn.tag_role(RoleName='notarole', Tags=[{'Key': 'some', 'Value': 'value'}])
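
# The constraints exercised above follow the IAM tagging limits: at most 50
# tags per request, keys up to 128 characters, values up to 256, keys
# matching [\p{L}\p{Z}\p{N}_.:/=+\-@]+, and case-insensitive key uniqueness.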


@mock_iam
def test_untag_role():
    conn = boto3.client('iam', region_name='us-east-1')
    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}")

    # With proper tag values:
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': 'somevalue'
        },
        {
            'Key': 'someotherkey',
            'Value': 'someothervalue'
        }
    ])

    # Remove them:
    conn.untag_role(RoleName='my-role', TagKeys=['somekey'])
    tags = conn.list_role_tags(RoleName='my-role')
    assert len(tags['Tags']) == 1
    assert tags['Tags'][0]['Key'] == 'someotherkey'
    assert tags['Tags'][0]['Value'] == 'someothervalue'

    # And again:
    conn.untag_role(RoleName='my-role', TagKeys=['someotherkey'])
    tags = conn.list_role_tags(RoleName='my-role')
    assert not tags['Tags']

    # Test removing tags with invalid values:
    # With more than 50 tags:
    with assert_raises(ClientError) as ce:
        conn.untag_role(RoleName='my-role', TagKeys=[str(x) for x in range(0, 51)])
    assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \
        in ce.exception.response['Error']['Message']
    assert 'tagKeys' in ce.exception.response['Error']['Message']

    # With a really big key:
    with assert_raises(ClientError) as ce:
        conn.untag_role(RoleName='my-role', TagKeys=['0' * 129])
    assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message']
    assert 'tagKeys' in ce.exception.response['Error']['Message']

    # With an invalid character:
    with assert_raises(ClientError) as ce:
        conn.untag_role(RoleName='my-role', TagKeys=['NOWAY!'])
    assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \
        in ce.exception.response['Error']['Message']
    assert 'tagKeys' in ce.exception.response['Error']['Message']

    # With a role that doesn't exist:
    with assert_raises(ClientError):
        conn.untag_role(RoleName='notarole', TagKeys=['somevalue'])


@mock_iam()
def test_update_role_description():
    conn = boto3.client('iam', region_name='us-east-1')

    with assert_raises(ClientError):
        conn.delete_role(RoleName="my-role")

    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
    response = conn.update_role_description(RoleName="my-role", Description="test")

    assert response['Role']['RoleName'] == 'my-role'


@mock_iam()
def test_update_role():
    conn = boto3.client('iam', region_name='us-east-1')

    with assert_raises(ClientError):
        conn.delete_role(RoleName="my-role")

    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
    response = conn.update_role_description(RoleName="my-role", Description="test")
    assert response['Role']['RoleName'] == 'my-role'


@mock_iam()
def test_update_role():
    conn = boto3.client('iam', region_name='us-east-1')

    with assert_raises(ClientError):
        conn.delete_role(RoleName="my-role")

    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
    response = conn.update_role(RoleName="my-role", Description="test")
    assert len(response.keys()) == 1


@mock_iam()
def test_list_entities_for_policy():
    import json
    test_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "s3:ListBucket",
                "Resource": "*",
                "Effect": "Allow",
            }
        ]
    })

    conn = boto3.client('iam', region_name='us-east-1')
    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
    conn.create_user(Path='/', UserName='testUser')
    conn.create_group(Path='/', GroupName='testGroup')
    conn.create_policy(
        PolicyName='testPolicy',
        Path='/',
        PolicyDocument=test_policy,
        Description='Test Policy'
    )

    # Attach things to the user and group:
    conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy)
    conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy)

    conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')
    conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')

    conn.add_user_to_group(UserName='testUser', GroupName='testGroup')

    # Add things to the role:
    conn.create_instance_profile(InstanceProfileName='ipn')
    conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role')
    conn.tag_role(RoleName='my-role', Tags=[
        {
            'Key': 'somekey',
            'Value': 'somevalue'
        },
        {
            'Key': 'someotherkey',
            'Value': 'someothervalue'
        }
    ])
    conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy)
    conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')

    response = conn.list_entities_for_policy(
        PolicyArn='arn:aws:iam::123456789012:policy/testPolicy',
        EntityFilter='Role'
    )
    assert response['PolicyRoles'] == [{'RoleName': 'my-role'}]

    response = conn.list_entities_for_policy(
        PolicyArn='arn:aws:iam::123456789012:policy/testPolicy',
        EntityFilter='User',
    )
    assert response['PolicyUsers'] == [{'UserName': 'testUser'}]

    response = conn.list_entities_for_policy(
        PolicyArn='arn:aws:iam::123456789012:policy/testPolicy',
        EntityFilter='Group',
    )
    assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}]

    response = conn.list_entities_for_policy(
        PolicyArn='arn:aws:iam::123456789012:policy/testPolicy',
        EntityFilter='LocalManagedPolicy',
    )
    assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}]
    assert response['PolicyUsers'] == [{'UserName': 'testUser'}]
    assert response['PolicyRoles'] == [{'RoleName': 'my-role'}]

@@ -1,6 +1,5 @@
from __future__ import unicode_literals
import os, re

import boto3
import boto.kms
from boto.exception import JSONResponseError
@@ -717,3 +716,60 @@ def test_cancel_key_deletion():
    assert result["KeyMetadata"]["Enabled"] == False
    assert result["KeyMetadata"]["KeyState"] == 'Disabled'
    assert 'DeletionDate' not in result["KeyMetadata"]


@mock_kms
def test_update_key_description():
    client = boto3.client('kms', region_name='us-east-1')
    key = client.create_key(Description='old_description')
    key_id = key['KeyMetadata']['KeyId']

    result = client.update_key_description(KeyId=key_id, Description='new_description')
    assert 'ResponseMetadata' in result


@mock_kms
def test_tag_resource():
    client = boto3.client('kms', region_name='us-east-1')
    key = client.create_key(Description='cancel-key-deletion')
    response = client.schedule_key_deletion(
        KeyId=key['KeyMetadata']['KeyId']
    )

    keyid = response['KeyId']
    response = client.tag_resource(
        KeyId=keyid,
        Tags=[
            {
                'TagKey': 'string',
                'TagValue': 'string'
            },
        ]
    )

    # Shouldn't have any data, just header
    assert len(response.keys()) == 1


@mock_kms
def test_list_resource_tags():
    client = boto3.client('kms', region_name='us-east-1')
    key = client.create_key(Description='cancel-key-deletion')
    response = client.schedule_key_deletion(
        KeyId=key['KeyMetadata']['KeyId']
    )

    keyid = response['KeyId']
    response = client.tag_resource(
        KeyId=keyid,
        Tags=[
            {
                'TagKey': 'string',
                'TagValue': 'string'
            },
        ]
    )

    response = client.list_resource_tags(KeyId=keyid)
    assert response['Tags'][0]['TagKey'] == 'string'
    assert response['Tags'][0]['TagValue'] == 'string'
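
# Unlike IAM's Key/Value tag shape, KMS tags use TagKey/TagValue, as the
# tag_resource and list_resource_tags calls above show.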

8
tests/test_packages/__init__.py
Normal file
@@ -0,0 +1,8 @@
from __future__ import unicode_literals

import logging
# Disable extra logging for tests
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('nose').setLevel(logging.CRITICAL)
37
tests/test_packages/test_httpretty.py
Normal file
@@ -0,0 +1,37 @@
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock

from moto.packages.httpretty.core import HTTPrettyRequest, fake_gethostname, fake_gethostbyname


def test_parse_querystring():
    core = HTTPrettyRequest(headers='test test HTTP/1.1')

    qs = 'test test'
    response = core.parse_querystring(qs)

    assert response == {}


def test_parse_request_body():
    core = HTTPrettyRequest(headers='test test HTTP/1.1')

    qs = 'test'
    response = core.parse_request_body(qs)

    assert response == 'test'


def test_fake_gethostname():
    response = fake_gethostname()

    assert response == 'localhost'


def test_fake_gethostbyname():
    host = 'test'
    response = fake_gethostbyname(host=host)

    assert response == '127.0.0.1'
@@ -98,6 +98,16 @@ def test_rrset():
    rrsets.should.have.length_of(1)
    rrsets[0].resource_records[0].should.equal('5.6.7.8')

    changes = ResourceRecordSets(conn, zoneid)
    change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "TXT")
    change.add_value("foo")
    changes.commit()

    rrsets = conn.get_all_rrsets(zoneid)
    rrsets.should.have.length_of(2)
    rrsets[0].resource_records[0].should.equal('5.6.7.8')
    rrsets[1].resource_records[0].should.equal('foo')
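
    # UPSERT above inserts the TXT record set if it is absent and overwrites
    # it if present, so the zone gains exactly one new record set.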

    changes = ResourceRecordSets(conn, zoneid)
    changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A")
    changes.commit()
@@ -520,7 +530,7 @@ def test_change_resource_record_sets_crud_valid():

    # Create A Record.
    a_record_endpoint_payload = {
        'Comment': 'create A record prod.redis.db',
        'Comment': 'Create A record prod.redis.db',
        'Changes': [
            {
                'Action': 'CREATE',
@@ -545,15 +555,15 @@ def test_change_resource_record_sets_crud_valid():
    a_record_detail['TTL'].should.equal(10)
    a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}])

    # Update type to CNAME
    # Update A Record.
    cname_record_endpoint_payload = {
        'Comment': 'Update to CNAME prod.redis.db',
        'Comment': 'Update A record prod.redis.db',
        'Changes': [
            {
                'Action': 'UPSERT',
                'ResourceRecordSet': {
                    'Name': 'prod.redis.db.',
                    'Type': 'CNAME',
                    'Type': 'A',
                    'TTL': 60,
                    'ResourceRecords': [{
                        'Value': '192.168.1.1'
@@ -568,7 +578,7 @@ def test_change_resource_record_sets_crud_valid():
    len(response['ResourceRecordSets']).should.equal(1)
    cname_record_detail = response['ResourceRecordSets'][0]
    cname_record_detail['Name'].should.equal('prod.redis.db.')
    cname_record_detail['Type'].should.equal('CNAME')
    cname_record_detail['Type'].should.equal('A')
    cname_record_detail['TTL'].should.equal(60)
    cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}])

@@ -418,6 +418,22 @@ def test_copy_key():
        "new-key").get_contents_as_string().should.equal(b"some value")


@mock_s3_deprecated
def test_copy_key_with_unicode():
    conn = boto.connect_s3('the_key', 'the_secret')
    bucket = conn.create_bucket("foobar")
    key = Key(bucket)
    key.key = "the-unicode-💩-key"
    key.set_contents_from_string("some value")

    bucket.copy_key('new-key', 'foobar', 'the-unicode-💩-key')

    bucket.get_key(
        "the-unicode-💩-key").get_contents_as_string().should.equal(b"some value")
    bucket.get_key(
        "new-key").get_contents_as_string().should.equal(b"some value")


@mock_s3_deprecated
def test_copy_key_with_version():
    conn = boto.connect_s3('the_key', 'the_secret')
@@ -428,7 +444,12 @@ def test_copy_key_with_version():
    key.set_contents_from_string("some value")
    key.set_contents_from_string("another value")

    bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0')
    key = [
        key.version_id
        for key in bucket.get_all_versions()
        if not key.is_latest
    ][0]
    bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id=key)

    bucket.get_key(
        "the-key").get_contents_as_string().should.equal(b"another value")
@@ -802,16 +823,19 @@ def test_key_version():
    bucket = conn.create_bucket('foobar')
    bucket.configure_versioning(versioning=True)

    versions = []

    key = Key(bucket)
    key.key = 'the-key'
    key.version_id.should.be.none
    key.set_contents_from_string('some string')
    key.version_id.should.equal('0')
    versions.append(key.version_id)
    key.set_contents_from_string('some string')
    key.version_id.should.equal('1')
    versions.append(key.version_id)
    set(versions).should.have.length_of(2)

    key = bucket.get_key('the-key')
    key.version_id.should.equal('1')
    key.version_id.should.equal(versions[-1])
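
# Capturing version ids as they are returned (rather than hard-coding '0'
# and '1') keeps these assertions valid even if the mock generates opaque,
# non-sequential version ids.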
||||
|
||||
@mock_s3_deprecated
|
||||
@ -820,23 +844,25 @@ def test_list_versions():
     bucket = conn.create_bucket('foobar')
     bucket.configure_versioning(versioning=True)

+    key_versions = []
+
     key = Key(bucket, 'the-key')
     key.version_id.should.be.none
     key.set_contents_from_string("Version 1")
-    key.version_id.should.equal('0')
+    key_versions.append(key.version_id)
     key.set_contents_from_string("Version 2")
-    key.version_id.should.equal('1')
+    key_versions.append(key.version_id)
+    key_versions.should.have.length_of(2)

     versions = list(bucket.list_versions())

     versions.should.have.length_of(2)

     versions[0].name.should.equal('the-key')
-    versions[0].version_id.should.equal('0')
+    versions[0].version_id.should.equal(key_versions[0])
     versions[0].get_contents_as_string().should.equal(b"Version 1")

     versions[1].name.should.equal('the-key')
-    versions[1].version_id.should.equal('1')
+    versions[1].version_id.should.equal(key_versions[1])
     versions[1].get_contents_as_string().should.equal(b"Version 2")

     key = Key(bucket, 'the2-key')
@ -1467,16 +1493,22 @@ def test_boto3_head_object_with_versioning():
     s3.Object('blah', 'hello.txt').put(Body=old_content)
     s3.Object('blah', 'hello.txt').put(Body=new_content)

+    versions = list(s3.Bucket('blah').object_versions.all())
+    latest = list(filter(lambda item: item.is_latest, versions))[0]
+    oldest = list(filter(lambda item: not item.is_latest, versions))[0]
+
     head_object = s3.Object('blah', 'hello.txt').meta.client.head_object(
         Bucket='blah', Key='hello.txt')
-    head_object['VersionId'].should.equal('1')
+    head_object['VersionId'].should.equal(latest.id)
     head_object['ContentLength'].should.equal(len(new_content))

     old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object(
-        Bucket='blah', Key='hello.txt', VersionId='0')
-    old_head_object['VersionId'].should.equal('0')
+        Bucket='blah', Key='hello.txt', VersionId=oldest.id)
+    old_head_object['VersionId'].should.equal(oldest.id)
     old_head_object['ContentLength'].should.equal(len(old_content))

+    old_head_object['VersionId'].should_not.equal(head_object['VersionId'])
+

 @mock_s3
 def test_boto3_copy_object_with_versioning():
@ -1491,9 +1523,6 @@ def test_boto3_copy_object_with_versioning():
     obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId']
     obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId']

-    # Versions should be the same
-    obj1_version.should.equal(obj2_version)
-
     client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2')
     obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId']

@ -2491,6 +2520,75 @@ def test_boto3_list_object_versions():
     response['Body'].read().should.equal(items[-1])


+@mock_s3
+def test_boto3_list_object_versions_with_versioning_disabled():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = 'key-with-versions'
+    s3.create_bucket(Bucket=bucket_name)
+    items = (six.b('v1'), six.b('v2'))
+    for body in items:
+        s3.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=body
+        )
+    response = s3.list_object_versions(
+        Bucket=bucket_name
+    )
+
+    # One object version should be returned
+    len(response['Versions']).should.equal(1)
+    response['Versions'][0]['Key'].should.equal(key)
+
+    # The version id should be the string null
+    response['Versions'][0]['VersionId'].should.equal('null')
+
+    # Test latest object version is returned
+    response = s3.get_object(Bucket=bucket_name, Key=key)
+    response['Body'].read().should.equal(items[-1])
+
+
+@mock_s3
+def test_boto3_list_object_versions_with_versioning_enabled_late():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = 'key-with-versions'
+    s3.create_bucket(Bucket=bucket_name)
+    items = (six.b('v1'), six.b('v2'))
+    s3.put_object(
+        Bucket=bucket_name,
+        Key=key,
+        Body=six.b('v1')
+    )
+    s3.put_bucket_versioning(
+        Bucket=bucket_name,
+        VersioningConfiguration={
+            'Status': 'Enabled'
+        }
+    )
+    s3.put_object(
+        Bucket=bucket_name,
+        Key=key,
+        Body=six.b('v2')
+    )
+    response = s3.list_object_versions(
+        Bucket=bucket_name
+    )
+
+    # Two object versions should be returned
+    len(response['Versions']).should.equal(2)
+    keys = set([item['Key'] for item in response['Versions']])
+    keys.should.equal({key})
+
+    # There should still be a null version id.
+    versionsId = set([item['VersionId'] for item in response['Versions']])
+    versionsId.should.contain('null')
+
+    # Test latest object version is returned
+    response = s3.get_object(Bucket=bucket_name, Key=key)
+    response['Body'].read().should.equal(items[-1])
+

 @mock_s3
 def test_boto3_bad_prefix_list_object_versions():
     s3 = boto3.client('s3', region_name='us-east-1')
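Both new tests hinge on S3's rule that objects written while versioning is off (or merely suspended) carry the literal version id 'null'. A small sketch, under the same @mock_s3 assumption and a hypothetical bucket name, of how the bucket's versioning state itself reads back:

    import boto3

    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='mybucket')

    # No 'Status' key at all until versioning has been configured
    s3.get_bucket_versioning(Bucket='mybucket').get('Status')  # -> None

    s3.put_bucket_versioning(
        Bucket='mybucket',
        VersioningConfiguration={'Status': 'Enabled'},
    )
    s3.get_bucket_versioning(Bucket='mybucket')['Status']      # -> 'Enabled'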
@ -2547,18 +2645,25 @@ def test_boto3_delete_markers():
             Bucket=bucket_name,
             Key=key
         )
-    e.response['Error']['Code'].should.equal('404')
+    e.exception.response['Error']['Code'].should.equal('NoSuchKey')
+
+    response = s3.list_object_versions(
+        Bucket=bucket_name
+    )
+    response['Versions'].should.have.length_of(2)
+    response['DeleteMarkers'].should.have.length_of(1)

     s3.delete_object(
         Bucket=bucket_name,
         Key=key,
-        VersionId='2'
+        VersionId=response['DeleteMarkers'][0]['VersionId']
     )
     response = s3.get_object(
         Bucket=bucket_name,
         Key=key
     )
     response['Body'].read().should.equal(items[-1])

     response = s3.list_object_versions(
         Bucket=bucket_name
     )
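The change above is the same opaque-version-id fix: instead of assuming the delete marker got id '2', the test asks list_object_versions for the marker's actual id. A hedged end-to-end sketch of the delete-marker round trip (bucket and key names hypothetical; runnable under @mock_s3):

    import boto3

    s3 = boto3.client('s3', region_name='us-east-1')
    bucket, key = 'mybucket', 'some-key'  # hypothetical names
    s3.create_bucket(Bucket=bucket)
    s3.put_bucket_versioning(Bucket=bucket,
                             VersioningConfiguration={'Status': 'Enabled'})
    s3.put_object(Bucket=bucket, Key=key, Body=b'v1')

    # A delete with no VersionId hides the key behind a delete marker...
    s3.delete_object(Bucket=bucket, Key=key)
    marker = s3.list_object_versions(Bucket=bucket)['DeleteMarkers'][0]

    # ...and deleting the marker's own version id brings the object back.
    s3.delete_object(Bucket=bucket, Key=key, VersionId=marker['VersionId'])
    s3.get_object(Bucket=bucket, Key=key)['Body'].read()  # b'v1'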
@ -2567,10 +2672,8 @@ def test_boto3_delete_markers():
     # We've asserted there is only 2 records so one is newest, one is oldest
     latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
     oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]

     # Double check ordering of version ID's
-    latest['VersionId'].should.equal('1')
-    oldest['VersionId'].should.equal('0')
+    latest['VersionId'].should_not.equal(oldest['VersionId'])

     # Double check the name is still unicode
     latest['Key'].should.equal('key-with-versions-and-unicode-ó')
@ -2615,12 +2718,12 @@ def test_boto3_multiple_delete_markers():
     s3.delete_object(
         Bucket=bucket_name,
         Key=key,
-        VersionId='2'
+        VersionId=response['DeleteMarkers'][0]['VersionId']
     )
     s3.delete_object(
         Bucket=bucket_name,
         Key=key,
-        VersionId='3'
+        VersionId=response['DeleteMarkers'][1]['VersionId']
     )

     response = s3.get_object(
@ -2636,8 +2739,7 @@ def test_boto3_multiple_delete_markers():
     oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]

     # Double check ordering of version ID's
-    latest['VersionId'].should.equal('1')
-    oldest['VersionId'].should.equal('0')
+    latest['VersionId'].should_not.equal(oldest['VersionId'])

     # Double check the name is still unicode
     latest['Key'].should.equal('key-with-versions-and-unicode-ó')
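As the test name suggests, repeating a plain delete_object on a versioned bucket stacks additional delete markers rather than failing, which is why the test above indexes both ['DeleteMarkers'][0] and ['DeleteMarkers'][1]. A hedged sketch of that stacking behaviour (same hypothetical setup as the sketch above):

    import boto3

    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='mybucket')
    s3.put_bucket_versioning(Bucket='mybucket',
                             VersioningConfiguration={'Status': 'Enabled'})
    s3.put_object(Bucket='mybucket', Key='some-key', Body=b'v1')

    # Each delete without a VersionId adds one more delete marker on top
    s3.delete_object(Bucket='mybucket', Key='some-key')
    s3.delete_object(Bucket='mybucket', Key='some-key')

    markers = s3.list_object_versions(Bucket='mybucket')['DeleteMarkers']
    assert len(markers) == 2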