diff --git a/.gitignore b/.gitignore index 7f57e98e9..efb489651 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,6 @@ python_env .ropeproject/ .pytest_cache/ venv/ - +.python-version +.vscode/ +tests/file.tmp diff --git a/.travis.yml b/.travis.yml index 3a5de0fa2..5bc9779f3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +dist: xenial language: python sudo: false services: @@ -5,26 +6,12 @@ services: python: - 2.7 - 3.6 + - 3.7 env: - TEST_SERVER_MODE=false - TEST_SERVER_MODE=true -# Due to incomplete Python 3.7 support on Travis CI ( -# https://github.com/travis-ci/travis-ci/issues/9815), -# using a matrix is necessary -matrix: - include: - - python: 3.7 - env: TEST_SERVER_MODE=false - dist: xenial - sudo: true - - python: 3.7 - env: TEST_SERVER_MODE=true - dist: xenial - sudo: true before_install: - export BOTO_CONFIG=/dev/null - - export AWS_SECRET_ACCESS_KEY=foobar_secret - - export AWS_ACCESS_KEY_ID=foobar_key install: # We build moto first so the docker container doesn't try to compile it as well, also note we don't use # -d for docker run so the logs show up in travis diff --git a/AUTHORS.md b/AUTHORS.md index 0a152505a..5eb313dda 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -54,3 +54,4 @@ Moto is written by Steve Pulec with contributions from: * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) * [Jon Beilke](https://github.com/jrbeilke) +* [Robert Lewis](https://github.com/ralewis85) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 7c68c0e31..09f0cd039 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -470,46 +470,46 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 21% implemented +## cloudformation - 56% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set - [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set +- [X] create_stack_instances +- [X] create_stack_set +- [X] delete_change_set - [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set +- [X] delete_stack_instances +- [X] delete_stack_set - [ ] describe_account_limits -- [ ] describe_change_set +- [X] describe_change_set - [ ] describe_stack_events -- [ ] describe_stack_instance +- [X] describe_stack_instance - [ ] describe_stack_resource - [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation +- [X] describe_stack_set +- [X] describe_stack_set_operation - [X] describe_stacks - [ ] estimate_template_cost - [X] execute_change_set - [ ] get_stack_policy - [ ] get_template - [ ] get_template_summary -- [ ] list_change_sets +- [X] list_change_sets - [X] list_exports - [ ] list_imports -- [ ] list_stack_instances +- [X] list_stack_instances - [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets +- [X] list_stack_set_operation_results +- [X] list_stack_set_operations +- [X] list_stack_sets - [X] list_stacks - [ ] set_stack_policy - [ ] signal_resource -- [ ] stop_stack_set_operation +- [X] stop_stack_set_operation - [X] update_stack -- [ ] update_stack_instances -- [ ] update_stack_set +- [X] update_stack_instances +- [X] update_stack_set - [ ] update_termination_protection - [ ] validate_template @@ -827,25 +827,25 @@ - [ ] unlink_identity - [ ] update_identity_pool -## cognito-idp - 0% implemented +## cognito-idp - 34% implemented - [ ] add_custom_attributes -- [ ] 
admin_add_user_to_group +- [X] admin_add_user_to_group - [ ] admin_confirm_sign_up -- [ ] admin_create_user -- [ ] admin_delete_user +- [X] admin_create_user +- [X] admin_delete_user - [ ] admin_delete_user_attributes - [ ] admin_disable_provider_for_user - [X] admin_disable_user - [X] admin_enable_user - [ ] admin_forget_device - [ ] admin_get_device -- [ ] admin_get_user -- [ ] admin_initiate_auth +- [X] admin_get_user +- [X] admin_initiate_auth - [ ] admin_link_provider_for_user - [ ] admin_list_devices -- [ ] admin_list_groups_for_user +- [X] admin_list_groups_for_user - [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group +- [X] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge - [ ] admin_set_user_mfa_preference @@ -855,37 +855,37 @@ - [ ] admin_update_user_attributes - [ ] admin_user_global_sign_out - [ ] associate_software_token -- [ ] change_password +- [X] change_password - [ ] confirm_device -- [ ] confirm_forgot_password +- [X] confirm_forgot_password - [ ] confirm_sign_up -- [ ] create_group -- [ ] create_identity_provider +- [X] create_group +- [X] create_identity_provider - [ ] create_resource_server - [ ] create_user_import_job -- [ ] create_user_pool -- [ ] create_user_pool_client -- [ ] create_user_pool_domain -- [ ] delete_group -- [ ] delete_identity_provider +- [X] create_user_pool +- [X] create_user_pool_client +- [X] create_user_pool_domain +- [X] delete_group +- [X] delete_identity_provider - [ ] delete_resource_server - [ ] delete_user - [ ] delete_user_attributes -- [ ] delete_user_pool -- [ ] delete_user_pool_client -- [ ] delete_user_pool_domain -- [ ] describe_identity_provider +- [X] delete_user_pool +- [X] delete_user_pool_client +- [X] delete_user_pool_domain +- [X] describe_identity_provider - [ ] describe_resource_server - [ ] describe_risk_configuration - [ ] describe_user_import_job -- [ ] describe_user_pool -- [ ] describe_user_pool_client -- [ ] describe_user_pool_domain +- [X] describe_user_pool +- [X] describe_user_pool_client +- [X] describe_user_pool_domain - [ ] forget_device - [ ] forgot_password - [ ] get_csv_header - [ ] get_device -- [ ] get_group +- [X] get_group - [ ] get_identity_provider_by_identifier - [ ] get_signing_certificate - [ ] get_ui_customization @@ -895,16 +895,16 @@ - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices -- [ ] list_groups -- [ ] list_identity_providers +- [X] list_groups +- [X] list_identity_providers - [ ] list_resource_servers - [ ] list_user_import_jobs -- [ ] list_user_pool_clients -- [ ] list_user_pools -- [ ] list_users -- [ ] list_users_in_group +- [X] list_user_pool_clients +- [X] list_user_pools +- [X] list_users +- [X] list_users_in_group - [ ] resend_confirmation_code -- [ ] respond_to_auth_challenge +- [X] respond_to_auth_challenge - [ ] set_risk_configuration - [ ] set_ui_customization - [ ] set_user_mfa_preference @@ -916,11 +916,11 @@ - [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group -- [ ] update_identity_provider +- [x] update_identity_provider - [ ] update_resource_server - [ ] update_user_attributes - [ ] update_user_pool -- [ ] update_user_pool_client +- [X] update_user_pool_client - [ ] verify_software_token - [ ] verify_user_attribute @@ -2208,7 +2208,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 48% implemented +## iam - 62% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2225,7 +2225,7 @@ - [X] 
create_policy - [X] create_policy_version - [X] create_role -- [ ] create_saml_provider +- [X] create_saml_provider - [ ] create_service_linked_role - [ ] create_service_specific_credential - [X] create_user @@ -2243,11 +2243,11 @@ - [X] delete_policy_version - [X] delete_role - [X] delete_role_policy -- [ ] delete_saml_provider +- [X] delete_saml_provider - [X] delete_server_certificate - [ ] delete_service_linked_role - [ ] delete_service_specific_credential -- [ ] delete_signing_certificate +- [X] delete_signing_certificate - [ ] delete_ssh_public_key - [X] delete_user - [X] delete_user_policy @@ -2257,7 +2257,7 @@ - [X] detach_user_policy - [X] enable_mfa_device - [ ] generate_credential_report -- [ ] get_access_key_last_used +- [X] get_access_key_last_used - [X] get_account_authorization_details - [ ] get_account_password_policy - [ ] get_account_summary @@ -2273,13 +2273,13 @@ - [X] get_policy_version - [X] get_role - [X] get_role_policy -- [ ] get_saml_provider +- [X] get_saml_provider - [X] get_server_certificate - [ ] get_service_linked_role_deletion_status - [ ] get_ssh_public_key - [X] get_user - [X] get_user_policy -- [ ] list_access_keys +- [X] list_access_keys - [X] list_account_aliases - [X] list_attached_group_policies - [X] list_attached_role_policies @@ -2287,19 +2287,21 @@ - [ ] list_entities_for_policy - [X] list_group_policies - [X] list_groups -- [ ] list_groups_for_user -- [ ] list_instance_profiles -- [ ] list_instance_profiles_for_role +- [X] list_groups_for_user +- [X] list_instance_profiles +- [X] list_instance_profiles_for_role - [X] list_mfa_devices - [ ] list_open_id_connect_providers - [X] list_policies - [X] list_policy_versions - [X] list_role_policies -- [ ] list_roles -- [ ] list_saml_providers -- [ ] list_server_certificates +- [X] list_roles +- [X] list_role_tags +- [ ] list_user_tags +- [X] list_saml_providers +- [X] list_server_certificates - [ ] list_service_specific_credentials -- [ ] list_signing_certificates +- [X] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies - [X] list_users @@ -2315,6 +2317,10 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy +- [X] tag_role +- [ ] tag_user +- [X] untag_role +- [ ] untag_user - [X] update_access_key - [ ] update_account_password_policy - [ ] update_assume_role_policy @@ -2323,14 +2329,14 @@ - [ ] update_open_id_connect_provider_thumbprint - [ ] update_role - [ ] update_role_description -- [ ] update_saml_provider +- [X] update_saml_provider - [ ] update_server_certificate - [ ] update_service_specific_credential -- [ ] update_signing_certificate +- [X] update_signing_certificate - [ ] update_ssh_public_key -- [ ] update_user -- [ ] upload_server_certificate -- [ ] upload_signing_certificate +- [X] update_user +- [X] upload_server_certificate +- [X] upload_signing_certificate - [ ] upload_ssh_public_key ## importexport - 0% implemented @@ -2376,11 +2382,11 @@ - [ ] unsubscribe_from_event - [ ] update_assessment_target -## iot - 30% implemented +## iot - 32% implemented - [ ] accept_certificate_transfer - [X] add_thing_to_thing_group - [ ] associate_targets_with_job -- [ ] attach_policy +- [X] attach_policy - [X] attach_principal_policy - [X] attach_thing_principal - [ ] cancel_certificate_transfer @@ -2429,7 +2435,7 @@ - [X] describe_thing_group - [ ] describe_thing_registration_task - [X] describe_thing_type -- [ ] detach_policy +- [X] detach_policy - [X] detach_principal_policy - [X] detach_thing_principal - [ ] disable_topic_rule 
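The IAM hunks above flip role tagging (`tag_role`, `untag_role`, `list_role_tags`) to implemented. As a rough illustration of what a checked-off endpoint means in practice — a hypothetical pytest-style snippet, assuming a boto3 build recent enough to expose the IAM tagging APIs; it is not part of this changeset:

```python
import boto3
from moto import mock_iam


@mock_iam
def test_role_tagging():
    iam = boto3.client("iam", region_name="us-east-1")
    # moto accepts a placeholder trust policy document here
    iam.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}")
    iam.tag_role(RoleName="my-role", Tags=[{"Key": "env", "Value": "test"}])
    assert iam.list_role_tags(RoleName="my-role")["Tags"] == [
        {"Key": "env", "Value": "test"}
    ]


test_role_tagging()
```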
@@ -3542,7 +3548,7 @@ - [ ] get_bucket_inventory_configuration - [ ] get_bucket_lifecycle - [ ] get_bucket_lifecycle_configuration -- [ ] get_bucket_location +- [X] get_bucket_location - [ ] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification @@ -3648,14 +3654,14 @@ ## secretsmanager - 33% implemented - [ ] cancel_rotate_secret - [X] create_secret -- [ ] delete_secret +- [X] delete_secret - [X] describe_secret - [X] get_random_password - [X] get_secret_value - [ ] list_secret_version_ids -- [ ] list_secrets +- [x] list_secrets - [ ] put_secret_value -- [ ] restore_secret +- [X] restore_secret - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource @@ -3913,7 +3919,7 @@ - [ ] delete_message_batch - [X] delete_queue - [ ] get_queue_attributes -- [ ] get_queue_url +- [X] get_queue_url - [X] list_dead_letter_source_queues - [ ] list_queue_tags - [X] list_queues diff --git a/Makefile b/Makefile index f224d7091..de08c6f74 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,7 @@ test: lint rm -f .coverage rm -rf cover @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) + test_server: @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ diff --git a/README.md b/README.md index 791226d6b..56f73e28e 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,8 @@ [![Join the chat at https://gitter.im/awsmoto/Lobby](https://badges.gitter.im/awsmoto/Lobby.svg)](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/spulec/moto.png?branch=master)](https://travis-ci.org/spulec/moto) -[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.png?branch=master)](https://coveralls.io/r/spulec/moto) +[![Build Status](https://travis-ci.org/spulec/moto.svg?branch=master)](https://travis-ci.org/spulec/moto) +[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.svg?branch=master)](https://coveralls.io/r/spulec/moto) [![Docs](https://readthedocs.org/projects/pip/badge/?version=stable)](http://docs.getmoto.org) # In a nutshell @@ -70,10 +70,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | CloudwatchEvents | @mock_events | all endpoints done | |------------------------------------------------------------------------------| -| Cognito Identity | @mock_cognitoidentity| basic endpoints done | +| Cognito Identity | @mock_cognitoidentity| basic endpoints done | |------------------------------------------------------------------------------| | Cognito Identity Provider | @mock_cognitoidp| basic endpoints done | |------------------------------------------------------------------------------| +| Config | @mock_config | basic endpoints done | +|------------------------------------------------------------------------------| | Data Pipeline | @mock_datapipeline| basic endpoints done | |------------------------------------------------------------------------------| | DynamoDB | @mock_dynamodb | core endpoints done | @@ -259,7 +261,7 @@ It uses flask, which isn't a default dependency. 
You can install the server 'extra' package with: ```python -pip install moto[server] +pip install "moto[server]" ``` You can then start it running a service: diff --git a/moto/__init__.py b/moto/__init__.py index dd3593d5d..5eeac8471 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.7' +__version__ = '1.3.8' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa @@ -13,9 +13,11 @@ from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated # flake8: noqa from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated # flake8: noqa +from .config import mock_config # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa +from .dynamodbstreams import mock_dynamodbstreams # flake8: noqa from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa diff --git a/moto/acm/models.py b/moto/acm/models.py index 39be8945d..15a1bd44d 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -243,7 +243,7 @@ class CertBundle(BaseModel): 'KeyAlgorithm': key_algo, 'NotAfter': datetime_to_epoch(self._cert.not_valid_after), 'NotBefore': datetime_to_epoch(self._cert.not_valid_before), - 'Serial': self._cert.serial, + 'Serial': self._cert.serial_number, 'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''), 'Status': self.status, # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED. 
'Subject': 'CN={0}'.format(self.common_name),
diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py
index 0ebc4c465..27e81a87c 100644
--- a/moto/autoscaling/models.py
+++ b/moto/autoscaling/models.py
@@ -17,10 +17,12 @@ ASG_NAME_TAG = "aws:autoscaling:groupName"
 
 
 class InstanceState(object):
-    def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"):
+    def __init__(self, instance, lifecycle_state="InService",
+                 health_status="Healthy", protected_from_scale_in=False):
         self.instance = instance
         self.lifecycle_state = lifecycle_state
         self.health_status = health_status
+        self.protected_from_scale_in = protected_from_scale_in
 
 
 class FakeScalingPolicy(BaseModel):
@@ -152,7 +154,8 @@ class FakeAutoScalingGroup(BaseModel):
                  min_size, launch_config_name, vpc_zone_identifier,
                  default_cooldown, health_check_period, health_check_type,
                  load_balancers, target_group_arns, placement_group, termination_policies,
-                 autoscaling_backend, tags):
+                 autoscaling_backend, tags,
+                 new_instances_protected_from_scale_in=False):
         self.autoscaling_backend = autoscaling_backend
         self.name = name
 
@@ -178,6 +181,7 @@ class FakeAutoScalingGroup(BaseModel):
         self.target_group_arns = target_group_arns
         self.placement_group = placement_group
         self.termination_policies = termination_policies
+        self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in
 
         self.suspended_processes = []
         self.instance_states = []
@@ -210,6 +214,8 @@ class FakeAutoScalingGroup(BaseModel):
             placement_group=None,
             termination_policies=properties.get("TerminationPolicies", []),
             tags=properties.get("Tags", []),
+            new_instances_protected_from_scale_in=properties.get(
+                "NewInstancesProtectedFromScaleIn", False)
         )
         return group
 
@@ -238,7 +244,8 @@ class FakeAutoScalingGroup(BaseModel):
     def update(self, availability_zones, desired_capacity, max_size, min_size,
                launch_config_name, vpc_zone_identifier, default_cooldown,
                health_check_period, health_check_type,
-               placement_group, termination_policies):
+               placement_group, termination_policies,
+               new_instances_protected_from_scale_in=None):
         if availability_zones:
             self.availability_zones = availability_zones
         if max_size is not None:
@@ -256,6 +263,8 @@ class FakeAutoScalingGroup(BaseModel):
             self.health_check_period = health_check_period
         if health_check_type is not None:
             self.health_check_type = health_check_type
+        if new_instances_protected_from_scale_in is not None:
+            self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in
 
         if desired_capacity is not None:
             self.set_desired_capacity(desired_capacity)
@@ -280,12 +289,16 @@ class FakeAutoScalingGroup(BaseModel):
         else:
             # Need to remove some instances
             count_to_remove = curr_instance_count - self.desired_capacity
-            instances_to_remove = self.instance_states[:count_to_remove]
-            instance_ids_to_remove = [
-                instance.instance.id for instance in instances_to_remove]
-            self.autoscaling_backend.ec2_backend.terminate_instances(
-                instance_ids_to_remove)
-            self.instance_states = self.instance_states[count_to_remove:]
+            instances_to_remove = [  # only terminate instances that are not protected
+                state for state in self.instance_states
+                if not state.protected_from_scale_in
+            ][:count_to_remove]
+            if instances_to_remove:  # guard against every instance being protected
+                instance_ids_to_remove = [
+                    instance.instance.id for instance in instances_to_remove]
+                self.autoscaling_backend.ec2_backend.terminate_instances(
+                    instance_ids_to_remove)
+                # Filter rather than use set() difference, so ordering stays deterministic
+                self.instance_states = [
+                    state for state in self.instance_states
+                    if state not in instances_to_remove]
 
     def 
get_propagated_tags(self): propagated_tags = {} @@ -310,7 +323,10 @@ class FakeAutoScalingGroup(BaseModel): ) for instance in reservation.instances: instance.autoscaling_group = self - self.instance_states.append(InstanceState(instance)) + self.instance_states.append(InstanceState( + instance, + protected_from_scale_in=self.new_instances_protected_from_scale_in, + )) def append_target_groups(self, target_group_arns): append = [x for x in target_group_arns if x not in self.target_group_arns] @@ -372,7 +388,8 @@ class AutoScalingBackend(BaseBackend): default_cooldown, health_check_period, health_check_type, load_balancers, target_group_arns, placement_group, - termination_policies, tags): + termination_policies, tags, + new_instances_protected_from_scale_in=False): def make_int(value): return int(value) if value is not None else value @@ -403,6 +420,7 @@ class AutoScalingBackend(BaseBackend): termination_policies=termination_policies, autoscaling_backend=self, tags=tags, + new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) self.autoscaling_groups[name] = group @@ -415,12 +433,14 @@ class AutoScalingBackend(BaseBackend): launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, placement_group, - termination_policies): + termination_policies, + new_instances_protected_from_scale_in=None): group = self.autoscaling_groups[name] group.update(availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - placement_group, termination_policies) + placement_group, termination_policies, + new_instances_protected_from_scale_in=new_instances_protected_from_scale_in) return group def describe_auto_scaling_groups(self, names): @@ -448,7 +468,13 @@ class AutoScalingBackend(BaseBackend): raise ResourceContentionError else: group.desired_capacity = original_size + len(instance_ids) - new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids] + new_instances = [ + InstanceState( + self.ec2_backend.get_instance(x), + protected_from_scale_in=group.new_instances_protected_from_scale_in, + ) + for x in instance_ids + ] for instance in new_instances: self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) group.instance_states.extend(new_instances) @@ -626,6 +652,13 @@ class AutoScalingBackend(BaseBackend): group = self.autoscaling_groups[group_name] group.suspended_processes = scaling_processes or [] + def set_instance_protection(self, group_name, instance_ids, protected_from_scale_in): + group = self.autoscaling_groups[group_name] + protected_instances = [ + x for x in group.instance_states if x.instance.id in instance_ids] + for instance in protected_instances: + instance.protected_from_scale_in = protected_from_scale_in + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 5586c51dd..3b2752d46 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -85,6 +85,8 @@ class AutoScalingResponse(BaseResponse): termination_policies=self._get_multi_param( 'TerminationPolicies.member'), tags=self._get_list_prefix('Tags.member'), + new_instances_protected_from_scale_in=self._get_bool_param( + 'NewInstancesProtectedFromScaleIn', False) ) template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -192,6 +194,8 @@ class 
AutoScalingResponse(BaseResponse): placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), + new_instances_protected_from_scale_in=self._get_bool_param( + 'NewInstancesProtectedFromScaleIn', None) ) template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -290,6 +294,15 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(SUSPEND_PROCESSES_TEMPLATE) return template.render() + def set_instance_protection(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param('InstanceIds.member') + protected_from_scale_in = self._get_bool_param('ProtectedFromScaleIn') + self.autoscaling_backend.set_instance_protection( + group_name, instance_ids, protected_from_scale_in) + template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -391,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ -{{ requestid }} + """ @@ -399,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -441,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -490,6 +503,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ instance_state.instance.id }} {{ group.launch_config_name }} {{ instance_state.lifecycle_state }} + {{ instance_state.protected_from_scale_in|string|lower }} {% endfor %} @@ -508,6 +522,15 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% endif %} + {% if group.target_group_arns %} + + {% for target_group_arn in group.target_group_arns %} + {{ target_group_arn }} + {% endfor %} + + {% else %} + + {% endif %} {{ group.min_size }} {% if group.vpc_zone_identifier %} {{ group.vpc_zone_identifier }} @@ -530,6 +553,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ group.placement_group }} {% endif %} + {{ group.new_instances_protected_from_scale_in|string|lower }} {% endfor %} @@ -565,6 +589,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """{{ instance_state.instance.id }} {{ instance_state.instance.autoscaling_group.launch_config_name }} {{ instance_state.lifecycle_state }} + {{ instance_state.protected_from_scale_in|string|lower }} {% endfor %} @@ -629,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """ -{{ requestid }} + """ @@ -645,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ -{{ requestid }} + """ @@ -665,6 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """ -{{ requestid }} + """ + +SET_INSTANCE_PROTECTION_TEMPLATE = """ + + + + +""" diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index b11bde042..9fc41c11e 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -386,7 +386,7 @@ class LambdaFunction(BaseModel): 'Role': properties['Role'], 'Runtime': properties['Runtime'], } - optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split() + optional_properties = 'Description MemorySize Publish Timeout VpcConfig Environment'.split() # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the # default logic for prop in optional_properties: @@ -500,6 +500,11 @@ class LambdaStorage(object): except ValueError: return self._functions[name]['latest'] + def list_versions_by_function(self, name): + if name not in self._functions: + return None + return [self._functions[name]['latest']] + def get_arn(self, arn): return self._arns.get(arn, None) @@ -607,6 +612,9 @@ class LambdaBackend(BaseBackend): def get_function(self, function_name, qualifier=None): return 
self._lambdas.get_function(function_name, qualifier) + def list_versions_by_function(self, function_name): + return self._lambdas.list_versions_by_function(function_name) + def get_function_by_arn(self, function_arn): return self._lambdas.get_arn(function_arn) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 1a9a4df83..1c43ef84b 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -52,7 +52,11 @@ class LambdaResponse(BaseResponse): self.setup_class(request, full_url, headers) if request.method == 'GET': # This is ListVersionByFunction - raise ValueError("Cannot handle request") + + path = request.path if hasattr(request, 'path') else path_url(request.url) + function_name = path.split('/')[-2] + return self._list_versions_by_function(function_name) + elif request.method == 'POST': return self._publish_function(request, full_url, headers) else: @@ -151,6 +155,19 @@ class LambdaResponse(BaseResponse): return 200, {}, json.dumps(result) + def _list_versions_by_function(self, function_name): + result = { + 'Versions': [] + } + + functions = self.lambda_backend.list_versions_by_function(function_name) + if functions: + for fn in functions: + json_data = fn.get_configuration() + result['Versions'].append(json_data) + + return 200, {}, json.dumps(result) + def _create_function(self, request, full_url, headers): try: fn = self.lambda_backend.create_function(self.json_body) @@ -166,7 +183,7 @@ class LambdaResponse(BaseResponse): fn = self.lambda_backend.publish_function(function_name) if fn: config = fn.get_configuration() - return 200, {}, json.dumps(config) + return 201, {}, json.dumps(config) else: return 404, {}, "{}" diff --git a/moto/backends.py b/moto/backends.py index d95424385..90cc803a7 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -12,6 +12,7 @@ from moto.core import moto_api_backends from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends from moto.dynamodb2 import dynamodb_backends2 +from moto.dynamodbstreams import dynamodbstreams_backends from moto.ec2 import ec2_backends from moto.ecr import ecr_backends from moto.ecs import ecs_backends @@ -45,7 +46,7 @@ from moto.iot import iot_backends from moto.iotdata import iotdata_backends from moto.batch import batch_backends from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends - +from moto.config import config_backends BACKENDS = { 'acm': acm_backends, @@ -56,9 +57,11 @@ BACKENDS = { 'cloudwatch': cloudwatch_backends, 'cognito-identity': cognitoidentity_backends, 'cognito-idp': cognitoidp_backends, + 'config': config_backends, 'datapipeline': datapipeline_backends, 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, + 'dynamodbstreams': dynamodbstreams_backends, 'ec2': ec2_backends, 'ecr': ecr_backends, 'ecs': ecs_backends, diff --git a/moto/batch/responses.py b/moto/batch/responses.py index e626b7d4c..7fb606184 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -27,7 +27,7 @@ class BatchResponse(BaseResponse): elif not hasattr(self, '_json'): try: self._json = json.loads(self.body) - except json.JSONDecodeError: + except ValueError: print() return self._json diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index e5ab7255d..01e3113dd 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from datetime import datetime +from datetime import datetime, timedelta import json import yaml 
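The `moto/batch/responses.py` hunk above replaces `json.JSONDecodeError` with `ValueError`. `JSONDecodeError` only exists on Python 3, where it subclasses `ValueError`, so catching `ValueError` is the form that works on both interpreters this release still targets. A minimal sketch of the rationale (illustrative, not from this changeset):

```python
import json


def parse_json_or_none(body):
    # json.JSONDecodeError is Python 3 only; it subclasses ValueError,
    # so ValueError is the portable exception to catch on Python 2 and 3.
    try:
        return json.loads(body)
    except ValueError:
        return None


assert parse_json_or_none('{"a": 1}') == {"a": 1}
assert parse_json_or_none("not json") is None
```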
import uuid @@ -12,11 +12,156 @@ from .parsing import ResourceMap, OutputMap from .utils import ( generate_changeset_id, generate_stack_id, + generate_stackset_arn, + generate_stackset_id, yaml_tag_constructor, + validate_template_cfn_lint, ) from .exceptions import ValidationError +class FakeStackSet(BaseModel): + + def __init__(self, stackset_id, name, template, region='us-east-1', + status='ACTIVE', description=None, parameters=None, tags=None, + admin_role='AWSCloudFormationStackSetAdministrationRole', + execution_role='AWSCloudFormationStackSetExecutionRole'): + self.id = stackset_id + self.arn = generate_stackset_arn(stackset_id, region) + self.name = name + self.template = template + self.description = description + self.parameters = parameters + self.tags = tags + self.admin_role = admin_role + self.execution_role = execution_role + self.status = status + self.instances = FakeStackInstances(parameters, self.id, self.name) + self.stack_instances = self.instances.stack_instances + self.operations = [] + + def _create_operation(self, operation_id, action, status, accounts=[], regions=[]): + operation = { + 'OperationId': str(operation_id), + 'Action': action, + 'Status': status, + 'CreationTimestamp': datetime.now(), + 'EndTimestamp': datetime.now() + timedelta(minutes=2), + 'Instances': [{account: region} for account in accounts for region in regions], + } + + self.operations += [operation] + return operation + + def get_operation(self, operation_id): + for operation in self.operations: + if operation_id == operation['OperationId']: + return operation + raise ValidationError(operation_id) + + def update_operation(self, operation_id, status): + operation = self.get_operation(operation_id) + operation['Status'] = status + return operation_id + + def delete(self): + self.status = 'DELETED' + + def update(self, template, description, parameters, tags, admin_role, + execution_role, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.template = template if template else self.template + self.description = description if description is not None else self.description + self.parameters = parameters if parameters else self.parameters + self.tags = tags if tags else self.tags + self.admin_role = admin_role if admin_role else self.admin_role + self.execution_role = execution_role if execution_role else self.execution_role + + if accounts and regions: + self.update_instances(accounts, regions, self.parameters) + + operation = self._create_operation(operation_id=operation_id, + action='UPDATE', status='SUCCEEDED', accounts=accounts, + regions=regions) + return operation + + def create_stack_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + if not parameters: + parameters = self.parameters + + self.instances.create_instances(accounts, regions, parameters, operation_id) + self._create_operation(operation_id=operation_id, action='CREATE', + status='SUCCEEDED', accounts=accounts, regions=regions) + + def delete_stack_instances(self, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.instances.delete(accounts, regions) + + operation = self._create_operation(operation_id=operation_id, action='DELETE', + status='SUCCEEDED', accounts=accounts, regions=regions) + return operation + + def update_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + 
self.instances.update(accounts, regions, parameters)
+        operation = self._create_operation(operation_id=operation_id,
+                                           action='UPDATE', status='SUCCEEDED',
+                                           accounts=accounts, regions=regions)
+        return operation
+
+
+class FakeStackInstances(BaseModel):
+    def __init__(self, parameters, stackset_id, stackset_name):
+        self.parameters = parameters if parameters else {}
+        self.stackset_id = stackset_id
+        self.stack_name = "StackSet-{}".format(stackset_id)
+        self.stackset_name = stackset_name
+        self.stack_instances = []
+
+    def create_instances(self, accounts, regions, parameters, operation_id):
+        new_instances = []
+        for region in regions:
+            for account in accounts:
+                instance = {
+                    'StackId': generate_stack_id(self.stack_name, region, account),
+                    'StackSetId': self.stackset_id,
+                    'Region': region,
+                    'Account': account,
+                    'Status': "CURRENT",
+                    'ParameterOverrides': parameters if parameters else [],
+                }
+                new_instances.append(instance)
+        self.stack_instances += new_instances
+        return new_instances
+
+    def update(self, accounts, regions, parameters):
+        for account in accounts:
+            for region in regions:
+                instance = self.get_instance(account, region)
+                if parameters:
+                    instance['ParameterOverrides'] = parameters
+                else:
+                    instance['ParameterOverrides'] = []
+
+    def delete(self, accounts, regions):
+        # Filter instead of popping while iterating, which would skip the
+        # element that follows each removal.
+        self.stack_instances = [
+            instance for instance in self.stack_instances
+            if not (instance['Region'] in regions and instance['Account'] in accounts)
+        ]
+
+    def get_instance(self, account, region):
+        for i, instance in enumerate(self.stack_instances):
+            if instance['Region'] == region and instance['Account'] == account:
+                return self.stack_instances[i]
+
+
 class FakeStack(BaseModel):
 
     def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
@@ -84,9 +229,9 @@ class FakeStack(BaseModel):
     def _parse_template(self):
         yaml.add_multi_constructor('', yaml_tag_constructor)
         try:
-            self.template_dict = yaml.load(self.template)
+            self.template_dict = yaml.load(self.template, Loader=yaml.Loader)
         except yaml.parser.ParserError:
-            self.template_dict = json.loads(self.template)
+            self.template_dict = json.loads(self.template)
 
     @property
     def stack_parameters(self):
@@ -126,6 +271,49 @@ class FakeStack(BaseModel):
         self.status = "DELETE_COMPLETE"
 
 
+class FakeChange(BaseModel):
+
+    def __init__(self, action, logical_resource_id, resource_type):
+        self.action = action
+        self.logical_resource_id = logical_resource_id
+        self.resource_type = resource_type
+
+
+class FakeChangeSet(FakeStack):
+
+    def __init__(self, stack_id, stack_name, stack_template, change_set_id,
+                 change_set_name, template, parameters, region_name,
+                 notification_arns=None, tags=None, role_arn=None,
+                 cross_stack_resources=None):
+        super(FakeChangeSet, self).__init__(
+            stack_id,
+            stack_name,
+            stack_template,
+            parameters,
+            region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=cross_stack_resources,
+            create_change_set=True,
+        )
+        self.stack_name = stack_name
+        self.change_set_id = change_set_id
+        self.change_set_name = change_set_name
+        self.changes = self.diff(template=template, parameters=parameters)
+
+    def diff(self, template, parameters=None):
+        self.template = template
+        self._parse_template()
+        changes = []
+        resources_by_action = self.resource_map.diff(self.template_dict, parameters)
+        for action, resources in resources_by_action.items():
+            for resource_name, 
resource in resources.items(): + changes.append(FakeChange( + action=action, + logical_resource_id=resource_name, + resource_type=resource['ResourceType'], + )) + return changes + + class FakeEvent(BaseModel): def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): @@ -145,10 +333,72 @@ class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() + self.stacksets = OrderedDict() self.deleted_stacks = {} self.exports = OrderedDict() self.change_sets = OrderedDict() + def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None): + stackset_id = generate_stackset_id(name) + new_stackset = FakeStackSet( + stackset_id=stackset_id, + name=name, + template=template, + parameters=parameters, + description=description, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + ) + self.stacksets[stackset_id] = new_stackset + return new_stackset + + def get_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + return self.stacksets[stackset] + raise ValidationError(name) + + def delete_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + self.stacksets[stackset].delete() + + def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None): + stackset = self.get_stack_set(stackset_name) + + stackset.create_stack_instances( + accounts=accounts, + regions=regions, + parameters=parameters, + operation_id=operation_id, + ) + return stackset + + def update_stack_set(self, stackset_name, template=None, description=None, + parameters=None, tags=None, admin_role=None, execution_role=None, + accounts=None, regions=None, operation_id=None): + stackset = self.get_stack_set(stackset_name) + update = stackset.update( + template=template, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + return update + + def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None): + stackset = self.get_stack_set(stackset_name) + stackset.delete_stack_instances(accounts, regions, operation_id) + return stackset + def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False): stack_id = generate_stack_id(name) new_stack = FakeStack( @@ -170,24 +420,62 @@ class CloudFormationBackend(BaseBackend): return new_stack def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None): + stack_id = None + stack_template = None if change_set_type == 'UPDATE': stacks = self.stacks.values() stack = None for s in stacks: if s.name == stack_name: stack = s + stack_id = stack.stack_id + stack_template = stack.template if stack is None: raise ValidationError(stack_name) - else: - stack = self.create_stack(stack_name, template, parameters, - region_name, notification_arns, tags, - role_arn, create_change_set=True) + stack_id = generate_stack_id(stack_name) + stack_template = template + change_set_id = generate_changeset_id(change_set_name, region_name) - self.stacks[change_set_name] = 
{'Id': change_set_id,
-                                        'StackId': stack.stack_id}
-        self.change_sets[change_set_id] = stack
-        return change_set_id, stack.stack_id
+        new_change_set = FakeChangeSet(
+            stack_id=stack_id,
+            stack_name=stack_name,
+            stack_template=stack_template,
+            change_set_id=change_set_id,
+            change_set_name=change_set_name,
+            template=template,
+            parameters=parameters,
+            region_name=region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=self.exports
+        )
+        self.change_sets[change_set_id] = new_change_set
+        self.stacks[stack_id] = new_change_set
+        return change_set_id, stack_id
+
+    def delete_change_set(self, change_set_name, stack_name=None):
+        if change_set_name in self.change_sets:
+            # This means an ARN was passed in
+            del self.change_sets[change_set_name]
+        else:
+            # Iterate over a copy: deleting from a dict while iterating it
+            # raises RuntimeError on Python 3.
+            for cs in list(self.change_sets):
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    del self.change_sets[cs]
+
+    def describe_change_set(self, change_set_name, stack_name=None):
+        change_set = None
+        if change_set_name in self.change_sets:
+            # This means an ARN was passed in
+            change_set = self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    change_set = self.change_sets[cs]
+        if change_set is None:
+            raise ValidationError(change_set_name)
+        return change_set
 
     def execute_change_set(self, change_set_name, stack_name=None):
         stack = None
@@ -196,7 +484,7 @@ class CloudFormationBackend(BaseBackend):
             stack = self.change_sets[change_set_name]
         else:
             for cs in self.change_sets:
-                if self.change_sets[cs].name == change_set_name:
+                if self.change_sets[cs].change_set_name == change_set_name:
                     stack = self.change_sets[cs]
         if stack is None:
             raise ValidationError(stack_name)
@@ -222,8 +510,15 @@ class CloudFormationBackend(BaseBackend):
         else:
             return list(stacks)
 
+    def list_change_sets(self):
+        return self.change_sets.values()
+
     def list_stacks(self):
-        return self.stacks.values()
+        return [
+            v for v in self.stacks.values()
+        ] + [
+            v for v in self.deleted_stacks.values()
+        ]
 
     def get_stack(self, name_or_stack_id):
         all_stacks = dict(self.deleted_stacks, **self.stacks)
@@ -270,6 +565,9 @@ class CloudFormationBackend(BaseBackend):
         next_token = str(token + 100) if len(all_exports) > token + 100 else None
         return exports, next_token
 
+    def validate_template(self, template):
+        return validate_template_cfn_lint(template)
+
     def _validate_export_uniqueness(self, stack):
         new_stack_export_names = [x.name for x in stack.exports]
         export_names = self.exports.keys()
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index 35b05d101..0be68944b 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -465,36 +465,70 @@ class ResourceMap(collections.Mapping):
             ec2_models.ec2_backends[self._region_name].create_tags(
                 [self[resource].physical_resource_id], self.tags)
 
-    def update(self, template, parameters=None):
+    def diff(self, template, parameters=None):
         if parameters:
             self.input_parameters = parameters
         self.load_mapping()
         self.load_parameters()
         self.load_conditions()
 
+        old_template = self._resource_json_map
+        new_template = template['Resources']
+
+        resource_names_by_action = {
+            'Add': set(new_template) - set(old_template),
+            'Modify': set(name for name in new_template if name in old_template and
+                          new_template[name] != old_template[name]),
+            'Remove': set(old_template) - set(new_template)
+        }
+        resources_by_action = {
+            'Add': {},
+            'Modify': {},
+            'Remove': {},
+        }
+
+        for 
resource_name in resource_names_by_action['Add']: + resources_by_action['Add'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': new_template[resource_name]['Type'] + } + + for resource_name in resource_names_by_action['Modify']: + resources_by_action['Modify'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': new_template[resource_name]['Type'] + } + + for resource_name in resource_names_by_action['Remove']: + resources_by_action['Remove'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': old_template[resource_name]['Type'] + } + + return resources_by_action + + def update(self, template, parameters=None): + resources_by_action = self.diff(template, parameters) + old_template = self._resource_json_map new_template = template['Resources'] self._resource_json_map = new_template - new_resource_names = set(new_template) - set(old_template) - for resource_name in new_resource_names: + for resource_name, resource in resources_by_action['Add'].items(): resource_json = new_template[resource_name] new_resource = parse_and_create_resource( resource_name, resource_json, self, self._region_name) self._parsed_resources[resource_name] = new_resource - removed_resource_names = set(old_template) - set(new_template) - for resource_name in removed_resource_names: + for resource_name, resource in resources_by_action['Remove'].items(): resource_json = old_template[resource_name] parse_and_delete_resource( resource_name, resource_json, self, self._region_name) self._parsed_resources.pop(resource_name) - resources_to_update = set(name for name in new_template if name in old_template and new_template[ - name] != old_template[name]) tries = 1 - while resources_to_update and tries < 5: - for resource_name in resources_to_update.copy(): + while resources_by_action['Modify'] and tries < 5: + for resource_name, resource in resources_by_action['Modify'].copy().items(): resource_json = new_template[resource_name] try: changed_resource = parse_and_update_resource( @@ -505,7 +539,7 @@ class ResourceMap(collections.Mapping): last_exception = e else: self._parsed_resources[resource_name] = changed_resource - resources_to_update.remove(resource_name) + del resources_by_action['Modify'][resource_name] tries += 1 if tries == 5: raise last_exception diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index a1295a20d..d1ef5ba8a 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +import yaml from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse @@ -87,7 +88,8 @@ class CloudFormationResponse(BaseResponse): role_arn = self._get_param('RoleARN') update_or_create = self._get_param('ChangeSetType', 'CREATE') parameters_list = self._get_list_prefix("Parameters.member") - tags = {tag[0]: tag[1] for tag in self._get_list_prefix("Tags.member")} + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) parameters = {param['parameter_key']: param['parameter_value'] for param in parameters_list} if template_url: @@ -118,6 +120,31 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE) return template.render(stack_id=stack_id, change_set_id=change_set_id) + def delete_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + + 
self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name)
+        if self.request_json:
+            return json.dumps({
+                'DeleteChangeSetResponse': {
+                    'DeleteChangeSetResult': {},
+                }
+            })
+        else:
+            template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE)
+            return template.render()
+
+    def describe_change_set(self):
+        stack_name = self._get_param('StackName')
+        change_set_name = self._get_param('ChangeSetName')
+        change_set = self.cloudformation_backend.describe_change_set(
+            change_set_name=change_set_name,
+            stack_name=stack_name,
+        )
+        template = self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE)
+        return template.render(change_set=change_set)
+
     @amzn_request_id
     def execute_change_set(self):
         stack_name = self._get_param('StackName')
@@ -185,6 +212,11 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE)
         return template.render(stack=stack)
 
+    def list_change_sets(self):
+        change_sets = self.cloudformation_backend.list_change_sets()
+        template = self.response_template(LIST_CHANGE_SETS_RESPONSE)
+        return template.render(change_sets=change_sets)
+
     def list_stacks(self):
         stacks = self.cloudformation_backend.list_stacks()
         template = self.response_template(LIST_STACKS_RESPONSE)
@@ -294,6 +326,201 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(LIST_EXPORTS_RESPONSE)
         return template.render(exports=exports, next_token=next_token)
 
+    def validate_template(self):
+        cfn_lint = self.cloudformation_backend.validate_template(self._get_param('TemplateBody'))
+        if cfn_lint:
+            raise ValidationError(cfn_lint[0].message)
+        description = ""
+        try:
+            description = json.loads(self._get_param('TemplateBody'))['Description']
+        except (ValueError, KeyError):
+            pass
+        try:
+            description = yaml.load(self._get_param('TemplateBody'), Loader=yaml.Loader)['Description']
+        except (yaml.parser.ParserError, KeyError):
+            pass
+        template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE)
+        return template.render(description=description)
+
+    def create_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        stack_body = self._get_param('TemplateBody')
+        template_url = self._get_param('TemplateURL')
+        # role_arn = self._get_param('RoleARN')
+        parameters_list = self._get_list_prefix("Parameters.member")
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
+
+        # Build the parameters dict from the parsed Parameters.member list
+        parameters = dict([
+            (parameter['parameter_key'], parameter['parameter_value'])
+            for parameter
+            in parameters_list
+        ])
+        if template_url:
+            stack_body = self._get_stack_from_s3_url(template_url)
+
+        stackset = self.cloudformation_backend.create_stack_set(
+            name=stackset_name,
+            template=stack_body,
+            parameters=parameters,
+            tags=tags,
+            # role_arn=role_arn,
+        )
+        if self.request_json:
+            return json.dumps({
+                'CreateStackSetResponse': {
+                    'CreateStackSetResult': {
+                        'StackSetId': stackset.id,
+                    }
+                }
+            })
+        else:
+            template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE)
+            return template.render(stackset=stackset)
+
+    def create_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        parameters = self._get_multi_param('ParameterOverrides.member')
+        self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters)
+        template = 
self.response_template(CREATE_STACK_INSTANCES_TEMPLATE) + return template.render() + + def delete_stack_set(self): + stackset_name = self._get_param('StackSetName') + self.cloudformation_backend.delete_stack_set(stackset_name) + template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE) + return template.render() + + def delete_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions) + + template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE) + return template.render(operation=operation) + + def describe_stack_set(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + + if not stackset.admin_role: + stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole' + if not stackset.execution_role: + stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole' + + template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def describe_stack_instance(self): + stackset_name = self._get_param('StackSetName') + account = self._get_param('StackInstanceAccount') + region = self._get_param('StackInstanceRegion') + + instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region) + template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE) + rendered = template.render(instance=instance) + return rendered + + def list_stack_sets(self): + stacksets = self.cloudformation_backend.stacksets + template = self.response_template(LIST_STACK_SETS_TEMPLATE) + return template.render(stacksets=stacksets) + + def list_stack_instances(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE) + return template.render(stackset=stackset) + + def list_stack_set_operations(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def stop_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + stackset.update_operation(operation_id, 'STOPPED') + template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE) + return template.render() + + def describe_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE) + return template.render(stackset=stackset, operation=operation) + + def list_stack_set_operation_results(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = 
self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + def update_stack_set(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + description = self._get_param('Description') + execution_role = self._get_param('ExecutionRoleName') + admin_role = self._get_param('AdministrationRoleARN') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + template_body = self._get_param('TemplateBody') + template_url = self._get_param('TemplateURL') + if template_url: + template_body = self._get_stack_from_s3_url(template_url) + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) + parameters_list = self._get_list_prefix("Parameters.member") + parameters = dict([ + (parameter['parameter_key'], parameter['parameter_value']) + for parameter + in parameters_list + ]) + operation = self.cloudformation_backend.update_stack_set( + stackset_name=stackset_name, + template=template_body, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + + template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + def update_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + parameters = self._get_multi_param('ParameterOverrides.member') + operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters) + template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + +VALIDATE_STACK_RESPONSE_TEMPLATE = """ + + + + +{{ description }} + + +""" CREATE_STACK_RESPONSE_TEMPLATE = """ @@ -326,6 +553,66 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """ """ +DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + + + 3d3200a1-810e-3023-6cc3-example + + +""" + +DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + {{ change_set.change_set_id }} + {{ change_set.change_set_name }} + {{ change_set.stack_id }} + {{ change_set.stack_name }} + {{ change_set.description }} + + {% for param_name, param_value in change_set.stack_parameters.items() %} + + {{ param_name }} + {{ param_value }} + + {% endfor %} + + 2011-05-23T15:47:44Z + {{ change_set.execution_status }} + {{ change_set.status }} + {{ change_set.status_reason }} + {% if change_set.notification_arns %} + + {% for notification_arn in change_set.notification_arns %} + {{ notification_arn }} + {% endfor %} + + {% else %} + + {% endif %} + {% if change_set.role_arn %} + {{ change_set.role_arn }} + {% endif %} + {% if change_set.changes %} + + {% for change in change_set.changes %} + + Resource + + {{ change.action }} + {{ change.logical_resource_id }} + {{ change.resource_type }} + + + {% endfor %} + + {% endif %} + {% if next_token %} + {{ next_token }} + {% endif %} + +""" + EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """ @@ -451,6 +738,27 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """ + + + {% for change_set in change_sets %} + + {{ change_set.stack_id }} + {{ change_set.stack_name }} + {{ change_set.change_set_id }} + {{ change_set.change_set_name }} + {{ change_set.execution_status }} + {{ change_set.status }} + {{ change_set.status_reason }} + 
2011-05-23T15:47:44Z + {{ change_set.description }} + + {% endfor %} + + +""" + + LIST_STACKS_RESPONSE = """ @@ -525,3 +833,236 @@ LIST_EXPORTS_RESPONSE = """ + + {{ stackset.stackset_id }} + + + f457258c-391d-41d1-861f-example + + +""" + +DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """ + + + + {{ stackset.arn }} + {{ stackset.execution_role }} + {{ stackset.admin_role }} + {{ stackset.id }} + {{ stackset.template }} + {{ stackset.name }} + + {% for param_name, param_value in stackset.parameters.items() %} + + {{ param_name }} + {{ param_value }} + + {% endfor %} + + + {% for tag_key, tag_value in stackset.tags.items() %} + + {{ tag_key }} + {{ tag_value }} + + {% endfor %} + + {{ stackset.status }} + + + + d8b64e11-5332-46e1-9603-example + +""" + +DELETE_STACK_SET_RESPONSE_TEMPLATE = """ + + + c35ec2d0-d69f-4c4d-9bd7-example + +""" + +CREATE_STACK_INSTANCES_TEMPLATE = """ + + 1459ad6d-63cc-4c96-a73e-example + + + 6b29f7e3-69be-4d32-b374-example + + +""" + +LIST_STACK_INSTANCES_TEMPLATE = """ + + + {% for instance in stackset.stack_instances %} + + {{ instance.StackId }} + {{ instance.StackSetId }} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + {% endfor %} + + + + 83c27e73-b498-410f-993c-example + + +""" + +DELETE_STACK_INSTANCES_TEMPLATE = """ + + {{ operation.OperationId }} + + + e5325090-66f6-4ecd-a531-example + + +""" + +DESCRIBE_STACK_INSTANCE_TEMPLATE = """ + + + {{ instance.StackId }} + {{ instance.StackSetId }} + {% if instance.ParameterOverrides %} + + {% for override in instance.ParameterOverrides %} + {% if override['ParameterKey'] or override['ParameterValue'] %} + + {{ override.ParameterKey }} + false + {{ override.ParameterValue }} + + {% endif %} + {% endfor %} + + {% else %} + + {% endif %} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + + + c6c7be10-0343-4319-8a25-example + + +""" + +LIST_STACK_SETS_TEMPLATE = """ + + + {% for key, value in stacksets.items() %} + + {{ value.name }} + {{ value.id }} + {{ value.status }} + + {% endfor %} + + + + 4dcacb73-841e-4ed8-b335-example + + +""" + +UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """ + + {{ operation }} + + + bdbf8e94-19b6-4ce4-af85-example + + +""" + +UPDATE_STACK_SET_RESPONSE_TEMPLATE = """ + + {{ operation.OperationId }} + + + adac907b-17e3-43e6-a254-example + + +""" + +LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """ + + + {% for operation in stackset.operations %} + + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + {{ operation.EndTimestamp }} + {{ operation.Status }} + + {% endfor %} + + + + 65b9d9be-08bb-4a43-9a21-example + + +""" + +STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """ + + + 2188554a-07c6-4396-b2c5-example + +""" + +DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """ + + + {{ stackset.execution_role }} + arn:aws:iam::123456789012:role/{{ stackset.admin_role }} + {{ stackset.id }} + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + + + + {{ operation.EndTimestamp }} + {{ operation.Status }} + + + + 2edc27b6-9ce2-486a-a192-example + + +""" + +LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """ + + + {% for instance in operation.Instances %} + {% for account, region in instance.items() %} + + + Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate + SKIPPED + + {{ region }} + {{ account }} + {{ operation.Status }} + + {% endfor %} + {% endfor %} + + + + ac05a9ce-5f98-4197-a29b-example + + +""" diff --git 
a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py
index f3b8874ed..de75d2c15 100644
--- a/moto/cloudformation/utils.py
+++ b/moto/cloudformation/utils.py
@@ -3,11 +3,14 @@
 import uuid
 import six
 import random
 import yaml
+import os
+
+from cfnlint import decode, core
 
 
-def generate_stack_id(stack_name):
+def generate_stack_id(stack_name, region="us-east-1", account="123456789"):
     random_id = uuid.uuid4()
-    return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id)
+    return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id)
 
 
 def generate_changeset_id(changeset_name, region_name):
@@ -15,6 +18,15 @@
     return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id)
 
 
+def generate_stackset_id(stackset_name):
+    random_id = uuid.uuid4()
+    return '{}:{}'.format(stackset_name, random_id)
+
+
+def generate_stackset_arn(stackset_id, region_name):
+    return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id)
+
+
 def random_suffix():
     size = 12
     chars = list(range(10)) + ['A-Z']
@@ -38,3 +50,33 @@
         key = 'Fn::{}'.format(tag[1:])
 
     return {key: _f(loader, tag, node)}
+
+
+def validate_template_cfn_lint(template):
+
+    # Save the template to a temporary file -- cfn-lint requires a file
+    filename = "file.tmp"
+    with open(filename, "w") as file:
+        file.write(template)
+    abs_filename = os.path.abspath(filename)
+
+    # decode handles both yaml and json
+    template, matches = decode.decode(abs_filename, False)
+
+    # Set cfn-lint to info
+    core.configure_logging(None)
+
+    # Initialize the ruleset to be applied (no overrules, no excludes)
+    rules = core.get_rules([], [], [])
+
+    # Use us-east-1 region (spec file) for validation
+    regions = ['us-east-1']
+
+    # Process all the rules and gather the errors
+    matches = core.run_checks(
+        abs_filename,
+        template,
+        rules,
+        regions)
+
+    return matches
diff --git a/moto/cognitoidp/exceptions.py b/moto/cognitoidp/exceptions.py
index 1f1ec2309..452670213 100644
--- a/moto/cognitoidp/exceptions.py
+++ b/moto/cognitoidp/exceptions.py
@@ -24,6 +24,16 @@
         })
 
 
+class GroupExistsException(BadRequest):
+
+    def __init__(self, message):
+        super(GroupExistsException, self).__init__()
+        self.description = json.dumps({
+            "message": message,
+            '__type': 'GroupExistsException',
+        })
+
+
 class NotAuthorizedError(BadRequest):
 
     def __init__(self, message):
diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index 476d470b9..bdd279ba6 100644
--- a/moto/cognitoidp/models.py
+++ b/moto/cognitoidp/models.py
@@ -1,6 +1,8 @@
 from __future__ import unicode_literals
 
 import datetime
+import functools
+import itertools
 import json
 import os
 import time
@@ -11,8 +13,7 @@
 
 from moto.compat import OrderedDict
 from moto.core import BaseBackend, BaseModel
-from .exceptions import NotAuthorizedError, ResourceNotFoundError, UserNotFoundError
-
+from .exceptions import GroupExistsException, NotAuthorizedError, ResourceNotFoundError, UserNotFoundError
 
 UserStatus = {
     "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD",
@@ -20,6 +21,39 @@
 }
 
 
+def paginate(limit, start_arg="next_token", limit_arg="max_results"):
+    """Returns a limited result list, and an offset into the list of remaining items
+
+    Takes the next_token, and max_results kwargs given to a function and handles
+    the slicing of the results. The kwarg `next_token` is the offset into the
+    list to begin slicing from. `max_results` is the size of the result required
+
+    If max_results is not supplied then the `limit` parameter is used as a
+    default
+
+    :param limit_arg: the name of the argument in the decorated function that
+        controls the amount of items returned
+    :param start_arg: the name of the argument in the decorated function that
+        provides the starting offset
+    :param limit: a default maximum number of items to return
+    :return: a tuple containing a list of items, and the offset into the list
+    """
+    default_start = 0
+
+    def outer_wrapper(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
+            lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
+            stop = start + lim
+            result = func(*args, **kwargs)
+            limited_results = list(itertools.islice(result, start, stop))
+            next_token = stop if stop < len(result) else None
+            return limited_results, next_token
+        return wrapper
+    return outer_wrapper
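
A quick sketch of the slicing behaviour this decorator implements; the function and values below are hypothetical, for illustration only, and are not part of the patch:

    # Illustrative only: a plain list stands in for a backend method's results.
    @paginate(limit=2)
    def list_things(max_results=None, next_token=None):
        return ['a', 'b', 'c', 'd', 'e']

    items, token = list_things()                  # (['a', 'b'], 2)
    items, token = list_things(next_token=token)  # (['c', 'd'], 4)
    items, token = list_things(next_token=4)      # (['e'], None): no further pages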
+
+
 class CognitoIdpUserPool(BaseModel):
 
     def __init__(self, region, name, extended_config):
@@ -33,6 +67,7 @@
 
         self.clients = OrderedDict()
         self.identity_providers = OrderedDict()
+        self.groups = OrderedDict()
         self.users = OrderedDict()
         self.refresh_tokens = {}
         self.access_tokens = {}
@@ -185,6 +220,33 @@
         return identity_provider_json
 
 
+class CognitoIdpGroup(BaseModel):
+
+    def __init__(self, user_pool_id, group_name, description, role_arn, precedence):
+        self.user_pool_id = user_pool_id
+        self.group_name = group_name
+        self.description = description or ""
+        self.role_arn = role_arn
+        self.precedence = precedence
+        self.last_modified_date = datetime.datetime.now()
+        self.creation_date = self.last_modified_date
+
+        # Users who are members of this group.
+        # Note that these links are bidirectional.
+        self.users = set()
+
+    def to_json(self):
+        return {
+            "GroupName": self.group_name,
+            "UserPoolId": self.user_pool_id,
+            "Description": self.description,
+            "RoleArn": self.role_arn,
+            "Precedence": self.precedence,
+            "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
+            "CreationDate": time.mktime(self.creation_date.timetuple()),
+        }
+
+
 class CognitoIdpUser(BaseModel):
 
     def __init__(self, user_pool_id, username, password, status, attributes):
@@ -198,6 +260,10 @@
         self.create_date = datetime.datetime.utcnow()
         self.last_modified_date = datetime.datetime.utcnow()
 
+        # Groups this user is a member of.
+        # Note that these links are bidirectional.
+        self.groups = set()
+
     def _base_json(self):
         return {
             "UserPoolId": self.user_pool_id,
@@ -242,7 +308,8 @@
         self.user_pools[user_pool.id] = user_pool
         return user_pool
 
-    def list_user_pools(self):
+    @paginate(60)
+    def list_user_pools(self, max_results=None, next_token=None):
         return self.user_pools.values()
 
     def describe_user_pool(self, user_pool_id):
@@ -289,7 +356,8 @@
         user_pool.clients[user_pool_client.id] = user_pool_client
         return user_pool_client
 
-    def list_user_pool_clients(self, user_pool_id):
+    @paginate(60)
+    def list_user_pool_clients(self, user_pool_id, max_results=None, next_token=None):
         user_pool = self.user_pools.get(user_pool_id)
         if not user_pool:
             raise ResourceNotFoundError(user_pool_id)
@@ -339,7 +407,8 @@
         user_pool.identity_providers[name] = identity_provider
         return identity_provider
 
-    def list_identity_providers(self, user_pool_id):
+    @paginate(60)
+    def list_identity_providers(self, user_pool_id, max_results=None, next_token=None):
         user_pool = self.user_pools.get(user_pool_id)
         if not user_pool:
             raise ResourceNotFoundError(user_pool_id)
@@ -357,6 +426,19 @@
 
         return identity_provider
 
+    def update_identity_provider(self, user_pool_id, name, extended_config):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        identity_provider = user_pool.identity_providers.get(name)
+        if not identity_provider:
+            raise ResourceNotFoundError(name)
+
+        identity_provider.extended_config.update(extended_config)
+
+        return identity_provider
+
     def delete_identity_provider(self, user_pool_id, name):
         user_pool = self.user_pools.get(user_pool_id)
         if not user_pool:
@@ -367,6 +449,72 @@
 
         del user_pool.identity_providers[name]
 
+    # Group
+    def create_group(self, user_pool_id, group_name, description, role_arn, precedence):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        group = CognitoIdpGroup(user_pool_id, group_name, description, role_arn, precedence)
+        if group.group_name in user_pool.groups:
+            raise GroupExistsException("A group with the name already exists")
+        user_pool.groups[group.group_name] = group
+
+        return group
+
+    def get_group(self, user_pool_id, group_name):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        if group_name not in user_pool.groups:
+            raise ResourceNotFoundError(group_name)
+
+        return user_pool.groups[group_name]
+
+    def list_groups(self, user_pool_id):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        return user_pool.groups.values()
+
+    def delete_group(self, user_pool_id, group_name):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        if group_name not in user_pool.groups:
+            raise ResourceNotFoundError(group_name)
+
+        group = user_pool.groups[group_name]
+        for user in group.users:
+            user.groups.remove(group)
+
+        del user_pool.groups[group_name]
+
+    def admin_add_user_to_group(self, user_pool_id, group_name, username):
+        group = self.get_group(user_pool_id, group_name)
+        user = self.admin_get_user(user_pool_id, username)
+
+        group.users.add(user)
+        user.groups.add(group)
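
The user/group links kept by these methods (and by admin_remove_user_from_group, just below) are deliberately bidirectional, so deleting either side can unlink the other. A sketch of the invariant, assuming a hypothetical `backend` with a pool and user already created:

    # Illustrative only, not part of the patch.
    backend.admin_add_user_to_group(pool_id, 'admins', 'alice')
    group = backend.get_group(pool_id, 'admins')
    user = backend.admin_get_user(pool_id, 'alice')
    assert user in group.users and group in user.groups

    backend.admin_remove_user_from_group(pool_id, 'admins', 'alice')
    assert user not in group.users and group not in user.groups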
+
+    def list_users_in_group(self, user_pool_id, group_name):
+        group = self.get_group(user_pool_id, group_name)
+        return list(group.users)
+
+    def admin_list_groups_for_user(self, user_pool_id, username):
+        user = self.admin_get_user(user_pool_id, username)
+        return list(user.groups)
+
+    def admin_remove_user_from_group(self, user_pool_id, group_name, username):
+        group = self.get_group(user_pool_id, group_name)
+        user = self.admin_get_user(user_pool_id, username)
+
+        group.users.discard(user)
+        user.groups.discard(group)
+
     # User
     def admin_create_user(self, user_pool_id, username, temporary_password, attributes):
         user_pool = self.user_pools.get(user_pool_id)
@@ -387,7 +535,8 @@
 
         return user_pool.users[username]
 
-    def list_users(self, user_pool_id):
+    @paginate(60, "pagination_token", "limit")
+    def list_users(self, user_pool_id, pagination_token=None, limit=None):
         user_pool = self.user_pools.get(user_pool_id)
         if not user_pool:
             raise ResourceNotFoundError(user_pool_id)
@@ -410,6 +559,10 @@
         if username not in user_pool.users:
             raise UserNotFoundError(username)
 
+        user = user_pool.users[username]
+        for group in user.groups:
+            group.users.remove(user)
+
         del user_pool.users[username]
 
     def _log_user_in(self, user_pool, client, username):
diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py
index 50939786b..264910739 100644
--- a/moto/cognitoidp/responses.py
+++ b/moto/cognitoidp/responses.py
@@ -22,10 +22,17 @@ class CognitoIdpResponse(BaseResponse):
         })
 
     def list_user_pools(self):
-        user_pools = cognitoidp_backends[self.region].list_user_pools()
-        return json.dumps({
-            "UserPools": [user_pool.to_json() for user_pool in user_pools]
-        })
+        max_results = self._get_param("MaxResults")
+        next_token = self._get_param("NextToken", "0")
+        user_pools, next_token = cognitoidp_backends[self.region].list_user_pools(
+            max_results=max_results, next_token=next_token
+        )
+        response = {
+            "UserPools": [user_pool.to_json() for user_pool in user_pools],
+        }
+        if next_token:
+            response["NextToken"] = str(next_token)
+        return json.dumps(response)
 
     def describe_user_pool(self):
         user_pool_id = self._get_param("UserPoolId")
@@ -72,10 +79,16 @@
 
     def list_user_pool_clients(self):
         user_pool_id = self._get_param("UserPoolId")
-        user_pool_clients = cognitoidp_backends[self.region].list_user_pool_clients(user_pool_id)
-        return json.dumps({
+        max_results = self._get_param("MaxResults")
+        next_token = self._get_param("NextToken", "0")
+        user_pool_clients, next_token = cognitoidp_backends[self.region].list_user_pool_clients(user_pool_id,
+            max_results=max_results, next_token=next_token)
+        response = {
             "UserPoolClients": [user_pool_client.to_json() for user_pool_client in user_pool_clients]
-        })
+        }
+        if next_token:
+            response["NextToken"] = str(next_token)
+        return json.dumps(response)
 
     def describe_user_pool_client(self):
         user_pool_id = self._get_param("UserPoolId")
@@ -110,10 +123,17 @@
 
     def list_identity_providers(self):
         user_pool_id = self._get_param("UserPoolId")
-        identity_providers = cognitoidp_backends[self.region].list_identity_providers(user_pool_id)
-        return json.dumps({
+        max_results = self._get_param("MaxResults")
+        next_token = self._get_param("NextToken", "0")
+        identity_providers, next_token = cognitoidp_backends[self.region].list_identity_providers(
+            user_pool_id, max_results=max_results, next_token=next_token
+        )
+        response = {
             "Providers": [identity_provider.to_json() for identity_provider in identity_providers]
-        })
+        }
+        if next_token:
+            response["NextToken"] = str(next_token)
+        return json.dumps(response)
 
     def describe_identity_provider(self):
         user_pool_id = self._get_param("UserPoolId")
@@ -123,12 +143,103 @@
             "IdentityProvider": identity_provider.to_json(extended=True)
         })
 
+    def update_identity_provider(self):
+        user_pool_id = self._get_param("UserPoolId")
+        name = self._get_param("ProviderName")
+        identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters)
+        return json.dumps({
+            "IdentityProvider": identity_provider.to_json(extended=True)
+        })
+
     def delete_identity_provider(self):
         user_pool_id = self._get_param("UserPoolId")
         name = self._get_param("ProviderName")
         cognitoidp_backends[self.region].delete_identity_provider(user_pool_id, name)
         return ""
 
+    # Group
+    def create_group(self):
+        group_name = self._get_param("GroupName")
+        user_pool_id = self._get_param("UserPoolId")
+        description = self._get_param("Description")
+        role_arn = self._get_param("RoleArn")
+        precedence = self._get_param("Precedence")
+
+        group = cognitoidp_backends[self.region].create_group(
+            user_pool_id,
+            group_name,
+            description,
+            role_arn,
+            precedence,
+        )
+
+        return json.dumps({
+            "Group": group.to_json(),
+        })
+
+    def get_group(self):
+        group_name = self._get_param("GroupName")
+        user_pool_id = self._get_param("UserPoolId")
+        group = cognitoidp_backends[self.region].get_group(user_pool_id, group_name)
+        return json.dumps({
+            "Group": group.to_json(),
+        })
+
+    def list_groups(self):
+        user_pool_id = self._get_param("UserPoolId")
+        groups = cognitoidp_backends[self.region].list_groups(user_pool_id)
+        return json.dumps({
+            "Groups": [group.to_json() for group in groups],
+        })
+
+    def delete_group(self):
+        group_name = self._get_param("GroupName")
+        user_pool_id = self._get_param("UserPoolId")
+        cognitoidp_backends[self.region].delete_group(user_pool_id, group_name)
+        return ""
+
+    def admin_add_user_to_group(self):
+        user_pool_id = self._get_param("UserPoolId")
+        username = self._get_param("Username")
+        group_name = self._get_param("GroupName")
+
+        cognitoidp_backends[self.region].admin_add_user_to_group(
+            user_pool_id,
+            group_name,
+            username,
+        )
+
+        return ""
+
+    def list_users_in_group(self):
+        user_pool_id = self._get_param("UserPoolId")
+        group_name = self._get_param("GroupName")
+        users = cognitoidp_backends[self.region].list_users_in_group(user_pool_id, group_name)
+        return json.dumps({
+            "Users": [user.to_json(extended=True) for user in users],
+        })
+
+    def admin_list_groups_for_user(self):
+        username = self._get_param("Username")
+        user_pool_id = self._get_param("UserPoolId")
+        groups = cognitoidp_backends[self.region].admin_list_groups_for_user(user_pool_id, username)
+        return json.dumps({
+            "Groups": [group.to_json() for group in groups],
+        })
+
+    def admin_remove_user_from_group(self):
+        user_pool_id = self._get_param("UserPoolId")
+        username = self._get_param("Username")
+        group_name = self._get_param("GroupName")
+
+        cognitoidp_backends[self.region].admin_remove_user_from_group(
+            user_pool_id,
+            group_name,
+            username,
+        )
+
+        return ""
+
     # User
     def admin_create_user(self):
         user_pool_id = self._get_param("UserPoolId")
@@ -155,10 +266,15 @@
 
     def list_users(self):
         user_pool_id = self._get_param("UserPoolId")
-        users = cognitoidp_backends[self.region].list_users(user_pool_id)
-        return json.dumps({
-            "Users": [user.to_json(extended=True) for user in users]
-        })
+        limit = self._get_param("Limit")
+        token = self._get_param("PaginationToken")
+        users, token = cognitoidp_backends[self.region].list_users(user_pool_id,
                                                                    limit=limit,
                                                                    pagination_token=token)
+        response = {"Users": [user.to_json(extended=True) for user in users]}
+        if token:
+            response["PaginationToken"] = str(token)
+        return json.dumps(response)
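
Seen from the client side, this pagination plumbing behaves like the real service. A hedged end-to-end sketch with boto3 (pool name and usernames are placeholders, not part of the patch):

    # Illustrative only.
    import boto3
    from moto import mock_cognitoidp

    @mock_cognitoidp
    def check_pagination():
        conn = boto3.client('cognito-idp', 'us-west-2')
        pool_id = conn.create_user_pool(PoolName='demo')['UserPool']['Id']
        for name in ('alice', 'bob', 'carol'):
            conn.admin_create_user(UserPoolId=pool_id, Username=name)

        first = conn.list_users(UserPoolId=pool_id, Limit=2)
        assert len(first['Users']) == 2
        rest = conn.list_users(UserPoolId=pool_id, Limit=2,
                               PaginationToken=first['PaginationToken'])
        assert len(rest['Users']) == 1 and 'PaginationToken' not in rest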
self._get_param("Limit") + token = self._get_param("PaginationToken") + users, token = cognitoidp_backends[self.region].list_users(user_pool_id, + limit=limit, + pagination_token=token) + response = {"Users": [user.to_json(extended=True) for user in users]} + if token: + response["PaginationToken"] = str(token) + return json.dumps(response) def admin_disable_user(self): user_pool_id = self._get_param("UserPoolId") diff --git a/moto/config/__init__.py b/moto/config/__init__.py new file mode 100644 index 000000000..9ca6a5917 --- /dev/null +++ b/moto/config/__init__.py @@ -0,0 +1,4 @@ +from .models import config_backends +from ..core.models import base_decorator + +mock_config = base_decorator(config_backends) diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py new file mode 100644 index 000000000..b2b01d6a0 --- /dev/null +++ b/moto/config/exceptions.py @@ -0,0 +1,149 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class NameTooLongException(JsonRESTError): + code = 400 + + def __init__(self, name, location): + message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \ + ' constraint: Member must have length less than or equal to 256'.format(name=name, location=location) + super(NameTooLongException, self).__init__("ValidationException", message) + + +class InvalidConfigurationRecorderNameException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name) + super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException", + message) + + +class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \ + 'configuration recorders: 1 is reached.'.format(name=name) + super(MaxNumberOfConfigurationRecordersExceededException, self).__init__( + "MaxNumberOfConfigurationRecordersExceededException", message) + + +class InvalidRecordingGroupException(JsonRESTError): + code = 400 + + def __init__(self): + message = 'The recording group provided is not valid' + super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message) + + +class InvalidResourceTypeException(JsonRESTError): + code = 400 + + def __init__(self, bad_list, good_list): + message = '{num} validation error detected: Value \'{bad_list}\' at ' \ + '\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \ + 'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format( + num=len(bad_list), bad_list=bad_list, good_list=good_list) + # For PY2: + message = str(message) + + super(InvalidResourceTypeException, self).__init__("ValidationException", message) + + +class NoSuchConfigurationRecorderException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name) + super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message) + + +class InvalidDeliveryChannelNameException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name) + super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException", + 
+                                                                  message)
+
+
+class NoSuchBucketException(JsonRESTError):
+    """We are *only* validating that there is a value that is not '' here."""
+    code = 400
+
+    def __init__(self):
+        message = 'Cannot find a S3 bucket with an empty bucket name.'
+        super(NoSuchBucketException, self).__init__("NoSuchBucketException", message)
+
+
+class InvalidS3KeyPrefixException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.'
+        super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message)
+
+
+class InvalidSNSTopicARNException(JsonRESTError):
+    """We are *only* validating that there is a value that is not '' here."""
+    code = 400
+
+    def __init__(self):
+        message = 'The sns topic arn \'\' is not valid.'
+        super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message)
+
+
+class InvalidDeliveryFrequency(JsonRESTError):
+    code = 400
+
+    def __init__(self, value, good_list):
+        message = '1 validation error detected: Value \'{value}\' at ' \
+                  '\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \
+                  'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list)
+        super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message)
+
+
+class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
+    code = 400
+
+    def __init__(self, name):
+        message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \
+                  'delivery channels: 1 is reached.'.format(name=name)
+        super(MaxNumberOfDeliveryChannelsExceededException, self).__init__(
+            "MaxNumberOfDeliveryChannelsExceededException", message)
+
+
+class NoSuchDeliveryChannelException(JsonRESTError):
+    code = 400
+
+    def __init__(self, name):
+        message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name)
+        super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message)
+
+
+class NoAvailableConfigurationRecorderException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        message = 'Configuration recorder is not available to put delivery channel.'
+        super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException",
+                                                                        message)
+
+
+class NoAvailableDeliveryChannelException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        message = 'Delivery channel is not available to start configuration recorder.'
+        super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message)
+
+
+class LastDeliveryChannelDeleteFailedException(JsonRESTError):
+    code = 400
+
+    def __init__(self, name):
+        message = 'Failed to delete last specified delivery channel with name \'{name}\', ' \
+                  'because there is a running configuration recorder.'.format(name=name)
+        super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message)
diff --git a/moto/config/models.py b/moto/config/models.py
new file mode 100644
index 000000000..cd6e07afa
--- /dev/null
+++ b/moto/config/models.py
@@ -0,0 +1,335 @@
+import json
+import time
+import pkg_resources
+
+from datetime import datetime
+
+from boto3 import Session
+
+from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \
+    InvalidConfigurationRecorderNameException, NameTooLongException, \
+    MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \
+    NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \
+    InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \
+    InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
+    NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException
+
+from moto.core import BaseBackend, BaseModel
+
+DEFAULT_ACCOUNT_ID = 123456789012
+
+
+def datetime2int(date):
+    return int(time.mktime(date.timetuple()))
+
+
+def snake_to_camels(original):
+    parts = original.split('_')
+
+    camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:])
+    camel_cased = camel_cased.replace('Arn', 'ARN')  # Config uses 'ARN' instead of 'Arn'
+
+    return camel_cased
+
+
+class ConfigEmptyDictable(BaseModel):
+    """Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON."""
+
+    def to_dict(self):
+        data = {}
+        for item, value in self.__dict__.items():
+            if value is not None:
+                if isinstance(value, ConfigEmptyDictable):
+                    data[snake_to_camels(item)] = value.to_dict()
+                else:
+                    data[snake_to_camels(item)] = value
+
+        return data
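
This serialization helper drives all of the Config response shapes: snake_case attribute names become the camelCase keys the Config API returns, with 'Arn' normalized to 'ARN', and None attributes dropped. A sketch using the delivery-channel model defined further down this file (values illustrative, not part of the patch):

    # Illustrative only.
    channel = ConfigDeliveryChannel('default', 'my-bucket',
                                    sns_arn='arn:aws:sns:us-east-1:123456789012:config')
    channel.to_dict()
    # {'name': 'default', 's3BucketName': 'my-bucket',
    #  'snsTopicARN': 'arn:aws:sns:us-east-1:123456789012:config'}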
+
+
+class ConfigRecorderStatus(ConfigEmptyDictable):
+
+    def __init__(self, name):
+        self.name = name
+
+        self.recording = False
+        self.last_start_time = None
+        self.last_stop_time = None
+        self.last_status = None
+        self.last_error_code = None
+        self.last_error_message = None
+        self.last_status_change_time = None
+
+    def start(self):
+        self.recording = True
+        self.last_status = 'PENDING'
+        self.last_start_time = datetime2int(datetime.utcnow())
+        self.last_status_change_time = datetime2int(datetime.utcnow())
+
+    def stop(self):
+        self.recording = False
+        self.last_stop_time = datetime2int(datetime.utcnow())
+        self.last_status_change_time = datetime2int(datetime.utcnow())
+
+
+class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):
+
+    def __init__(self, delivery_frequency):
+        self.delivery_frequency = delivery_frequency
+
+
+class ConfigDeliveryChannel(ConfigEmptyDictable):
+
+    def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None):
+        self.name = name
+        self.s3_bucket_name = s3_bucket_name
+        self.s3_key_prefix = prefix
+        self.sns_topic_arn = sns_arn
+        self.config_snapshot_delivery_properties = snapshot_properties
+
+
+class RecordingGroup(ConfigEmptyDictable):
+
+    def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None):
+        self.all_supported = all_supported
+        self.include_global_resource_types = include_global_resource_types
+        self.resource_types = resource_types
+
+
+class ConfigRecorder(ConfigEmptyDictable):
+
+    def __init__(self, role_arn, recording_group, name='default', status=None):
+        self.name = name
+        self.role_arn = role_arn
+        self.recording_group = recording_group
+
+        if not status:
+            self.status = ConfigRecorderStatus(name)
+        else:
+            self.status = status
+
+
+class ConfigBackend(BaseBackend):
+
+    def __init__(self):
+        self.recorders = {}
+        self.delivery_channels = {}
+
+    @staticmethod
+    def _validate_resource_types(resource_list):
+        # Load the service file:
+        resource_package = 'botocore'
+        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
+        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))
+
+        # Verify that each entry exists in the supported list:
+        bad_list = []
+        for resource in resource_list:
+            # For PY2:
+            r_str = str(resource)
+
+            if r_str not in config_schema['shapes']['ResourceType']['enum']:
+                bad_list.append(r_str)
+
+        if bad_list:
+            raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum'])
+
+    @staticmethod
+    def _validate_delivery_snapshot_properties(properties):
+        # Load the service file:
+        resource_package = 'botocore'
+        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
+        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))
+
+        # Verify that the deliveryFrequency is set to an acceptable value:
+        if properties.get('deliveryFrequency', None) not in \
+                config_schema['shapes']['MaximumExecutionFrequency']['enum']:
+            raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None),
+                                           config_schema['shapes']['MaximumExecutionFrequency']['enum'])
+
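
Because both validators read the enums out of botocore's bundled config service definition, the accepted values track whatever botocore version is installed rather than a hand-kept list. A sketch of the failure mode (resource names illustrative, not part of the patch):

    # Illustrative only.
    ConfigBackend._validate_resource_types(['AWS::EC2::Instance'])  # valid, returns None
    ConfigBackend._validate_resource_types(['AWS::Foo::Bar'])       # raises InvalidResourceTypeException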
+    def put_configuration_recorder(self, config_recorder):
+        # Validate the name:
+        if not config_recorder.get('name'):
+            raise InvalidConfigurationRecorderNameException(config_recorder.get('name'))
+        if len(config_recorder.get('name')) > 256:
+            raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name')
+
+        # We're going to assume that the passed in Role ARN is correct.
+
+        # Config currently only allows 1 configuration recorder for an account:
+        if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']):
+            raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name'])
+
+        # Is this updating an existing one?
+        recorder_status = None
+        if self.recorders.get(config_recorder['name']):
+            recorder_status = self.recorders[config_recorder['name']].status
+
+        # Validate the Recording Group:
+        if config_recorder.get('recordingGroup') is None:
+            recording_group = RecordingGroup()
+        else:
+            rg = config_recorder['recordingGroup']
+
+            # If an empty dict is passed in, then bad:
+            if not rg:
+                raise InvalidRecordingGroupException()
+
+            # Can't have both the resource types specified and the other flags as True.
+            if rg.get('resourceTypes') and (
+                    rg.get('allSupported', False) or
+                    rg.get('includeGlobalResourceTypes', False)):
+                raise InvalidRecordingGroupException()
+
+            # Must supply resourceTypes if 'allSupported' is not supplied:
+            if not rg.get('allSupported') and not rg.get('resourceTypes'):
+                raise InvalidRecordingGroupException()
+
+            # Validate that the list provided is correct:
+            self._validate_resource_types(rg.get('resourceTypes', []))
+
+            recording_group = RecordingGroup(
+                all_supported=rg.get('allSupported', True),
+                include_global_resource_types=rg.get('includeGlobalResourceTypes', False),
+                resource_types=rg.get('resourceTypes', [])
+            )
+
+        self.recorders[config_recorder['name']] = \
+            ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'],
+                           status=recorder_status)
+
+    def describe_configuration_recorders(self, recorder_names):
+        recorders = []
+
+        if recorder_names:
+            for rn in recorder_names:
+                if not self.recorders.get(rn):
+                    raise NoSuchConfigurationRecorderException(rn)
+
+                # Format the recorder:
+                recorders.append(self.recorders[rn].to_dict())
+
+        else:
+            for recorder in self.recorders.values():
+                recorders.append(recorder.to_dict())
+
+        return recorders
+
+    def describe_configuration_recorder_status(self, recorder_names):
+        recorders = []
+
+        if recorder_names:
+            for rn in recorder_names:
+                if not self.recorders.get(rn):
+                    raise NoSuchConfigurationRecorderException(rn)
+
+                # Format the recorder:
+                recorders.append(self.recorders[rn].status.to_dict())
+
+        else:
+            for recorder in self.recorders.values():
+                recorders.append(recorder.status.to_dict())
+
+        return recorders
+
+    def put_delivery_channel(self, delivery_channel):
+        # Must have a configuration recorder:
+        if not self.recorders:
+            raise NoAvailableConfigurationRecorderException()
+
+        # Validate the name:
+        if not delivery_channel.get('name'):
+            raise InvalidDeliveryChannelNameException(delivery_channel.get('name'))
+        if len(delivery_channel.get('name')) > 256:
+            raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name')
+
+        # We are going to assume that the bucket exists -- but will verify if the bucket provided is blank:
+        if not delivery_channel.get('s3BucketName'):
+            raise NoSuchBucketException()
+
+        # We are going to assume that the bucket has the correct policy attached to it. We are only going to verify
+        # if the prefix provided is not an empty string:
+        if delivery_channel.get('s3KeyPrefix', None) == '':
+            raise InvalidS3KeyPrefixException()
+
+        # Ditto for SNS -- Only going to assume that the ARN provided is not an empty string:
+        if delivery_channel.get('snsTopicARN', None) == '':
+            raise InvalidSNSTopicARNException()
+
+        # Config currently only allows 1 delivery channel for an account:
+        if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']):
+            raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name'])
+
+        if not delivery_channel.get('configSnapshotDeliveryProperties'):
+            dp = None
+
+        else:
+            # Validate the config snapshot delivery properties:
+            self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties'])
+
+            dp = ConfigDeliverySnapshotProperties(
+                delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency'])
+
+        self.delivery_channels[delivery_channel['name']] = \
+            ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'],
+                                  prefix=delivery_channel.get('s3KeyPrefix', None),
+                                  sns_arn=delivery_channel.get('snsTopicARN', None),
+                                  snapshot_properties=dp)
+
+    def describe_delivery_channels(self, channel_names):
+        channels = []
+
+        if channel_names:
+            for cn in channel_names:
+                if not self.delivery_channels.get(cn):
+                    raise NoSuchDeliveryChannelException(cn)
+
+                # Format the delivery channel:
+                channels.append(self.delivery_channels[cn].to_dict())
+
+        else:
+            for channel in self.delivery_channels.values():
+                channels.append(channel.to_dict())
+
+        return channels
+
+    def start_configuration_recorder(self, recorder_name):
+        if not self.recorders.get(recorder_name):
+            raise NoSuchConfigurationRecorderException(recorder_name)
+
+        # Must have a delivery channel available as well:
+        if not self.delivery_channels:
+            raise NoAvailableDeliveryChannelException()
+
+        # Start recording:
+        self.recorders[recorder_name].status.start()
+
+    def stop_configuration_recorder(self, recorder_name):
+        if not self.recorders.get(recorder_name):
+            raise NoSuchConfigurationRecorderException(recorder_name)
+
+        # Stop recording:
+        self.recorders[recorder_name].status.stop()
+
+    def delete_configuration_recorder(self, recorder_name):
+        if not self.recorders.get(recorder_name):
+            raise NoSuchConfigurationRecorderException(recorder_name)
+
+        del self.recorders[recorder_name]
+
+    def delete_delivery_channel(self, channel_name):
+        if not self.delivery_channels.get(channel_name):
+            raise NoSuchDeliveryChannelException(channel_name)
+
+        # Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
+        for recorder in self.recorders.values():
+            if recorder.status.recording:
+                raise LastDeliveryChannelDeleteFailedException(channel_name)
+
+        del self.delivery_channels[channel_name]
+
+
+config_backends = {}
+boto3_session = Session()
+for region in boto3_session.get_available_regions('config'):
+    config_backends[region] = ConfigBackend()
diff --git a/moto/config/responses.py b/moto/config/responses.py
new file mode 100644
index 000000000..286b2349f
--- /dev/null
+++ b/moto/config/responses.py
@@ -0,0 +1,53 @@
+import json
+from moto.core.responses import BaseResponse
+from .models import config_backends
+
+
+class ConfigResponse(BaseResponse):
+
+    @property
+    def config_backend(self):
+        return config_backends[self.region]
+
+    def put_configuration_recorder(self):
+        self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
+        return ""
+
+    def describe_configuration_recorders(self):
+        recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
+        schema = {'ConfigurationRecorders': recorders}
+        return json.dumps(schema)
+
+    def describe_configuration_recorder_status(self):
+        recorder_statuses = self.config_backend.describe_configuration_recorder_status(
+            self._get_param('ConfigurationRecorderNames'))
+        schema = {'ConfigurationRecordersStatus': recorder_statuses}
+        return json.dumps(schema)
+
+    def put_delivery_channel(self):
+        self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
+        return ""
+
+    def describe_delivery_channels(self):
+        delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
+        schema = {'DeliveryChannels': delivery_channels}
+        return json.dumps(schema)
+
+    def describe_delivery_channel_status(self):
+        raise NotImplementedError()
+
+    def delete_delivery_channel(self):
+        self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
+        return ""
+
+    def delete_configuration_recorder(self):
+        self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
+        return ""
+
+    def start_configuration_recorder(self):
+        self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
+        return ""
+
+    def stop_configuration_recorder(self):
+        self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
+        return ""
diff --git a/moto/config/urls.py b/moto/config/urls.py
new file mode 100644
index 000000000..fd7b6969f
--- /dev/null
+++ b/moto/config/urls.py
@@ -0,0 +1,10 @@
+from __future__ import unicode_literals
+from .responses import ConfigResponse
+
+url_bases = [
+    "https?://config.(.+).amazonaws.com",
+]
+
+url_paths = {
+    '{0}/$': ConfigResponse.dispatch,
+}
diff --git a/moto/core/models.py b/moto/core/models.py
index 19267ca08..9fe1e96bd 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -4,6 +4,7 @@ from __future__ import absolute_import
 
 import functools
 import inspect
+import os
 import re
 import six
 from io import BytesIO
@@ -21,6 +22,11 @@
 )
 
 
+# "Mock" the AWS credentials as they can't be mocked in Botocore currently
+os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
+os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
+
+
 class BaseMockAWS(object):
     nested_count = 0
diff --git a/moto/core/responses.py b/moto/core/responses.py
index 0f133e72c..8fb247f75 100644
--- a/moto/core/responses.py
+++ b/moto/core/responses.py
@@ -718,6 +718,8 @@ def to_str(value, spec):
         return str(value)
     elif vtype == 'float':
         return str(value)
+    elif vtype == 'double':
+        return str(value)
     elif vtype == 'timestamp':
         return datetime.datetime.utcfromtimestamp(
             value).replace(tzinfo=pytz.utc).isoformat()
@@ -737,6 +739,8 @@ def from_str(value, spec):
         return int(value)
     elif vtype == 'float':
         return float(value)
+    elif vtype == 'double':
+        return float(value)
     elif vtype == 'timestamp':
         return value
     elif vtype == 'string':
diff --git a/moto/core/utils.py b/moto/core/utils.py
index 777a03752..ca670e871 100644
--- a/moto/core/utils.py
+++ b/moto/core/utils.py
@@ -280,7 +280,7 @@ def amzn_request_id(f):
 
         # Update request ID in XML
         try:
-            body = body.replace('{{ requestid }}', request_id)
+            body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body)
         except Exception:  # Will just ignore if it can't work on bytes (which are str's on python2)
             pass
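
The widened substitution above targets the rendered XML rather than a literal '{{ requestid }}' placeholder, so hard-coded example ids in response templates get overwritten too. A sketch (strings illustrative, not part of the patch):

    # Illustrative only.
    import re
    body = '<RequestId>2188554a-07c6-4396-b2c5-example</RequestId>'
    re.sub(r'(?<=<RequestId>).*(?=</RequestId>)', 'generated-id', body)
    # '<RequestId>generated-id</RequestId>'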
diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py
index 53226c557..6d37345fe 100644
--- a/moto/dynamodb2/comparisons.py
+++ b/moto/dynamodb2/comparisons.py
@@ -383,7 +383,7 @@ class OpNotEqual(Op):
     def expr(self, item):
         lhs = self._lhs(item)
         rhs = self._rhs(item)
-        return lhs == rhs
+        return lhs != rhs
 
 
 class OpLessThanOrEqual(Op):
diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py
index a54c4f7d0..0f4594aa4 100644
--- a/moto/dynamodb2/models.py
+++ b/moto/dynamodb2/models.py
@@ -5,6 +5,7 @@ import datetime
 import decimal
 import json
 import re
+import uuid
 
 import boto3
 from moto.compat import OrderedDict
@@ -65,6 +66,8 @@ class DynamoType(object):
                 return int(self.value)
             except ValueError:
                 return float(self.value)
+        elif self.is_set():
+            return set(self.value)
         else:
             return self.value
 
@@ -292,9 +295,82 @@ class Item(BaseModel):
                     'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
 
 
+class StreamRecord(BaseModel):
+    def __init__(self, table, stream_type, event_name, old, new, seq):
+        old_a = old.to_json()['Attributes'] if old is not None else {}
+        new_a = new.to_json()['Attributes'] if new is not None else {}
+
+        rec = old if old is not None else new
+        keys = {table.hash_key_attr: rec.hash_key.to_json()}
+        if table.range_key_attr is not None:
+            keys[table.range_key_attr] = rec.range_key.to_json()
+
+        self.record = {
+            'eventID': uuid.uuid4().hex,
+            'eventName': event_name,
+            'eventSource': 'aws:dynamodb',
+            'eventVersion': '1.0',
+            'awsRegion': 'us-east-1',
+            'dynamodb': {
+                'StreamViewType': stream_type,
+                'ApproximateCreationDateTime': datetime.datetime.utcnow().isoformat(),
+                'SequenceNumber': seq,
+                'SizeBytes': 1,
+                'Keys': keys
+            }
+        }
+
+        if stream_type in ('NEW_IMAGE', 'NEW_AND_OLD_IMAGES'):
+            self.record['dynamodb']['NewImage'] = new_a
+        if stream_type in ('OLD_IMAGE', 'NEW_AND_OLD_IMAGES'):
+            self.record['dynamodb']['OldImage'] = old_a
+
+        # This is a substantial overestimate but it's the easiest to do now
+        self.record['dynamodb']['SizeBytes'] = len(
+            json.dumps(self.record['dynamodb']))
+
+    def to_json(self):
+        return self.record
+
+
+class StreamShard(BaseModel):
+    def __init__(self, table):
+        self.table = table
+        self.id = 'shardId-00000001541626099285-f35f62ef'
+        self.starting_sequence_number = 1100000000017454423009
+        self.items = []
+        self.created_on = datetime.datetime.utcnow()
+
+    def to_json(self):
+        return {
+            'ShardId': self.id,
+            'SequenceNumberRange': {
+                'StartingSequenceNumber': str(self.starting_sequence_number)
+            }
+        }
+
+    def add(self, old, new):
+        t = self.table.stream_specification['StreamViewType']
+        if old is None:
+            event_name = 'INSERT'
+        elif new is None:
+            event_name = 'DELETE'
+        else:
+            event_name = 'MODIFY'
+        seq = len(self.items) + self.starting_sequence_number
+        self.items.append(
+            StreamRecord(self.table, t, event_name, old, new, seq))
+
+    def get(self, start, quantity):
+        start -= self.starting_sequence_number
+        assert start >= 0
+        end = start + quantity
+        return [i.to_json() for i in self.items[start:end]]
+
+
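
StreamShard is an in-memory approximation of a single-shard DynamoDB stream: writers call add(old, new) and readers page through records by sequence number. A sketch of the flow, assuming hypothetical 'table' (a Table with streams enabled) and Item instances 'item'/'updated_item'; this is not part of the patch:

    # Illustrative only.
    shard = StreamShard(table)
    shard.add(None, item)          # recorded as an INSERT event
    shard.add(item, updated_item)  # recorded as a MODIFY event
    records = shard.get(shard.starting_sequence_number, 100)
    assert [r['eventName'] for r in records] == ['INSERT', 'MODIFY']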
 class Table(BaseModel):
 
-    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None):
+    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None, streams=None):
         self.name = table_name
         self.attr = attr
         self.schema = schema
@@ -325,10 +401,22 @@ class Table(BaseModel):
             'TimeToLiveStatus': 'DISABLED'  # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
             # 'AttributeName': 'string'  # Can contain this
         }
+        self.set_stream_specification(streams)
 
     def _generate_arn(self, name):
         return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
 
+    def set_stream_specification(self, streams):
+        self.stream_specification = streams
+        if streams and (streams.get('StreamEnabled') or streams.get('StreamViewType')):
+            self.stream_specification['StreamEnabled'] = True
+            self.latest_stream_label = datetime.datetime.utcnow().isoformat()
+            self.stream_shard = StreamShard(self)
+        else:
+            self.stream_specification = {'StreamEnabled': False}
+            self.latest_stream_label = None
+            self.stream_shard = None
+
     def describe(self, base_key='TableDescription'):
         results = {
             base_key: {
@@ -345,6 +433,11 @@ class Table(BaseModel):
                 'LocalSecondaryIndexes': [index for index in self.indexes],
             }
         }
+        if self.stream_specification and self.stream_specification['StreamEnabled']:
+            results[base_key]['StreamSpecification'] = self.stream_specification
+        if self.latest_stream_label:
+            results[base_key]['LatestStreamLabel'] = self.latest_stream_label
+            results[base_key]['LatestStreamArn'] = self.table_arn + '/stream/' + self.latest_stream_label
         return results
 
     def __len__(self):
@@ -385,23 +478,22 @@ class Table(BaseModel):
         else:
             range_value = None
 
+        if expected is None:
+            expected = {}
+            lookup_range_value = range_value
+        else:
+            expected_range_value = expected.get(
+                self.range_key_attr, {}).get("Value")
+            if(expected_range_value is None):
+                lookup_range_value = range_value
+            else:
+                lookup_range_value = DynamoType(expected_range_value)
+
+        current = self.get_item(hash_value, lookup_range_value)
+
         item = Item(hash_value, self.hash_key_type, range_value,
                     self.range_key_type, item_attrs)
 
         if not overwrite:
-            if expected is None:
-                expected = {}
-                lookup_range_value = range_value
-            else:
-                expected_range_value = expected.get(
-                    self.range_key_attr, {}).get("Value")
-                if(expected_range_value is None):
-                    lookup_range_value = range_value
-                else:
-                    lookup_range_value = DynamoType(expected_range_value)
-
-            current = self.get_item(hash_value, lookup_range_value)
-
             if current is None:
                 current_attr = {}
             elif hasattr(current, 'attrs'):
@@ -419,19 +511,20 @@
                 elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
                     raise ValueError("The conditional request failed")
                 elif 'ComparisonOperator' in val:
-                    comparison_func = get_comparison_func(
-                        val['ComparisonOperator'])
                     dynamo_types = [
                         DynamoType(ele) for ele in
                         val.get("AttributeValueList", [])
                     ]
-                    for t in dynamo_types:
-                        if not comparison_func(current_attr[key].value, t.value):
-                            raise ValueError('The conditional request failed')
+                    if not current_attr[key].compare(val['ComparisonOperator'], dynamo_types):
+                        raise ValueError('The conditional request failed')
         if range_value:
             self.items[hash_value][range_value] = item
         else:
             self.items[hash_value] = item
+
+        if self.stream_shard is not None:
+            self.stream_shard.add(current, item)
+
         return item
 
     def __nonzero__(self):
@@ -462,9 +555,14 @@
     def delete_item(self, hash_key, range_key):
         try:
             if range_key:
-                return self.items[hash_key].pop(range_key)
+                item = self.items[hash_key].pop(range_key)
             else:
-                return self.items.pop(hash_key)
+                item = self.items.pop(hash_key)
+
+            if self.stream_shard is not None:
+                self.stream_shard.add(item, None)
+
+            return item
         except KeyError:
             return None
 
@@ -472,6 +570,7 @@
     def query(self, hash_key, range_comparison, range_objs, limit,
               exclusive_start_key, scan_index_forward, projection_expression,
               index_name=None, filter_expression=None, **filter_kwargs):
         results = []
+
         if index_name:
             all_indexes = (self.global_indexes or []) + (self.indexes or [])
             indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
@@ -488,24 +587,28 @@
                 raise ValueError('Missing Hash Key. KeySchema: %s' %
                                  index['KeySchema'])
 
-            possible_results = []
-            for item in self.all_items():
-                if not isinstance(item, Item):
-                    continue
-                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
-                if item_hash_key and item_hash_key == hash_key:
-                    possible_results.append(item)
-        else:
-            possible_results = [item for item in list(self.all_items()) if isinstance(
-                item, Item) and item.hash_key == hash_key]
-
-        if index_name:
             try:
                 index_range_key = [key for key in index[
                     'KeySchema'] if key['KeyType'] == 'RANGE'][0]
             except IndexError:
                 index_range_key = None
 
+            possible_results = []
+            for item in self.all_items():
+                if not isinstance(item, Item):
+                    continue
+                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
+                if index_range_key is None:
+                    if item_hash_key and item_hash_key == hash_key:
+                        possible_results.append(item)
+                else:
+                    item_range_key = item.attrs.get(index_range_key['AttributeName'])
+                    if item_hash_key and item_hash_key == hash_key and item_range_key:
+                        possible_results.append(item)
+        else:
+            possible_results = [item for item in list(self.all_items()) if isinstance(
+                item, Item) and item.hash_key == hash_key]
+
         if range_comparison:
             if index_name and not index_range_key:
                 raise ValueError(
@@ -680,6 +783,13 @@ class DynamoDBBackend(BaseBackend):
         table.throughput = throughput
         return table
 
+    def update_table_streams(self, name, stream_specification):
+        table = self.tables[name]
+        if (stream_specification.get('StreamEnabled') or stream_specification.get('StreamViewType')) and table.latest_stream_label:
+            raise ValueError('Table already has stream enabled')
+        table.set_stream_specification(stream_specification)
+        return table
+
     def update_table_global_indexes(self, name, global_index_updates):
         table = self.tables[name]
         gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
@@ -840,15 +950,12 @@
                 elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
                     raise ValueError("The conditional request failed")
                 elif 'ComparisonOperator' in val:
-                    comparison_func = get_comparison_func(
-                        val['ComparisonOperator'])
                     dynamo_types = [
                         DynamoType(ele) for ele in
                         val.get("AttributeValueList", [])
                     ]
-                    for t in dynamo_types:
-                        if not comparison_func(item_attr[key].value, t.value):
-                            raise ValueError('The conditional request failed')
+                    if not item_attr[key].compare(val['ComparisonOperator'], dynamo_types):
+                        raise ValueError('The conditional request failed')
 
         # Update does not fail on new items, so create one
         if item is None:
diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py
index e2f1ef1cc..e382efc1d 100644
--- a/moto/dynamodb2/responses.py
+++ b/moto/dynamodb2/responses.py
@@ -31,6 +31,67 @@ def get_empty_str_error():
     ))
 
 
+def condition_expression_to_expected(condition_expression, expression_attribute_names, expression_attribute_values):
+    """
+    Limited condition expression syntax parsing.
+    Supports global negation, e.g. NOT(inner expressions).
+    Supports simple AND conditions, e.g. cond_a AND cond_b AND cond_c.
+    Atomic expressions supported are attribute_exists(key), attribute_not_exists(key) and #key = :value.
+    """
+    expected = {}
+    if condition_expression and 'OR' not in condition_expression:
+        reverse_re = re.compile('^NOT\s*\((.*)\)$')
+        reverse_m = reverse_re.match(condition_expression.strip())
+
+        reverse = False
+        if reverse_m:
+            reverse = True
+            condition_expression = reverse_m.group(1)
+
+        cond_items = [c.strip() for c in condition_expression.split('AND')]
+        if cond_items:
+            exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
+            not_exists_re = re.compile(
+                '^attribute_not_exists\s*\((.*)\)$')
+            equals_re = re.compile('^(#?\w+)\s*=\s*(\:?\w+)')
+
+            for cond in cond_items:
+                exists_m = exists_re.match(cond)
+                not_exists_m = not_exists_re.match(cond)
+                equals_m = equals_re.match(cond)
+
+                if exists_m:
+                    attribute_name = expression_attribute_names_lookup(exists_m.group(1), expression_attribute_names)
+                    expected[attribute_name] = {'Exists': True if not reverse else False}
+                elif not_exists_m:
+                    attribute_name = expression_attribute_names_lookup(not_exists_m.group(1), expression_attribute_names)
+                    expected[attribute_name] = {'Exists': False if not reverse else True}
+                elif equals_m:
+                    attribute_name = expression_attribute_names_lookup(equals_m.group(1), expression_attribute_names)
+                    attribute_value = expression_attribute_values_lookup(equals_m.group(2), expression_attribute_values)
+                    expected[attribute_name] = {
+                        'AttributeValueList': [attribute_value],
+                        'ComparisonOperator': 'EQ' if not reverse else 'NEQ'}
+
+    return expected
+
+
+def expression_attribute_names_lookup(attribute_name, expression_attribute_names):
+    if attribute_name.startswith('#') and attribute_name in expression_attribute_names:
+        return expression_attribute_names[attribute_name]
+    else:
+        return attribute_name
+
+
+def expression_attribute_values_lookup(attribute_value, expression_attribute_values):
+    if isinstance(attribute_value, six.string_types) and \
+            attribute_value.startswith(':') and \
+            attribute_value in expression_attribute_values:
+        return expression_attribute_values[attribute_value]
+    else:
+        return attribute_value
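
A sketch of the translation this parser performs (attribute names and values are illustrative, not part of the patch); wrapping the whole expression in NOT(...) would flip each resulting condition:

    # Illustrative only.
    expected = condition_expression_to_expected(
        'attribute_not_exists(#id) AND #owner = :owner',
        {'#id': 'id', '#owner': 'owner'},
        {':owner': {'S': 'alice'}})
    # {'id': {'Exists': False},
    #  'owner': {'AttributeValueList': [{'S': 'alice'}], 'ComparisonOperator': 'EQ'}}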
+ """ + expected = {} + if condition_expression and 'OR' not in condition_expression: + reverse_re = re.compile('^NOT\s*\((.*)\)$') + reverse_m = reverse_re.match(condition_expression.strip()) + + reverse = False + if reverse_m: + reverse = True + condition_expression = reverse_m.group(1) + + cond_items = [c.strip() for c in condition_expression.split('AND')] + if cond_items: + exists_re = re.compile('^attribute_exists\s*\((.*)\)$') + not_exists_re = re.compile( + '^attribute_not_exists\s*\((.*)\)$') + equals_re = re.compile('^(#?\w+)\s*=\s*(\:?\w+)') + + for cond in cond_items: + exists_m = exists_re.match(cond) + not_exists_m = not_exists_re.match(cond) + equals_m = equals_re.match(cond) + + if exists_m: + attribute_name = expression_attribute_names_lookup(exists_m.group(1), expression_attribute_names) + expected[attribute_name] = {'Exists': True if not reverse else False} + elif not_exists_m: + attribute_name = expression_attribute_names_lookup(not_exists_m.group(1), expression_attribute_names) + expected[attribute_name] = {'Exists': False if not reverse else True} + elif equals_m: + attribute_name = expression_attribute_names_lookup(equals_m.group(1), expression_attribute_names) + attribute_value = expression_attribute_values_lookup(equals_m.group(2), expression_attribute_values) + expected[attribute_name] = { + 'AttributeValueList': [attribute_value], + 'ComparisonOperator': 'EQ' if not reverse else 'NEQ'} + + return expected + + +def expression_attribute_names_lookup(attribute_name, expression_attribute_names): + if attribute_name.startswith('#') and attribute_name in expression_attribute_names: + return expression_attribute_names[attribute_name] + else: + return attribute_name + + +def expression_attribute_values_lookup(attribute_value, expression_attribute_values): + if isinstance(attribute_value, six.string_types) and \ + attribute_value.startswith(':') and\ + attribute_value in expression_attribute_values: + return expression_attribute_values[attribute_value] + else: + return attribute_value + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -95,8 +156,16 @@ class DynamoHandler(BaseResponse): body = self.body # get the table name table_name = body['TableName'] - # get the throughput - throughput = body["ProvisionedThroughput"] + # check billing mode and get the throughput + if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST": + if "ProvisionedThroughput" in body.keys(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, + 'ProvisionedThroughput cannot be specified \ + when BillingMode is PAY_PER_REQUEST') + throughput = None + else: # Provisioned (default billing mode) + throughput = body["ProvisionedThroughput"] # getting the schema key_schema = body['KeySchema'] # getting attribute definition @@ -104,13 +173,16 @@ class DynamoHandler(BaseResponse): # getting the indexes global_indexes = body.get("GlobalSecondaryIndexes", []) local_secondary_indexes = body.get("LocalSecondaryIndexes", []) + # get the stream specification + streams = body.get("StreamSpecification") table = self.dynamodb_backend.create_table(table_name, schema=key_schema, throughput=throughput, attr=attr, global_indexes=global_indexes, - indexes=local_secondary_indexes) + indexes=local_secondary_indexes, + streams=streams) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -163,12 +235,20 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] + table = 
         self.dynamodb_backend.get_table(name)
         if 'GlobalSecondaryIndexUpdates' in self.body:
             table = self.dynamodb_backend.update_table_global_indexes(
                 name, self.body['GlobalSecondaryIndexUpdates'])
         if 'ProvisionedThroughput' in self.body:
             throughput = self.body["ProvisionedThroughput"]
             table = self.dynamodb_backend.update_table_throughput(name, throughput)
+        if 'StreamSpecification' in self.body:
+            try:
+                table = self.dynamodb_backend.update_table_streams(name, self.body['StreamSpecification'])
+            except ValueError:
+                er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException'
+                return self.error(er, 'Cannot enable stream')
+
         return dynamo_json_dump(table.describe())
 
     def describe_table(self):
@@ -183,6 +263,11 @@ class DynamoHandler(BaseResponse):
     def put_item(self):
         name = self.body['TableName']
         item = self.body['Item']
+        return_values = self.body.get('ReturnValues', 'NONE')
+
+        if return_values not in ('ALL_OLD', 'NONE'):
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, 'Return values set to invalid value')
 
         if has_empty_keys_or_values(item):
             return get_empty_str_error()
@@ -193,28 +278,24 @@ class DynamoHandler(BaseResponse):
         else:
             expected = None
 
+        if return_values == 'ALL_OLD':
+            existing_item = self.dynamodb_backend.get_item(name, item)
+            if existing_item:
+                existing_attributes = existing_item.to_json()['Attributes']
+            else:
+                existing_attributes = {}
+
         # Attempt to parse simple ConditionExpressions into an Expected
         # expression
         if not expected:
             condition_expression = self.body.get('ConditionExpression')
-            if condition_expression and 'OR' not in condition_expression:
-                cond_items = [c.strip()
-                              for c in condition_expression.split('AND')]
-
-                if cond_items:
-                    expected = {}
-                    overwrite = False
-                    exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
-                    not_exists_re = re.compile(
-                        '^attribute_not_exists\s*\((.*)\)$')
-
-                for cond in cond_items:
-                    exists_m = exists_re.match(cond)
-                    not_exists_m = not_exists_re.match(cond)
-                    if exists_m:
-                        expected[exists_m.group(1)] = {'Exists': True}
-                    elif not_exists_m:
-                        expected[not_exists_m.group(1)] = {'Exists': False}
+            expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
+            expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
+            expected = condition_expression_to_expected(condition_expression,
+                                                        expression_attribute_names,
+                                                        expression_attribute_values)
+            if expected:
+                overwrite = False
 
         try:
             result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
@@ -228,6 +309,10 @@ class DynamoHandler(BaseResponse):
                 'TableName': name,
                 'CapacityUnits': 1
             }
+            if return_values == 'ALL_OLD':
+                item_dict['Attributes'] = existing_attributes
+            else:
+                item_dict.pop('Attributes', None)
             return dynamo_json_dump(item_dict)
         else:
             er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
@@ -385,7 +470,7 @@ class DynamoHandler(BaseResponse):
                 range_values = [value_alias_map[
                     range_key_expression_components[2]]]
             else:
-                hash_key_expression = key_condition_expression
+                hash_key_expression = key_condition_expression.strip('()')
                 range_comparison = None
                 range_values = []
@@ -512,7 +597,11 @@ class DynamoHandler(BaseResponse):
     def delete_item(self):
         name = self.body['TableName']
         keys = self.body['Key']
-        return_values = self.body.get('ReturnValues', '')
+        return_values = self.body.get('ReturnValues', 'NONE')
+        if return_values not in ('ALL_OLD', 'NONE'):
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, 'Return values set to invalid value')
+
         table = self.dynamodb_backend.get_table(name)
         if not table:
             er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
@@ -527,9 +616,9 @@ class DynamoHandler(BaseResponse):
         return dynamo_json_dump(item_dict)
 
     def update_item(self):
-
         name = self.body['TableName']
         key = self.body['Key']
+        return_values = self.body.get('ReturnValues', 'NONE')
         update_expression = self.body.get('UpdateExpression')
         attribute_updates = self.body.get('AttributeUpdates')
         expression_attribute_names = self.body.get(
@@ -537,6 +626,15 @@ class DynamoHandler(BaseResponse):
         expression_attribute_values = self.body.get(
             'ExpressionAttributeValues', {})
         existing_item = self.dynamodb_backend.get_item(name, key)
+        if existing_item:
+            existing_attributes = existing_item.to_json()['Attributes']
+        else:
+            existing_attributes = {}
+
+        if return_values not in ('NONE', 'ALL_OLD', 'ALL_NEW', 'UPDATED_OLD',
+                                 'UPDATED_NEW'):
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, 'Return values set to invalid value')
 
         if has_empty_keys_or_values(expression_attribute_values):
             return get_empty_str_error()
@@ -550,23 +648,11 @@ class DynamoHandler(BaseResponse):
         # expression
         if not expected:
             condition_expression = self.body.get('ConditionExpression')
-            if condition_expression and 'OR' not in condition_expression:
-                cond_items = [c.strip()
-                              for c in condition_expression.split('AND')]
-
-                if cond_items:
-                    expected = {}
-                    exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
-                    not_exists_re = re.compile(
-                        '^attribute_not_exists\s*\((.*)\)$')
-
-                for cond in cond_items:
-                    exists_m = exists_re.match(cond)
-                    not_exists_m = not_exists_re.match(cond)
-                    if exists_m:
-                        expected[exists_m.group(1)] = {'Exists': True}
-                    elif not_exists_m:
-                        expected[not_exists_m.group(1)] = {'Exists': False}
+            expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
+            expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
+            expected = condition_expression_to_expected(condition_expression,
+                                                        expression_attribute_names,
+                                                        expression_attribute_values)
 
         # Support spaces between operators in an update expression
         # E.g. `a = b + c` -> `a=b+c`
@@ -591,8 +677,26 @@ class DynamoHandler(BaseResponse):
             'TableName': name,
             'CapacityUnits': 0.5
         }
-        if not existing_item:
+        unchanged_attributes = {
+            k for k in existing_attributes.keys()
+            if existing_attributes[k] == item_dict['Attributes'].get(k)
+        }
+        changed_attributes = set(existing_attributes.keys()).union(item_dict['Attributes'].keys()).difference(unchanged_attributes)
+
+        if return_values == 'NONE':
             item_dict['Attributes'] = {}
+        elif return_values == 'ALL_OLD':
+            item_dict['Attributes'] = existing_attributes
+        elif return_values == 'UPDATED_OLD':
+            item_dict['Attributes'] = {
+                k: v for k, v in existing_attributes.items()
+                if k in changed_attributes
+            }
+        elif return_values == 'UPDATED_NEW':
+            item_dict['Attributes'] = {
+                k: v for k, v in item_dict['Attributes'].items()
+                if k in changed_attributes
+            }
 
         return dynamo_json_dump(item_dict)
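[Illustrative usage - not part of the patch] The hunks above add ReturnValues handling to PutItem, DeleteItem and UpdateItem. A minimal pytest-style sketch of the UPDATED_OLD path, assuming moto's mock_dynamodb2 decorator (table and attribute names are invented):

    import boto3
    from moto import mock_dynamodb2

    @mock_dynamodb2
    def test_update_item_updated_old():
        client = boto3.client('dynamodb', region_name='us-east-1')
        client.create_table(
            TableName='example',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})
        client.put_item(TableName='example',
                        Item={'id': {'S': '1'}, 'colour': {'S': 'red'}})
        resp = client.update_item(
            TableName='example',
            Key={'id': {'S': '1'}},
            UpdateExpression='SET colour = :c',
            ExpressionAttributeValues={':c': {'S': 'blue'}},
            ReturnValues='UPDATED_OLD')
        # UPDATED_OLD returns only the changed attributes, with their old values
        assert resp['Attributes'] == {'colour': {'S': 'red'}}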
diff --git a/moto/dynamodbstreams/__init__.py b/moto/dynamodbstreams/__init__.py
new file mode 100644
index 000000000..b35879eba
--- /dev/null
+++ b/moto/dynamodbstreams/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import unicode_literals
+from .models import dynamodbstreams_backends
+from ..core.models import base_decorator
+
+dynamodbstreams_backend = dynamodbstreams_backends['us-east-1']
+mock_dynamodbstreams = base_decorator(dynamodbstreams_backends)
diff --git a/moto/dynamodbstreams/models.py b/moto/dynamodbstreams/models.py
new file mode 100644
index 000000000..41cc6e280
--- /dev/null
+++ b/moto/dynamodbstreams/models.py
@@ -0,0 +1,129 @@
+from __future__ import unicode_literals
+
+import os
+import json
+import boto3
+import base64
+
+from moto.core import BaseBackend, BaseModel
+from moto.dynamodb2.models import dynamodb_backends
+
+
+class ShardIterator(BaseModel):
+    def __init__(self, streams_backend, stream_shard, shard_iterator_type, sequence_number=None):
+        self.id = base64.b64encode(os.urandom(472)).decode('utf-8')
+        self.streams_backend = streams_backend
+        self.stream_shard = stream_shard
+        self.shard_iterator_type = shard_iterator_type
+        if shard_iterator_type == 'TRIM_HORIZON':
+            self.sequence_number = stream_shard.starting_sequence_number
+        elif shard_iterator_type == 'LATEST':
+            self.sequence_number = stream_shard.starting_sequence_number + len(stream_shard.items)
+        elif shard_iterator_type == 'AT_SEQUENCE_NUMBER':
+            self.sequence_number = sequence_number
+        elif shard_iterator_type == 'AFTER_SEQUENCE_NUMBER':
+            self.sequence_number = sequence_number + 1
+
+    @property
+    def arn(self):
+        return '{}/stream/{}|1|{}'.format(
+            self.stream_shard.table.table_arn,
+            self.stream_shard.table.latest_stream_label,
+            self.id)
+
+    def to_json(self):
+        return {
+            'ShardIterator': self.arn
+        }
+
+    def get(self, limit=1000):
+        items = self.stream_shard.get(self.sequence_number, limit)
+        try:
+            last_sequence_number = max(i['dynamodb']['SequenceNumber'] for i in items)
+            new_shard_iterator = ShardIterator(self.streams_backend,
+                                               self.stream_shard,
+                                               'AFTER_SEQUENCE_NUMBER',
+                                               last_sequence_number)
+        except ValueError:
+            new_shard_iterator = ShardIterator(self.streams_backend,
+                                               self.stream_shard,
+                                               'AT_SEQUENCE_NUMBER',
+                                               self.sequence_number)
+
+        self.streams_backend.shard_iterators[new_shard_iterator.arn] = new_shard_iterator
+        return {
+            'NextShardIterator': new_shard_iterator.arn,
+            'Records': items
+        }
+
+
+class DynamoDBStreamsBackend(BaseBackend):
+    def __init__(self, region):
+        self.region = region
+        self.shard_iterators = {}
+
+    def reset(self):
+        region = self.region
+        self.__dict__ = {}
+        self.__init__(region)
+
+    @property
+    def dynamodb(self):
+        return dynamodb_backends[self.region]
+
+    def _get_table_from_arn(self, arn):
+        table_name = arn.split(':', 6)[5].split('/')[1]
+        return self.dynamodb.get_table(table_name)
+
+    def describe_stream(self, arn):
+        table = self._get_table_from_arn(arn)
+        resp = {'StreamDescription': {
+            'StreamArn': arn,
+            'StreamLabel': table.latest_stream_label,
+            'StreamStatus': ('ENABLED' if table.latest_stream_label
+                             else 'DISABLED'),
+            'StreamViewType': table.stream_specification['StreamViewType'],
+            'CreationRequestDateTime': table.stream_shard.created_on.isoformat(),
+            'TableName': table.name,
+            'KeySchema': table.schema,
+            'Shards': ([table.stream_shard.to_json()] if table.stream_shard
+                       else [])
+        }}
+
+        return json.dumps(resp)
+
+    def list_streams(self, table_name=None):
+        streams = []
+        for table in self.dynamodb.tables.values():
+            if table_name is not None and table.name != table_name:
+                continue
+            if table.latest_stream_label:
+                d = table.describe(base_key='Table')
+                streams.append({
+                    'StreamArn': d['Table']['LatestStreamArn'],
+                    'TableName': d['Table']['TableName'],
+                    'StreamLabel': d['Table']['LatestStreamLabel']
+                })
+
+        return json.dumps({'Streams': streams})
+
+    def get_shard_iterator(self, arn, shard_id, shard_iterator_type, sequence_number=None):
+        table = self._get_table_from_arn(arn)
+        assert table.stream_shard.id == shard_id
+
+        shard_iterator = ShardIterator(self, table.stream_shard,
+                                       shard_iterator_type,
+                                       sequence_number)
+        self.shard_iterators[shard_iterator.arn] = shard_iterator
+
+        return json.dumps(shard_iterator.to_json())
+
+    def get_records(self, iterator_arn, limit):
+        shard_iterator = self.shard_iterators[iterator_arn]
+        return json.dumps(shard_iterator.get(limit))
+
+
+available_regions = boto3.session.Session().get_available_regions(
+    'dynamodbstreams')
+dynamodbstreams_backends = {region: DynamoDBStreamsBackend(region=region)
+                            for region in available_regions}
diff --git a/moto/dynamodbstreams/responses.py b/moto/dynamodbstreams/responses.py
new file mode 100644
index 000000000..c9c113615
--- /dev/null
+++ b/moto/dynamodbstreams/responses.py
@@ -0,0 +1,34 @@
+from __future__ import unicode_literals
+
+from moto.core.responses import BaseResponse
+
+from .models import dynamodbstreams_backends
+
+
+class DynamoDBStreamsHandler(BaseResponse):
+
+    @property
+    def backend(self):
+        return dynamodbstreams_backends[self.region]
+
+    def describe_stream(self):
+        arn = self._get_param('StreamArn')
+        return self.backend.describe_stream(arn)
+
+    def list_streams(self):
+        table_name = self._get_param('TableName')
+        return self.backend.list_streams(table_name)
+
+    def get_shard_iterator(self):
+        arn = self._get_param('StreamArn')
+        shard_id = self._get_param('ShardId')
+        shard_iterator_type = self._get_param('ShardIteratorType')
+        return self.backend.get_shard_iterator(arn, shard_id,
+                                               shard_iterator_type)
+
+    def get_records(self):
+        arn = self._get_param('ShardIterator')
+        limit = self._get_param('Limit')
+        if limit is None:
+            limit = 1000
+        return self.backend.get_records(arn, limit)
diff --git a/moto/dynamodbstreams/urls.py b/moto/dynamodbstreams/urls.py
new file mode 100644
index 000000000..1d0f94c35
--- /dev/null
+++ b/moto/dynamodbstreams/urls.py
@@ -0,0 +1,10 @@
+from __future__ import unicode_literals
+from .responses import DynamoDBStreamsHandler
+
+url_bases = [
+    "https?://streams.dynamodb.(.+).amazonaws.com"
+]
+
+url_paths = {
+    "{0}/$": DynamoDBStreamsHandler.dispatch,
+}
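[Illustrative usage - not part of the patch] A sketch of the new DynamoDB Streams mock. It assumes the accompanying dynamodb2 model changes (not shown in this file) accept a StreamSpecification at table creation and append stream records on writes:

    import boto3
    from moto import mock_dynamodb2, mock_dynamodbstreams

    @mock_dynamodb2
    @mock_dynamodbstreams
    def test_read_stream_records():
        ddb = boto3.client('dynamodb', region_name='us-east-1')
        table = ddb.create_table(
            TableName='example',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
            StreamSpecification={'StreamEnabled': True,
                                 'StreamViewType': 'NEW_AND_OLD_IMAGES'})
        stream_arn = table['TableDescription']['LatestStreamArn']
        ddb.put_item(TableName='example', Item={'id': {'S': '1'}})

        streams = boto3.client('dynamodbstreams', region_name='us-east-1')
        desc = streams.describe_stream(StreamArn=stream_arn)['StreamDescription']
        iterator = streams.get_shard_iterator(
            StreamArn=stream_arn,
            ShardId=desc['Shards'][0]['ShardId'],
            ShardIteratorType='TRIM_HORIZON')['ShardIterator']
        records = streams.get_records(ShardIterator=iterator)['Records']
        # one record for the PutItem above, shaped like a real stream record
        assert len(records) == 1 and 'dynamodb' in records[0]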
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
old mode 100755
new mode 100644
index b94cac479..0936d2be9
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -134,6 +134,8 @@ def utc_date_and_time():
 
 
 def validate_resource_ids(resource_ids):
+    if not resource_ids:
+        raise MissingParameterError(parameter='resourceIdSet')
     for resource_id in resource_ids:
         if not is_valid_resource_id(resource_id):
             raise InvalidID(resource_id=resource_id)
@@ -189,7 +191,7 @@ class NetworkInterface(TaggedEC2Resource):
         self.ec2_backend = ec2_backend
         self.id = random_eni_id()
         self.device_index = device_index
-        self.private_ip_address = private_ip_address
+        self.private_ip_address = private_ip_address or random_private_ip()
         self.subnet = subnet
         self.instance = None
         self.attachment_id = None
@@ -388,6 +390,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
         self.ebs_optimized = kwargs.get("ebs_optimized", False)
         self.source_dest_check = "true"
         self.launch_time = utc_date_and_time()
+        self.ami_launch_index = kwargs.get("ami_launch_index", 0)
         self.disable_api_termination = kwargs.get("disable_api_termination", False)
         self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
         associate_public_ip = kwargs.get("associate_public_ip", False)
@@ -719,6 +722,7 @@ class InstanceBackend(object):
         instance_tags = tags.get('instance', {})
 
         for index in range(count):
+            kwargs["ami_launch_index"] = index
             new_instance = Instance(
                 self,
                 image_id,
@@ -1115,7 +1119,7 @@ class Ami(TaggedEC2Resource):
         elif filter_name == 'image-id':
             return self.id
         elif filter_name == 'is-public':
-            return str(self.is_public)
+            return self.is_public_string
         elif filter_name == 'state':
             return self.state
         elif filter_name == 'name':
@@ -2230,6 +2234,10 @@ class VPCPeeringConnectionStatus(object):
         self.code = code
         self.message = message
 
+    def deleted(self):
+        self.code = 'deleted'
+        self.message = 'Deleted by {deleter ID}'
+
     def initiating(self):
         self.code = 'initiating-request'
         self.message = 'Initiating Request to {accepter ID}'
@@ -2292,9 +2300,8 @@ class VPCPeeringConnectionBackend(object):
         return self.vpc_pcxs.get(vpc_pcx_id)
 
     def delete_vpc_peering_connection(self, vpc_pcx_id):
-        deleted = self.vpc_pcxs.pop(vpc_pcx_id, None)
-        if not deleted:
-            raise InvalidVPCPeeringConnectionIdError(vpc_pcx_id)
+        deleted = self.get_vpc_peering_connection(vpc_pcx_id)
+        deleted._status.deleted()
         return deleted
 
     def accept_vpc_peering_connection(self, vpc_pcx_id):
@@ -2461,7 +2468,7 @@ class SubnetBackend(object):
                         default_for_az, map_public_ip_on_launch)
 
         # AWS associates a new subnet with the default Network ACL
-        self.associate_default_network_acl_with_subnet(subnet_id)
+        self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
         self.subnets[availability_zone][subnet_id] = subnet
         return subnet
@@ -2876,7 +2883,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
     def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
                  valid_from, valid_until, launch_group, availability_zone_group,
                  key_name, security_groups, user_data, instance_type, placement,
-                 kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
+                 kernel_id, ramdisk_id, monitoring_enabled, subnet_id, tags, spot_fleet_id,
                  **kwargs):
         super(SpotInstanceRequest, self).__init__(**kwargs)
         ls = LaunchSpecification()
@@ -2900,6 +2907,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
         ls.monitored = monitoring_enabled
         ls.subnet_id = subnet_id
         self.spot_fleet_id = spot_fleet_id
+        self.tags = tags
 
         if security_groups:
             for group_name in security_groups:
@@ -2933,6 +2941,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
             security_group_names=[],
             security_group_ids=self.launch_specification.groups,
             spot_fleet_id=self.spot_fleet_id,
+            tags=self.tags,
         )
         instance = reservation.instances[0]
         return instance
@@ -2948,15 +2957,16 @@ class SpotRequestBackend(object):
                               valid_until, launch_group, availability_zone_group,
                               key_name, security_groups, user_data,
                               instance_type, placement, kernel_id, ramdisk_id,
-                              monitoring_enabled, subnet_id, spot_fleet_id=None):
+                              monitoring_enabled, subnet_id, tags=None, spot_fleet_id=None):
         requests = []
+        tags = tags or {}
         for _ in range(count):
             spot_request_id = random_spot_request_id()
             request = SpotInstanceRequest(self,
                                           spot_request_id, price, image_id, type, valid_from, valid_until,
                                           launch_group, availability_zone_group, key_name, security_groups,
                                           user_data, instance_type, placement, kernel_id, ramdisk_id,
-                                          monitoring_enabled, subnet_id, spot_fleet_id)
+                                          monitoring_enabled, subnet_id, tags, spot_fleet_id)
             self.spot_instance_requests[spot_request_id] = request
             requests.append(request)
         return requests
@@ -2976,8 +2986,8 @@ class SpotRequestBackend(object):
 
 class SpotFleetLaunchSpec(object):
     def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
-                 instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
-                 weighted_capacity):
+                 instance_type, key_name, monitoring, spot_price, subnet_id, tag_specifications,
+                 user_data, weighted_capacity):
         self.ebs_optimized = ebs_optimized
         self.group_set = group_set
         self.iam_instance_profile = iam_instance_profile
@@ -2987,6 +2997,7 @@ class SpotFleetLaunchSpec(object):
         self.monitoring = monitoring
         self.spot_price = spot_price
         self.subnet_id = subnet_id
+        self.tag_specifications = tag_specifications
         self.user_data = user_data
         self.weighted_capacity = float(weighted_capacity)
 
@@ -3017,6 +3028,7 @@ class SpotFleetRequest(TaggedEC2Resource):
                 monitoring=spec.get('monitoring._enabled'),
                 spot_price=spec.get('spot_price', self.spot_price),
                 subnet_id=spec['subnet_id'],
+                tag_specifications=self._parse_tag_specifications(spec),
                 user_data=spec.get('user_data'),
                 weighted_capacity=spec['weighted_capacity'],
             )
@@ -3099,6 +3111,7 @@ class SpotFleetRequest(TaggedEC2Resource):
                 monitoring_enabled=launch_spec.monitoring,
                 subnet_id=launch_spec.subnet_id,
                 spot_fleet_id=self.id,
+                tags=launch_spec.tag_specifications,
             )
             self.spot_requests.extend(requests)
         self.fulfilled_capacity += added_weight
@@ -3121,6 +3134,25 @@ class SpotFleetRequest(TaggedEC2Resource):
         self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
         self.ec2_backend.terminate_instances(instance_ids)
 
+    def _parse_tag_specifications(self, spec):
+        try:
+            tag_spec_num = max([int(key.split('.')[1]) for key in spec if key.startswith("tag_specification_set")])
+        except ValueError:  # no tag specifications
+            return {}
+
+        tag_specifications = {}
+        for si in range(1, tag_spec_num + 1):
+            resource_type = spec["tag_specification_set.{si}._resource_type".format(si=si)]
+
+            tags = [key for key in spec if key.startswith("tag_specification_set.{si}._tag".format(si=si))]
+            tag_num = max([int(key.split('.')[3]) for key in tags])
+            tag_specifications[resource_type] = dict((
+                spec["tag_specification_set.{si}._tag.{ti}._key".format(si=si, ti=ti)],
+                spec["tag_specification_set.{si}._tag.{ti}._value".format(si=si, ti=ti)],
+            ) for ti in range(1, tag_num + 1))
+
+        return tag_specifications
+
 
 class SpotFleetBackend(object):
     def __init__(self):
@@ -3557,8 +3589,22 @@ class NetworkAclBackend(object):
         self.get_vpc(vpc_id)
         network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
         self.network_acls[network_acl_id] = network_acl
+        if default:
+            self.add_default_entries(network_acl_id)
         return network_acl
 
+    def add_default_entries(self, network_acl_id):
+        default_acl_entries = [
+            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'},
+            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'},
+            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'},
+            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'}
+        ]
+        for entry in default_acl_entries:
+            self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
+                                          rule_action=entry['rule_action'], egress=entry['egress'], cidr_block='0.0.0.0/0',
+                                          icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None)
+
     def get_all_network_acls(self, network_acl_ids=None, filters=None):
         network_acls = self.network_acls.values()
 
@@ -3633,9 +3679,9 @@ class NetworkAclBackend(object):
             new_acl.associations[new_assoc_id] = association
         return association
 
-    def associate_default_network_acl_with_subnet(self, subnet_id):
+    def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id):
         association_id = random_network_acl_subnet_association_id()
-        acl = next(acl for acl in self.network_acls.values() if acl.default)
+        acl = next(acl for acl in self.network_acls.values() if acl.default and acl.vpc_id == vpc_id)
         acl.associations[association_id] = NetworkAclAssociation(self, association_id,
                                                                  subnet_id, acl.id)
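[Illustrative usage - not part of the patch] With add_default_entries above, the default network ACL of a freshly created VPC should carry the standard allow (100) and deny (32767) rules for both directions. A sketch (response shape assumed to follow the EC2 API):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def test_default_network_acl_entries():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
        acls = ec2.describe_network_acls()['NetworkAcls']
        default_acl = next(acl for acl in acls if acl['VpcId'] == vpc_id)
        rule_numbers = sorted(e['RuleNumber'] for e in default_acl['Entries'])
        # one allow and one deny rule each for egress and ingress
        assert rule_numbers == [100, 100, 32767, 32767]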
diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py
index aa0d7f73b..acd37b283 100644
--- a/moto/ec2/responses/elastic_block_store.py
+++ b/moto/ec2/responses/elastic_block_store.py
@@ -150,16 +150,18 @@
 CREATE_VOLUME_RESPONSE = """
diff --git a/moto/iam/models.py b/moto/iam/models.py
--- a/moto/iam/models.py
+++ b/moto/iam/models.py
+        if len(tag_key) > 128:
+            raise TagKeyTooBig(tag_key, param=exception_param)
+
+        # Validate that the tag key fits the proper Regex:
+        # [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
+        match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
+        # Kudos if you can come up with a better way of doing a global search :)
+        if not len(match) or len(match[0]) < len(tag_key):
+            raise InvalidTagCharacters(tag_key, param=exception_param)
+
+    def _check_tag_duplicate(self, all_tags, tag_key):
+        """Validates that a tag key is not a duplicate
+
+        :param all_tags: Dict to check if there is a duplicate tag.
+        :param tag_key: The tag key to check against.
+        :return:
+        """
+        if tag_key in all_tags:
+            raise DuplicateTags()
+
+    def list_role_tags(self, role_name, marker, max_items=100):
+        role = self.get_role(role_name)
+
+        max_items = int(max_items)
+        tag_index = sorted(role.tags)
+        start_idx = int(marker) if marker else 0
+
+        tag_index = tag_index[start_idx:start_idx + max_items]
+
+        if len(role.tags) <= (start_idx + max_items):
+            marker = None
+        else:
+            marker = str(start_idx + max_items)
+
+        # Make the tag list of dict's:
+        tags = [role.tags[tag] for tag in tag_index]
+
+        return tags, marker
+
+    def tag_role(self, role_name, tags):
+        if len(tags) > 50:
+            raise TooManyTags(tags)
+
+        role = self.get_role(role_name)
+
+        tag_keys = {}
+        for tag in tags:
+            # Need to index by the lowercase tag key since the keys are case insensitive, but their case is retained.
+            ref_key = tag['Key'].lower()
+            self._check_tag_duplicate(tag_keys, ref_key)
+            self._validate_tag_key(tag['Key'])
+            if len(tag['Value']) > 256:
+                raise TagValueTooBig(tag['Value'])
+
+            tag_keys[ref_key] = tag
+
+        role.tags.update(tag_keys)
+
+    def untag_role(self, role_name, tag_keys):
+        if len(tag_keys) > 50:
+            raise TooManyTags(tag_keys, param='tagKeys')
+
+        role = self.get_role(role_name)
+
+        for key in tag_keys:
+            ref_key = key.lower()
+            self._validate_tag_key(key, exception_param='tagKeys')
+
+            role.tags.pop(ref_key, None)
+
     def create_policy_version(self, policy_arn, policy_document, set_as_default):
         policy = self.get_policy(policy_arn)
         if not policy:
             raise IAMNotFoundException("Policy not found")
         version = PolicyVersion(policy_arn, policy_document, set_as_default)
         policy.versions.append(version)
-        version.version_id = 'v{0}'.format(len(policy.versions))
+        version.version_id = 'v{0}'.format(policy.next_version_num)
+        policy.next_version_num += 1
         if set_as_default:
             policy.default_version_id = version.version_id
         return version
@@ -765,6 +899,70 @@ class IAMBackend(BaseBackend):
 
         return users
 
+    def update_user(self, user_name, new_path=None, new_user_name=None):
+        try:
+            user = self.users[user_name]
+        except KeyError:
+            raise IAMNotFoundException("User {0} not found".format(user_name))
+
+        if new_path:
+            user.path = new_path
+        if new_user_name:
+            user.name = new_user_name
+            self.users[new_user_name] = self.users.pop(user_name)
+
+    def list_roles(self, path_prefix, marker, max_items):
+        roles = None
+        try:
+            roles = self.roles.values()
+        except KeyError:
+            raise IAMNotFoundException(
+                "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items))
+
+        return roles
+
+    def upload_signing_certificate(self, user_name, body):
+        user = self.get_user(user_name)
+        cert_id = random_resource_id(size=32)
+
+        # Validate the signing cert:
+        try:
+            if sys.version_info < (3, 0):
+                data = bytes(body)
+            else:
+                data = bytes(body, 'utf8')
+
+            x509.load_pem_x509_certificate(data, default_backend())
+
+        except Exception:
+            raise MalformedCertificate(body)
+
+        user.signing_certificates[cert_id] = SigningCertificate(cert_id, user_name, body)
+
+        return user.signing_certificates[cert_id]
+
+    def delete_signing_certificate(self, user_name, cert_id):
+        user = self.get_user(user_name)
+
+        try:
+            del user.signing_certificates[cert_id]
+        except KeyError:
+            raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id))
+
+    def list_signing_certificates(self, user_name):
+        user = self.get_user(user_name)
+
+        return list(user.signing_certificates.values())
+
+    def update_signing_certificate(self, user_name, cert_id, status):
+        user = self.get_user(user_name)
+
+        try:
+            user.signing_certificates[cert_id].status = status
+
+        except KeyError:
+            raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id))
+
     def create_login_profile(self, user_name, password):
         # This does not currently deal with PasswordPolicyViolation.
         user = self.get_user(user_name)
@@ -838,6 +1036,24 @@ class IAMBackend(BaseBackend):
         user = self.get_user(user_name)
         user.update_access_key(access_key_id, status)
 
+    def get_access_key_last_used(self, access_key_id):
+        access_keys_list = self.get_all_access_keys_for_all_users()
+        for key in access_keys_list:
+            if key.access_key_id == access_key_id:
+                return {
+                    'user_name': key.user_name,
+                    'last_used': key.last_used
+                }
+        else:
+            raise IAMNotFoundException(
+                "The Access Key with id {0} cannot be found".format(access_key_id))
+
+    def get_all_access_keys_for_all_users(self):
+        access_keys_list = []
+        for user_name in self.users:
+            access_keys_list += self.get_all_access_keys(user_name)
+        return access_keys_list
+
     def get_all_access_keys(self, user_name, marker=None, max_items=None):
         user = self.get_user(user_name)
         keys = user.get_all_access_keys()
@@ -937,5 +1153,33 @@ class IAMBackend(BaseBackend):
             'managed_policies': returned_policies
         }
 
+    def create_saml_provider(self, name, saml_metadata_document):
+        saml_provider = SAMLProvider(name, saml_metadata_document)
+        self.saml_providers[name] = saml_provider
+        return saml_provider
+
+    def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
+        saml_provider = self.get_saml_provider(saml_provider_arn)
+        saml_provider.saml_metadata_document = saml_metadata_document
+        return saml_provider
+
+    def delete_saml_provider(self, saml_provider_arn):
+        try:
+            for saml_provider in list(self.list_saml_providers()):
+                if saml_provider.arn == saml_provider_arn:
+                    del self.saml_providers[saml_provider.name]
+        except KeyError:
+            raise IAMNotFoundException(
+                "SAMLProvider {0} not found".format(saml_provider_arn))
+
+    def list_saml_providers(self):
+        return self.saml_providers.values()
+
+    def get_saml_provider(self, saml_provider_arn):
+        for saml_provider in self.list_saml_providers():
+            if saml_provider.arn == saml_provider_arn:
+                return saml_provider
+        raise IAMNotFoundException("SamlProvider {0} not found".format(saml_provider_arn))
+
 
 iam_backend = IAMBackend()
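[Illustrative usage - not part of the patch] The role-tagging backend methods above back the IAM TagRole, ListRoleTags and UntagRole APIs. A minimal sketch:

    import boto3
    from moto import mock_iam

    @mock_iam
    def test_role_tags_roundtrip():
        iam = boto3.client('iam', region_name='us-east-1')
        iam.create_role(RoleName='example', AssumeRolePolicyDocument='{}')
        iam.tag_role(RoleName='example', Tags=[{'Key': 'env', 'Value': 'test'}])
        assert iam.list_role_tags(RoleName='example')['Tags'] == [
            {'Key': 'env', 'Value': 'test'}]
        iam.untag_role(RoleName='example', TagKeys=['env'])
        assert iam.list_role_tags(RoleName='example')['Tags'] == []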
diff --git a/moto/iam/responses.py b/moto/iam/responses.py
index 22558f3f6..e5b4c9070 100644
--- a/moto/iam/responses.py
+++ b/moto/iam/responses.py
@@ -107,6 +107,69 @@ class IamResponse(BaseResponse):
         template = self.response_template(LIST_POLICIES_TEMPLATE)
         return template.render(policies=policies, marker=marker)
 
+    def list_entities_for_policy(self):
+        policy_arn = self._get_param('PolicyArn')
+
+        # Options 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy
+        entity = self._get_param('EntityFilter')
+        path_prefix = self._get_param('PathPrefix')
+        # policy_usage_filter = self._get_param('PolicyUsageFilter')
+        marker = self._get_param('Marker')
+        max_items = self._get_param('MaxItems')
+
+        entity_roles = []
+        entity_groups = []
+        entity_users = []
+
+        if entity == 'User':
+            users = iam_backend.list_users(path_prefix, marker, max_items)
+            if users:
+                for user in users:
+                    for p in user.managed_policies:
+                        if p == policy_arn:
+                            entity_users.append(user.name)
+
+        elif entity == 'Role':
+            roles = iam_backend.list_roles(path_prefix, marker, max_items)
+            if roles:
+                for role in roles:
+                    for p in role.managed_policies:
+                        if p == policy_arn:
+                            entity_roles.append(role.name)
+
+        elif entity == 'Group':
+            groups = iam_backend.list_groups()
+            if groups:
+                for group in groups:
+                    for p in group.managed_policies:
+                        if p == policy_arn:
+                            entity_groups.append(group.name)
+
+        elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy':
+            users = iam_backend.list_users(path_prefix, marker, max_items)
+            if users:
+                for user in users:
+                    for p in user.managed_policies:
+                        if p == policy_arn:
+                            entity_users.append(user.name)
+
+            roles = iam_backend.list_roles(path_prefix, marker, max_items)
+            if roles:
+                for role in roles:
+                    for p in role.managed_policies:
+                        if p == policy_arn:
+                            entity_roles.append(role.name)
+
+            groups = iam_backend.list_groups()
+            if groups:
+                for group in groups:
+                    for p in group.managed_policies:
+                        if p == policy_arn:
+                            entity_groups.append(group.name)
+
+        template = self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE)
+        return template.render(roles=entity_roles, users=entity_users, groups=entity_groups)
+
     def create_role(self):
         role_name = self._get_param('RoleName')
         path = self._get_param('Path')
@@ -169,6 +232,20 @@ class IamResponse(BaseResponse):
         template = self.response_template(GENERIC_EMPTY_TEMPLATE)
         return template.render(name="UpdateAssumeRolePolicyResponse")
 
+    def update_role_description(self):
+        role_name = self._get_param('RoleName')
+        description = self._get_param('Description')
+        role = iam_backend.update_role_description(role_name, description)
+        template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE)
+        return template.render(role=role)
+
+    def update_role(self):
+        role_name = self._get_param('RoleName')
+        description = self._get_param('Description')
+        role = iam_backend.update_role(role_name, description)
+        template = self.response_template(UPDATE_ROLE_TEMPLATE)
+        return template.render(role=role)
+
     def create_policy_version(self):
         policy_arn = self._get_param('PolicyArn')
         policy_document = self._get_param('PolicyDocument')
@@ -201,7 +278,7 @@ class IamResponse(BaseResponse):
 
     def create_instance_profile(self):
         profile_name = self._get_param('InstanceProfileName')
-        path = self._get_param('Path')
+        path = self._get_param('Path', '/')
 
         profile = iam_backend.create_instance_profile(
             profile_name, path, role_ids=[])
@@ -363,6 +440,18 @@ class IamResponse(BaseResponse):
         template = self.response_template(LIST_USERS_TEMPLATE)
         return template.render(action='List', users=users)
 
+    def update_user(self):
+        user_name = self._get_param('UserName')
+        new_path = self._get_param('NewPath')
+        new_user_name = self._get_param('NewUserName')
+        iam_backend.update_user(user_name, new_path, new_user_name)
+        if new_user_name:
+            user = iam_backend.get_user(new_user_name)
+        else:
+            user = iam_backend.get_user(user_name)
+        template = self.response_template(USER_TEMPLATE)
+        return template.render(action='Update', user=user)
+
     def create_login_profile(self):
         user_name = self._get_param('UserName')
         password = self._get_param('Password')
@@ -454,9 +543,14 @@ class IamResponse(BaseResponse):
         template = self.response_template(GENERIC_EMPTY_TEMPLATE)
         return template.render(name='UpdateAccessKey')
 
+    def get_access_key_last_used(self):
+        access_key_id = self._get_param('AccessKeyId')
+        last_used_response = iam_backend.get_access_key_last_used(access_key_id)
+        template = self.response_template(GET_ACCESS_KEY_LAST_USED_TEMPLATE)
+        return template.render(user_name=last_used_response["user_name"], last_used=last_used_response["last_used"])
+
     def list_access_keys(self):
         user_name = self._get_param('UserName')
-
         keys = iam_backend.get_all_access_keys(user_name)
         template = self.response_template(LIST_ACCESS_KEYS_TEMPLATE)
         return template.render(user_name=user_name, keys=keys)
@@ -549,9 +643,137 @@ class IamResponse(BaseResponse):
             policies=account_details['managed_policies'],
             users=account_details['users'],
             groups=account_details['groups'],
-            roles=account_details['roles']
+            roles=account_details['roles'],
+            get_groups_for_user=iam_backend.get_groups_for_user
         )
 
+    def create_saml_provider(self):
+        saml_provider_name = self._get_param('Name')
+        saml_metadata_document = self._get_param('SAMLMetadataDocument')
+        saml_provider = iam_backend.create_saml_provider(saml_provider_name, saml_metadata_document)
+
+        template = self.response_template(CREATE_SAML_PROVIDER_TEMPLATE)
+        return template.render(saml_provider=saml_provider)
+
+    def update_saml_provider(self):
+        saml_provider_arn = self._get_param('SAMLProviderArn')
+        saml_metadata_document = self._get_param('SAMLMetadataDocument')
+        saml_provider = iam_backend.update_saml_provider(saml_provider_arn, saml_metadata_document)
+
+        template = self.response_template(UPDATE_SAML_PROVIDER_TEMPLATE)
+        return template.render(saml_provider=saml_provider)
+
+    def delete_saml_provider(self):
+        saml_provider_arn = self._get_param('SAMLProviderArn')
+        iam_backend.delete_saml_provider(saml_provider_arn)
+
+        template = self.response_template(DELETE_SAML_PROVIDER_TEMPLATE)
+        return template.render()
+
+    def list_saml_providers(self):
+        saml_providers = iam_backend.list_saml_providers()
+
+        template = self.response_template(LIST_SAML_PROVIDERS_TEMPLATE)
+        return template.render(saml_providers=saml_providers)
+
+    def get_saml_provider(self):
+        saml_provider_arn = self._get_param('SAMLProviderArn')
+        saml_provider = iam_backend.get_saml_provider(saml_provider_arn)
+
+        template = self.response_template(GET_SAML_PROVIDER_TEMPLATE)
+        return template.render(saml_provider=saml_provider)
+
+    def upload_signing_certificate(self):
+        user_name = self._get_param('UserName')
+        cert_body = self._get_param('CertificateBody')
+
+        cert = iam_backend.upload_signing_certificate(user_name, cert_body)
+        template = self.response_template(UPLOAD_SIGNING_CERTIFICATE_TEMPLATE)
+        return template.render(cert=cert)
+
+    def update_signing_certificate(self):
+        user_name = self._get_param('UserName')
+        cert_id = self._get_param('CertificateId')
+        status = self._get_param('Status')
+
+        iam_backend.update_signing_certificate(user_name, cert_id, status)
+        template = self.response_template(UPDATE_SIGNING_CERTIFICATE_TEMPLATE)
+        return template.render()
+
+    def delete_signing_certificate(self):
+        user_name = self._get_param('UserName')
+        cert_id = self._get_param('CertificateId')
+
+        iam_backend.delete_signing_certificate(user_name, cert_id)
+        template = self.response_template(DELETE_SIGNING_CERTIFICATE_TEMPLATE)
+        return template.render()
+
+    def list_signing_certificates(self):
+        user_name = self._get_param('UserName')
+
+        certs = iam_backend.list_signing_certificates(user_name)
+        template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE)
+        return template.render(user_name=user_name, certificates=certs)
+
+    def list_role_tags(self):
+        role_name = self._get_param('RoleName')
+        marker = self._get_param('Marker')
+        max_items = self._get_param('MaxItems', 100)
+
+        tags, marker = iam_backend.list_role_tags(role_name, marker, max_items)
+
+        template = self.response_template(LIST_ROLE_TAG_TEMPLATE)
+        return template.render(tags=tags, marker=marker)
+
+    def tag_role(self):
+        role_name = self._get_param('RoleName')
+        tags = self._get_multi_param('Tags.member')
+
+        iam_backend.tag_role(role_name, tags)
+
+        template = self.response_template(TAG_ROLE_TEMPLATE)
+        return template.render()
+
+    def untag_role(self):
+        role_name = self._get_param('RoleName')
+        tag_keys = self._get_multi_param('TagKeys.member')
+
+        iam_backend.untag_role(role_name, tag_keys)
+
+        template = self.response_template(UNTAG_ROLE_TEMPLATE)
+        return template.render()
+
+
+LIST_ENTITIES_FOR_POLICY_TEMPLATE = """
+
+
+    {% for role in roles %}
+
+      {{ role }}
+
+    {% endfor %}
+
+
+    {% for group in groups %}
+
+      {{ group }}
+
+    {% endfor %}
+
+    false
+
+    {% for user in users %}
+
+      {{ user }}
+
+    {% endfor %}
+
+
+
+  eb358e22-9d1f-11e4-93eb-190ecEXAMPLE
+
+"""
+
 
 ATTACH_ROLE_POLICY_TEMPLATE = """
@@ -734,7 +956,7 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """
+UPDATE_ROLE_TEMPLATE = """
+
+
+
+
+  df37e965-9967-11e1-a4c3-270EXAMPLE04
+
+"""
+
+UPDATE_ROLE_DESCRIPTION_TEMPLATE = """
+
+
+    {{ role.path }}
+    {{ role.arn }}
+    {{ role.name }}
+    {{ role.assume_role_policy_document }}
+    {{ role.create_date.isoformat() }}
+    {{ role.id }}
+    {% if role.tags %}
+
+    {% for tag in role.get_tags() %}
+
+      {{ tag['Key'] }}
+      {{ tag['Value'] }}
+
+    {% endfor %}
+
+    {% endif %}
+
+
+
+  df37e965-9967-11e1-a4c3-270EXAMPLE04
+
+"""
+
 GET_ROLE_TEMPLATE = """
@@ -803,8 +1059,18 @@ GET_ROLE_TEMPLATE = """
           {{ policy }}
         {% endfor %}
+
+      false
-      false
       7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
@@ -1240,11 +1506,23 @@ LIST_ACCESS_KEYS_TEMPLATE = """
 """
+
+
+GET_ACCESS_KEY_LAST_USED_TEMPLATE = """
+
+
+    {{ user_name }}
+
+      {{ last_used }}
+
+
+
+"""
+
 CREDENTIAL_REPORT_GENERATING = """
-    STARTED
-    No report exists. Starting a new report generation task
+   STARTED
+   No report exists. Starting a new report generation task
    fa788a82-aa8a-11e4-a278-1786c418872b"
@@ -1253,7 +1531,7 @@
 CREDENTIAL_REPORT_GENERATED = """
-    COMPLETE
+   COMPLETE
    fa788a82-aa8a-11e4-a278-1786c418872b"
@@ -1262,7 +1540,7 @@
 CREDENTIAL_REPORT = """
-    {{ report }}
+   {{ report }}
    2015-02-02T20:02:02Z
    text/csv
@@ -1277,23 +1555,23 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """
     {% for profile in instance_profiles %}
-    {{ profile.id }}
-
-    {% for role in profile.roles %}
-
-      {{ role.path }}
-      {{ role.arn }}
-      {{ role.name }}
-      {{ role.assume_policy_document }}
-      2012-05-09T15:45:35Z
-      {{ role.id }}
-
-    {% endfor %}
-
-    {{ profile.name }}
-    {{ profile.path }}
-    {{ profile.arn }}
-    2012-05-09T16:27:11Z
+    {{ profile.id }}
+
+    {% for role in profile.roles %}
+
+      {{ role.path }}
+      {{ role.arn }}
+      {{ role.name }}
+      {{ role.assume_policy_document }}
+      {{ role.create_date }}
+      {{ role.id }}
+
+    {% endfor %}
+
+    {{ profile.name }}
+    {{ profile.path }}
+    {{ profile.arn }}
+    {{ profile.create_date }}
     {% endfor %}
@@ -1376,13 +1654,24 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """
     {% for user in users %}
-
-
+
+        {% for group in get_groups_for_user(user.name) %}
+        {{ group.name }}
+        {% endfor %}
+
+
+        {% for policy in user.managed_policies %}
+
+          {{ user.managed_policies[policy].name }}
+          {{ policy }}
+
+        {% endfor %}
+
       {{ user.id }}
       {{ user.path }}
       {{ user.name }}
       {{ user.arn }}
-      2012-05-09T15:45:35Z
+      {{ user.created_iso_8601 }}
     {% endfor %}
@@ -1391,53 +1680,75 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """
     {% for group in groups %}
       {{ group.id }}
-      {% for policy in group.managed_policies %}
-
-        {{ policy.name }}
-        {{ policy.arn }}
-
+      {% for policy_arn in group.managed_policies %}
+
+        {{ group.managed_policies[policy_arn].name }}
+        {{ policy_arn }}
+
       {% endfor %}
       {{ group.name }}
       {{ group.path }}
       {{ group.arn }}
-      2012-05-09T16:27:11Z
-
+      {{ group.create_date }}
+
+      {% for policy in group.policies %}
+
+        {{ policy }}
+        {{ group.get_policy(policy) }}
+
+      {% endfor %}
+
     {% endfor %}
     {% for role in roles %}
-
-
+
+        {% for inline_policy in role.policies %}
-            {{ policy.name }}
-            {{ policy.arn }}
+            {{ inline_policy }}
+            {{ role.policies[inline_policy] }}
+
+        {% endfor %}
+
+
+        {% for policy_arn in role.managed_policies %}
+
+          {{ role.managed_policies[policy_arn].name }}
+          {{ policy_arn }}
 
       {% endfor %}
+
+      {% for tag in role.get_tags() %}
+
+        {{ tag['Key'] }}
+        {{ tag['Value'] }}
+
+      {% endfor %}
+
     {% for profile in instance_profiles %}
-      {{ profile.id }}
-
-      {% for role in profile.roles %}
-
-        {{ role.path }}
-        {{ role.arn }}
-        {{ role.name }}
-        {{ role.assume_role_policy_document }}
-        2012-05-09T15:45:35Z
-        {{ role.id }}
-
-      {% endfor %}
-
-      {{ profile.name }}
-      {{ profile.path }}
-      {{ profile.arn }}
-      2012-05-09T16:27:11Z
+      {{ profile.id }}
+
+      {% for role in profile.roles %}
+
+        {{ role.path }}
+        {{ role.arn }}
+        {{ role.name }}
+        {{ role.assume_role_policy_document }}
+        {{ role.create_date }}
+        {{ role.id }}
+
+      {% endfor %}
+
+      {{ profile.name }}
+      {{ profile.path }}
+      {{ profile.arn }}
+      {{ profile.create_date }}
     {% endfor %}
@@ -1445,7 +1756,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """
       {{ role.arn }}
       {{ role.name }}
       {{ role.assume_role_policy_document }}
-      2014-07-30T17:09:20Z
+      {{ role.create_date }}
       {{ role.id }}
     {% endfor %}
@@ -1458,25 +1769,20 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """
       {{ policy.id }}
       {{ policy.path }}
+      {% for policy_version in policy.versions %}
-
-          {"Version":"2012-10-17","Statement":{"Effect":"Allow",
-          "Action":["iam:CreatePolicy","iam:CreatePolicyVersion",
-          "iam:DeletePolicy","iam:DeletePolicyVersion","iam:GetPolicy",
-          "iam:GetPolicyVersion","iam:ListPolicies",
-          "iam:ListPolicyVersions","iam:SetDefaultPolicyVersion"],
-          "Resource":"*"}}
-
-        true
-        v1
-        2012-05-09T16:27:11Z
+        {{ policy_version.document }}
+        {{ policy_version.is_default }}
+        {{ policy_version.version_id }}
+        {{ policy_version.create_datetime }}
+      {% endfor %}
       {{ policy.arn }}
       1
-      2012-05-09T16:27:11Z
+      {{ policy.create_datetime }}
       true
-      2012-05-09T16:27:11Z
+      {{ policy.update_datetime }}
     {% endfor %}
@@ -1485,3 +1791,139 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """
   92e79ae7-7399-11e4-8c85-4b53eEXAMPLE
 """
+
+CREATE_SAML_PROVIDER_TEMPLATE = """
+
+    {{ saml_provider.arn }}
+
+
+    29f47818-99f5-11e1-a4c3-27EXAMPLE804
+
+"""
+
+LIST_SAML_PROVIDERS_TEMPLATE = """
+
+
+    {% for saml_provider in saml_providers %}
+
+      {{ saml_provider.arn }}
+      2032-05-09T16:27:11Z
+      2012-05-09T16:27:03Z
+
+    {% endfor %}
+
+
+
+  fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804
+
+"""
+
+GET_SAML_PROVIDER_TEMPLATE = """
+
+    2012-05-09T16:27:11Z
+    2015-12-31T21:59:59Z
+    {{ saml_provider.saml_metadata_document }}
+
+
+    29f47818-99f5-11e1-a4c3-27EXAMPLE804
+
+"""
+
+DELETE_SAML_PROVIDER_TEMPLATE = """
+
+    c749ee7f-99ef-11e1-a4c3-27EXAMPLE804
+
+"""
+
+UPDATE_SAML_PROVIDER_TEMPLATE = """
+
+    {{ saml_provider.arn }}
+
+
+    29f47818-99f5-11e1-a4c3-27EXAMPLE804
+
+"""
+
+UPLOAD_SIGNING_CERTIFICATE_TEMPLATE = """
+
+
+      {{ cert.user_name }}
+      {{ cert.id }}
+      {{ cert.body }}
+      {{ cert.status }}
+
+
+
+    7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
+
+"""
+
+
+UPDATE_SIGNING_CERTIFICATE_TEMPLATE = """
+
+    EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE
+
+"""
+
+
+DELETE_SIGNING_CERTIFICATE_TEMPLATE = """
+
+    7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
+
+"""
+
+
+LIST_SIGNING_CERTIFICATES_TEMPLATE = """
+
+    {{ user_name }}
+
+      {% for cert in certificates %}
+
+        {{ user_name }}
+        {{ cert.id }}
+        {{ cert.body }}
+        {{ cert.status }}
+
+      {% endfor %}
+
+    false
+
+
+    7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
+
+"""
+
+
+TAG_ROLE_TEMPLATE = """
+
+    EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE
+
+"""
+
+
+LIST_ROLE_TAG_TEMPLATE = """
+
+    {{ 'true' if marker else 'false' }}
+    {% if marker %}
+    {{ marker }}
+    {% endif %}
+
+      {% for tag in tags %}
+
+        {{ tag['Key'] }}
+        {{ tag['Value'] }}
+
+      {% endfor %}
+
+
+
+    EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE
+
+"""
+
+
+UNTAG_ROLE_TEMPLATE = """
+
+    EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE
+
+"""
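[Illustrative usage - not part of the patch] A sketch of the new GetAccessKeyLastUsed support; the exact fields rendered by GET_ACCESS_KEY_LAST_USED_TEMPLATE are assumed to follow the real IAM response shape:

    import boto3
    from moto import mock_iam

    @mock_iam
    def test_get_access_key_last_used():
        iam = boto3.client('iam', region_name='us-east-1')
        iam.create_user(UserName='alice')
        key_id = iam.create_access_key(UserName='alice')['AccessKey']['AccessKeyId']
        resp = iam.get_access_key_last_used(AccessKeyId=key_id)
        assert resp['UserName'] == 'alice'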
diff --git a/moto/iam/utils.py b/moto/iam/utils.py
index 1fae85a6c..f59bdfffe 100644
--- a/moto/iam/utils.py
+++ b/moto/iam/utils.py
@@ -12,8 +12,7 @@ def random_alphanumeric(length):
     )
 
 
-def random_resource_id():
-    size = 20
+def random_resource_id(size=20):
     chars = list(range(10)) + list(string.ascii_lowercase)
 
     return ''.join(six.text_type(random.choice(chars)) for x in range(size))
diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py
index 7bbdb706d..3af3751d9 100644
--- a/moto/iot/exceptions.py
+++ b/moto/iot/exceptions.py
@@ -31,3 +31,20 @@ class VersionConflictException(IoTClientError):
             'VersionConflictException',
             'The version for thing %s does not match the expected version.' % name
         )
+
+
+class CertificateStateException(IoTClientError):
+    def __init__(self, msg, cert_id):
+        self.code = 406
+        super(CertificateStateException, self).__init__(
+            'CertificateStateException',
+            '%s Id: %s' % (msg, cert_id)
+        )
+
+
+class DeleteConflictException(IoTClientError):
+    def __init__(self, msg):
+        self.code = 409
+        super(DeleteConflictException, self).__init__(
+            'DeleteConflictException', msg
+        )
diff --git a/moto/iot/models.py b/moto/iot/models.py
index c36bb985f..b493f6b8d 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -13,6 +13,8 @@ import boto3
 
 from moto.core import BaseBackend, BaseModel
 from .exceptions import (
+    CertificateStateException,
+    DeleteConflictException,
     ResourceNotFoundException,
     InvalidRequestException,
     VersionConflictException
@@ -378,7 +380,25 @@ class IoTBackend(BaseBackend):
         return certificate, key_pair
 
     def delete_certificate(self, certificate_id):
-        self.describe_certificate(certificate_id)
+        cert = self.describe_certificate(certificate_id)
+        if cert.status == 'ACTIVE':
+            raise CertificateStateException(
+                'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id)
+
+        certs = [k[0] for k, v in self.principal_things.items()
+                 if self._get_principal(k[0]).certificate_id == certificate_id]
+        if len(certs) > 0:
+            raise DeleteConflictException(
+                'Things must be detached before deletion (arn: %s)' % certs[0]
+            )
+
+        certs = [k[0] for k, v in self.principal_policies.items()
+                 if self._get_principal(k[0]).certificate_id == certificate_id]
+        if len(certs) > 0:
+            raise DeleteConflictException(
+                'Certificate policies must be detached before deletion (arn: %s)' % certs[0]
+            )
+
         del self.certificates[certificate_id]
 
     def describe_certificate(self, certificate_id):
@@ -411,6 +431,14 @@ class IoTBackend(BaseBackend):
         return policies[0]
 
     def delete_policy(self, policy_name):
+
+        policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name]
+        if len(policies) > 0:
+            raise DeleteConflictException(
+                'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)'
+                % policy_name
+            )
+
         policy = self.get_policy(policy_name)
         del self.policies[policy.name]
 
@@ -429,6 +457,14 @@ class IoTBackend(BaseBackend):
             pass
         raise ResourceNotFoundException()
 
+    def attach_policy(self, policy_name, target):
+        principal = self._get_principal(target)
+        policy = self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
     def attach_principal_policy(self, policy_name, principal_arn):
         principal = self._get_principal(principal_arn)
         policy = self.get_policy(policy_name)
@@ -437,6 +473,15 @@ class IoTBackend(BaseBackend):
             return
         self.principal_policies[k] = (principal, policy)
 
+    def detach_policy(self, policy_name, target):
+        # this may raise ResourceNotFoundException
+        self._get_principal(target)
+        self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
     def detach_principal_policy(self, policy_name, principal_arn):
         # this may raises ResourceNotFoundException
         self._get_principal(principal_arn)
diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index 006c4c4cc..214576f52 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -224,6 +224,15 @@ class IoTResponse(BaseResponse):
         )
         return json.dumps(dict())
 
+    def attach_policy(self):
+        policy_name = self._get_param("policyName")
+        target = self._get_param('target')
+        self.iot_backend.attach_policy(
+            policy_name=policy_name,
+            target=target,
+        )
+        return json.dumps(dict())
+
     def attach_principal_policy(self):
         policy_name = self._get_param("policyName")
         principal = self.headers.get('x-amzn-iot-principal')
@@ -233,6 +242,15 @@ class IoTResponse(BaseResponse):
         )
         return json.dumps(dict())
 
+    def detach_policy(self):
+        policy_name = self._get_param("policyName")
+        target = self._get_param('target')
+        self.iot_backend.detach_policy(
+            policy_name=policy_name,
+            target=target,
+        )
+        return json.dumps(dict())
+
     def detach_principal_policy(self):
         policy_name = self._get_param("policyName")
         principal = self.headers.get('x-amzn-iot-principal')
diff --git a/moto/kms/exceptions.py b/moto/kms/exceptions.py
new file mode 100644
index 000000000..70edd3dcd
--- /dev/null
+++ b/moto/kms/exceptions.py
@@ -0,0 +1,36 @@
+from __future__ import unicode_literals
+from moto.core.exceptions import JsonRESTError
+
+
+class NotFoundException(JsonRESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(NotFoundException, self).__init__(
+            "NotFoundException", message)
+
+
+class ValidationException(JsonRESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(ValidationException, self).__init__(
+            "ValidationException", message)
+
+
+class AlreadyExistsException(JsonRESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(AlreadyExistsException, self).__init__(
+            "AlreadyExistsException", message)
+
+
+class NotAuthorizedException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(NotAuthorizedException, self).__init__(
+            "NotAuthorizedException", None)
+
+        self.description = '{"__type":"NotAuthorizedException"}'
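[Illustrative usage - not part of the patch] The new CertificateStateException means an ACTIVE certificate can no longer be deleted directly; it has to be deactivated first. Sketch:

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_iot

    @mock_iot
    def test_delete_certificate_requires_inactive():
        iot = boto3.client('iot', region_name='us-east-1')
        cert_id = iot.create_keys_and_certificate(setAsActive=True)['certificateId']
        try:
            iot.delete_certificate(certificateId=cert_id)
            raise AssertionError('expected CertificateStateException')
        except ClientError:
            pass  # 406: certificate must be deactivated before deletion
        iot.update_certificate(certificateId=cert_id, newStatus='INACTIVE')
        iot.delete_certificate(certificateId=cert_id)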
diff --git a/moto/kms/models.py b/moto/kms/models.py
index bb39d1b24..b49e9dd09 100644
--- a/moto/kms/models.py
+++ b/moto/kms/models.py
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import os
 import boto.kms
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_without_milliseconds
@@ -21,6 +22,7 @@ class Key(BaseModel):
         self.account_id = "0123456789012"
         self.key_rotation_status = False
         self.deletion_date = None
+        self.tags = {}
 
     @property
     def physical_resource_id(self):
@@ -35,7 +37,7 @@ class Key(BaseModel):
             "KeyMetadata": {
                 "AWSAccountId": self.account_id,
                 "Arn": self.arn,
-                "CreationDate": "2015-01-01 00:00:00",
+                "CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
                 "Description": self.description,
                 "Enabled": self.enabled,
                 "KeyId": self.id,
@@ -63,7 +65,6 @@ class Key(BaseModel):
         )
         key.key_rotation_status = properties['EnableKeyRotation']
         key.enabled = properties['Enabled']
-
         return key
 
     def get_cfn_attribute(self, attribute_name):
@@ -84,6 +85,18 @@ class KmsBackend(BaseBackend):
         self.keys[key.id] = key
         return key
 
+    def update_key_description(self, key_id, description):
+        key = self.keys[self.get_key_id(key_id)]
+        key.description = description
+
+    def tag_resource(self, key_id, tags):
+        key = self.keys[self.get_key_id(key_id)]
+        key.tags = tags
+
+    def list_resource_tags(self, key_id):
+        key = self.keys[self.get_key_id(key_id)]
+        return key.tags
+
     def delete_key(self, key_id):
         if key_id in self.keys:
             if key_id in self.key_to_aliases:
@@ -147,27 +160,38 @@ class KmsBackend(BaseBackend):
         return self.keys[self.get_key_id(key_id)].policy
 
     def disable_key(self, key_id):
-        if key_id in self.keys:
-            self.keys[key_id].enabled = False
-            self.keys[key_id].key_state = 'Disabled'
+        self.keys[key_id].enabled = False
+        self.keys[key_id].key_state = 'Disabled'
 
     def enable_key(self, key_id):
-        if key_id in self.keys:
-            self.keys[key_id].enabled = True
-            self.keys[key_id].key_state = 'Enabled'
+        self.keys[key_id].enabled = True
+        self.keys[key_id].key_state = 'Enabled'
 
     def cancel_key_deletion(self, key_id):
-        if key_id in self.keys:
-            self.keys[key_id].key_state = 'Disabled'
-            self.keys[key_id].deletion_date = None
+        self.keys[key_id].key_state = 'Disabled'
+        self.keys[key_id].deletion_date = None
 
     def schedule_key_deletion(self, key_id, pending_window_in_days):
-        if key_id in self.keys:
-            if 7 <= pending_window_in_days <= 30:
-                self.keys[key_id].enabled = False
-                self.keys[key_id].key_state = 'PendingDeletion'
-                self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
-                return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)
+        if 7 <= pending_window_in_days <= 30:
+            self.keys[key_id].enabled = False
+            self.keys[key_id].key_state = 'PendingDeletion'
+            self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
+            return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)
+
+    def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens):
+        key = self.keys[self.get_key_id(key_id)]
+
+        if key_spec:
+            if key_spec == 'AES_128':
+                bytes = 16
+            else:
+                bytes = 32
+        else:
+            bytes = number_of_bytes
+
+        plaintext = os.urandom(bytes)
+
+        return plaintext, key.arn
 
 
 kms_backends = {}
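[Illustrative usage - not part of the patch] generate_data_key above returns os.urandom bytes sized by the key spec (16 for AES_128, otherwise 32, or NumberOfBytes when no spec is given). Sketch:

    import boto3
    from moto import mock_kms

    @mock_kms
    def test_generate_data_key():
        kms = boto3.client('kms', region_name='us-east-1')
        key_id = kms.create_key(Description='example')['KeyMetadata']['KeyId']
        resp = kms.generate_data_key(KeyId=key_id, KeySpec='AES_256')
        assert len(resp['Plaintext']) == 32  # AES_256 -> 32 random bytes
        assert resp['KeyId'].startswith('arn:')  # the key ARN, never an alias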
json.dumps({}) + + def list_resource_tags(self): + key_id = self.parameters.get('KeyId') + tags = self.kms_backend.list_resource_tags(key_id) + return json.dumps({ + "Tags": tags, + "NextMarker": None, + "Truncated": False, + }) + def describe_key(self): key_id = self.parameters.get('KeyId') try: @@ -66,36 +86,28 @@ class KmsResponse(BaseResponse): def create_alias(self): alias_name = self.parameters['AliasName'] target_key_id = self.parameters['TargetKeyId'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if alias_name in reserved_aliases: - raise JSONResponseError(400, 'Bad Request', body={ - '__type': 'NotAuthorizedException'}) + raise NotAuthorizedException() if ':' in alias_name: - raise JSONResponseError(400, 'Bad Request', body={ - 'message': '{alias_name} contains invalid characters for an alias'.format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name)) if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" - .format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' " + "failed to satisfy constraint: Member must satisfy regular " + "expression pattern: ^[a-zA-Z0-9:/_-]+$" + .format(alias_name=alias_name)) if self.kms_backend.alias_exists(target_key_id): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': 'Aliases must refer to keys. Not aliases', - '__type': 'ValidationException'}) + raise ValidationException('Aliases must refer to keys. 
Not aliases') if self.kms_backend.alias_exists(alias_name): - raise AlreadyExistsException(400, 'Bad Request', body={ - 'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists' - .format(**locals()), '__type': 'AlreadyExistsException'}) + raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} ' + 'already exists'.format(region=self.region, alias_name=alias_name)) self.kms_backend.add_alias(target_key_id, alias_name) @@ -103,16 +115,13 @@ class KmsResponse(BaseResponse): def delete_alias(self): alias_name = self.parameters['AliasName'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if not self.kms_backend.alias_exists(alias_name): - raise NotFoundException(400, 'Bad Request', body={ - 'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()), - '__type': 'NotFoundException'}) + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:' + '{alias_name} is not found.'.format(region=self.region, alias_name=alias_name)) self.kms_backend.delete_alias(alias_name) @@ -150,9 +159,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.enable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -162,9 +170,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.disable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def get_key_rotation_status(self): @@ -173,9 +180,8 @@ class KmsResponse(BaseResponse): try: rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'KeyRotationEnabled': rotation_enabled}) def put_key_policy(self): @@ -188,9 +194,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.put_key_policy(key_id, policy) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -203,9 +208,8 @@ class KmsResponse(BaseResponse): try: return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except 
KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) def list_key_policies(self): key_id = self.parameters.get('KeyId') @@ -213,9 +217,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.describe_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'Truncated': False, 'PolicyNames': ['default']}) @@ -227,11 +230,17 @@ class KmsResponse(BaseResponse): value = self.parameters.get("Plaintext") if isinstance(value, six.text_type): value = value.encode('utf-8') - return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")}) + return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'}) def decrypt(self): + # TODO refuse decode if EncryptionContext is not the same as when it was encrypted / generated + value = self.parameters.get("CiphertextBlob") - return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + try: + return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + except UnicodeDecodeError: + # Generate data key will produce random bytes which when decrypted is still returned as base64 + return json.dumps({"Plaintext": value}) def disable_key(self): key_id = self.parameters.get('KeyId') @@ -239,9 +248,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.disable_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def enable_key(self): @@ -250,9 +258,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.enable_key(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def cancel_key_deletion(self): @@ -261,9 +268,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.cancel_key_deletion(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'KeyId': key_id}) def schedule_key_deletion(self): @@ -279,19 +285,62 @@ class KmsResponse(BaseResponse): 'DeletionDate': 
self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days) }) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) + + def generate_data_key(self): + key_id = self.parameters.get('KeyId') + encryption_context = self.parameters.get('EncryptionContext') + number_of_bytes = self.parameters.get('NumberOfBytes') + key_spec = self.parameters.get('KeySpec') + grant_tokens = self.parameters.get('GrantTokens') + + # Param validation + if key_id.startswith('alias'): + if self.kms_backend.get_key_id_from_alias(key_id) is None: + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format( + region=self.region, alias_name=key_id)) + else: + if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys: + raise NotFoundException('Invalid keyId') + + if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0): + raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed " + "to satisfy constraint: Member must have value less than or " + "equal to 1024") + + if key_spec and key_spec not in ('AES_256', 'AES_128'): + raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[AES_256, AES_128]") + if not key_spec and not number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + if key_spec and number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + + plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context, + number_of_bytes, key_spec, grant_tokens) + + plaintext = base64.b64encode(plaintext).decode() + + return json.dumps({ + 'CiphertextBlob': plaintext, + 'Plaintext': plaintext, + 'KeyId': key_arn # not alias + }) + + def generate_data_key_without_plaintext(self): + result = json.loads(self.generate_data_key()) + del result['Plaintext'] + + return json.dumps(result) def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): - raise JSONResponseError(404, 'Not Found', body={ - 'message': ' Invalid keyId', '__type': 'NotFoundException'}) + raise NotFoundException('Invalid keyId') def _assert_default_policy(policy_name): if policy_name != 'default': - raise JSONResponseError(404, 'Not Found', body={ - 'message': "No such policy exists", - '__type': 'NotFoundException'}) + raise NotFoundException("No such policy exists") diff --git a/moto/logs/models.py b/moto/logs/models.py index ca1fdc4ad..e105d4d14 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -242,7 +242,8 @@ class LogsBackend(BaseBackend): if next_token is None: next_token = 0 - groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)) + groups = [group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)] + groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True) groups_page = groups[next_token:next_token + limit] next_token += limit diff --git a/moto/packages/httpretty/core.py 
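The KMS hunks above replace hand-assembled JSONResponseError bodies with typed exceptions (NotFoundException, ValidationException, AlreadyExistsException), so each handler no longer repeats the '__type' and status-code boilerplate. The exception definitions themselves are not part of these hunks; a minimal sketch of what moto/kms/exceptions.py presumably contains, assuming the classes subclass moto.core.exceptions.JsonRESTError as other moto services do:

    from moto.core.exceptions import JsonRESTError


    class NotFoundException(JsonRESTError):
        code = 400

        def __init__(self, message):
            super(NotFoundException, self).__init__('NotFoundException', message)


    class ValidationException(JsonRESTError):
        code = 400

        def __init__(self, message):
            super(ValidationException, self).__init__('ValidationException', message)


    class AlreadyExistsException(JsonRESTError):
        code = 400

        def __init__(self, message):
            super(AlreadyExistsException, self).__init__('AlreadyExistsException', message)

With classes of this shape, raising NotFoundException(message) produces the same JSON error body the old dict literals built by hand.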
b/moto/packages/httpretty/core.py index 8ad9168a5..4eb92108f 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -1113,4 +1113,4 @@ def httprettified(test): if isinstance(test, ClassTypes): return decorate_class(test) - return decorate_callable(test) + return decorate_callable(test) \ No newline at end of file diff --git a/moto/packages/httpretty/http.py b/moto/packages/httpretty/http.py index 7e9a56885..ee1625905 100644 --- a/moto/packages/httpretty/http.py +++ b/moto/packages/httpretty/http.py @@ -29,7 +29,6 @@ import re from .compat import BaseClass from .utils import decode_utf8 - STATUSES = { 100: "Continue", 101: "Switching Protocols", diff --git a/moto/route53/models.py b/moto/route53/models.py index d483d22e2..3760d3817 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -24,7 +24,7 @@ class HealthCheck(BaseModel): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") self.port = health_check_args.get("port", 80) - self._type = health_check_args.get("type") + self.type_ = health_check_args.get("type") self.resource_path = health_check_args.get("resource_path") self.fqdn = health_check_args.get("fqdn") self.search_string = health_check_args.get("search_string") @@ -58,7 +58,7 @@ class HealthCheck(BaseModel): {{ health_check.ip_address }} {{ health_check.port }} - {{ health_check._type }} + {{ health_check.type_ }} {{ health_check.resource_path }} {{ health_check.fqdn }} {{ health_check.request_interval }} @@ -76,7 +76,7 @@ class RecordSet(BaseModel): def __init__(self, kwargs): self.name = kwargs.get('Name') - self._type = kwargs.get('Type') + self.type_ = kwargs.get('Type') self.ttl = kwargs.get('TTL') self.records = kwargs.get('ResourceRecords', []) self.set_identifier = kwargs.get('SetIdentifier') @@ -130,7 +130,7 @@ class RecordSet(BaseModel): def to_xml(self): template = Template(""" {{ record_set.name }} - {{ record_set._type }} + {{ record_set.type_ }} {% if record_set.set_identifier %} {{ record_set.set_identifier }} {% endif %} @@ -183,7 +183,7 @@ class FakeZone(BaseModel): def upsert_rrset(self, record_set): new_rrset = RecordSet(record_set) for i, rrset in enumerate(self.rrsets): - if rrset.name == new_rrset.name: + if rrset.name == new_rrset.name and rrset.type_ == new_rrset.type_: self.rrsets[i] = new_rrset break else: @@ -202,7 +202,7 @@ class FakeZone(BaseModel): record_sets = list(self.rrsets) # Copy the list if start_type: record_sets = [ - record_set for record_set in record_sets if record_set._type >= start_type] + record_set for record_set in record_sets if record_set.type_ >= start_type] if start_name: record_sets = [ record_set for record_set in record_sets if record_set.name >= start_name] diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 6679e7945..98ffa4c47 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -123,6 +123,9 @@ class Route53(BaseResponse): """ % (record_set['Name'], the_zone.name) return 400, headers, error_msg + if not record_set['Name'].endswith('.'): + record_set['Name'] += '.' 
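The upsert_rrset change above makes UPSERT match on both name and type_, so records of different types that share a name no longer overwrite each other, and the responses.py change normalizes record names to a trailing dot. A short boto3 sketch of the resulting behavior against the mock (zone and record names are illustrative):

    import boto3
    from moto import mock_route53


    @mock_route53
    def test_upsert_keys_on_name_and_type():
        client = boto3.client('route53', region_name='us-east-1')
        zone_id = client.create_hosted_zone(
            Name='example.com', CallerReference='ref-1')['HostedZone']['Id']

        for record_type, value in [('A', '192.0.2.1'), ('TXT', '"hello"')]:
            client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch={'Changes': [{
                    'Action': 'UPSERT',
                    'ResourceRecordSet': {
                        'Name': 'www.example.com',
                        'Type': record_type,
                        'TTL': 60,
                        'ResourceRecords': [{'Value': value}],
                    },
                }]},
            )

        rrsets = client.list_resource_record_sets(HostedZoneId=zone_id)
        types = {r['Type'] for r in rrsets['ResourceRecordSets']}
        # Both records survive because UPSERT now keys on (name, type).
        assert {'A', 'TXT'} <= types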
+ if action in ('CREATE', 'UPSERT'): if 'ResourceRecords' in record_set: resource_records = list( diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 26515dfd2..27c842111 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -178,3 +178,24 @@ class InvalidStorageClass(S3ClientError): "InvalidStorageClass", "The storage class you specified is not valid", *args, **kwargs) + + +class InvalidBucketName(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidBucketName, self).__init__( + "InvalidBucketName", + "The specified bucket is not valid.", + *args, **kwargs + ) + + +class DuplicateTagKeys(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(DuplicateTagKeys, self).__init__( + "InvalidTag", + "Cannot provide multiple Tags with the same key", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index bb4d7848c..9e4a6a766 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -8,19 +8,26 @@ import itertools import codecs import random import string +import tempfile +import sys +import uuid import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \ - InvalidNotificationDestination, MalformedXML, InvalidStorageClass +from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \ + EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys from .utils import clean_key_name, _VersionedKeyStore +MAX_BUCKET_NAME_LENGTH = 63 +MIN_BUCKET_NAME_LENGTH = 3 UPLOAD_ID_BYTES = 43 UPLOAD_PART_MIN_SIZE = 5242880 STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"] +DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024 +DEFAULT_TEXT_ENCODING = sys.getdefaultencoding() class FakeDeleteMarker(BaseModel): @@ -29,7 +36,7 @@ class FakeDeleteMarker(BaseModel): self.key = key self.name = key.name self.last_modified = datetime.datetime.utcnow() - self._version_id = key.version_id + 1 + self._version_id = str(uuid.uuid4()) @property def last_modified_ISO8601(self): @@ -42,9 +49,9 @@ class FakeDeleteMarker(BaseModel): class FakeKey(BaseModel): - def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0): + def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0, + max_buffer_size=DEFAULT_KEY_BUFFER_SIZE): self.name = name - self.value = value self.last_modified = datetime.datetime.utcnow() self.acl = get_canned_acl('private') self.website_redirect_location = None @@ -56,14 +63,37 @@ class FakeKey(BaseModel): self._is_versioned = is_versioned self._tagging = FakeTagging() + self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size) + self._max_buffer_size = max_buffer_size + self.value = value + @property def version_id(self): return self._version_id - def copy(self, new_name=None): + @property + def value(self): + self._value_buffer.seek(0) + return self._value_buffer.read() + + @value.setter + def value(self, new_value): + self._value_buffer.seek(0) + self._value_buffer.truncate() + + # Hack for working around moto's own unit tests; this probably won't + # actually get hit in normal use. 
+ if isinstance(new_value, six.text_type): + new_value = new_value.encode(DEFAULT_TEXT_ENCODING) + self._value_buffer.write(new_value) + + def copy(self, new_name=None, new_is_versioned=None): r = copy.deepcopy(self) if new_name is not None: r.name = new_name + if new_is_versioned is not None: + r._is_versioned = new_is_versioned + r.refresh_version() return r def set_metadata(self, metadata, replace=False): @@ -83,29 +113,34 @@ class FakeKey(BaseModel): self.acl = acl def append_to_value(self, value): - self.value += value + self._value_buffer.seek(0, os.SEEK_END) + self._value_buffer.write(value) + self.last_modified = datetime.datetime.utcnow() self._etag = None # must recalculate etag if self._is_versioned: - self._version_id += 1 + self._version_id = str(uuid.uuid4()) else: - self._is_versioned = 0 + self._version_id = None def restore(self, days): self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days) - def increment_version(self): - self._version_id += 1 + def refresh_version(self): + self._version_id = str(uuid.uuid4()) + self.last_modified = datetime.datetime.utcnow() @property def etag(self): if self._etag is None: value_md5 = hashlib.md5() - if isinstance(self.value, six.text_type): - value = self.value.encode("utf-8") - else: - value = self.value - value_md5.update(value) + self._value_buffer.seek(0) + while True: + block = self._value_buffer.read(DEFAULT_KEY_BUFFER_SIZE) + if not block: + break + value_md5.update(block) + self._etag = value_md5.hexdigest() return '"{0}"'.format(self._etag) @@ -132,7 +167,7 @@ class FakeKey(BaseModel): res = { 'ETag': self.etag, 'last-modified': self.last_modified_RFC1123, - 'content-length': str(len(self.value)), + 'content-length': str(self.size), } if self._storage_class != 'STANDARD': res['x-amz-storage-class'] = self._storage_class @@ -150,7 +185,8 @@ class FakeKey(BaseModel): @property def size(self): - return len(self.value) + self._value_buffer.seek(0, os.SEEK_END) + return self._value_buffer.tell() @property def storage_class(self): @@ -161,6 +197,26 @@ class FakeKey(BaseModel): if self._expiry is not None: return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT") + # Keys need to be pickleable due to some implementation details of boto3. + # Since file objects aren't pickleable, we need to override the default + # behavior. 
The following is adapted from the Python docs: + # https://docs.python.org/3/library/pickle.html#handling-stateful-objects + def __getstate__(self): + state = self.__dict__.copy() + state['value'] = self.value + del state['_value_buffer'] + return state + + def __setstate__(self, state): + self.__dict__.update({ + k: v for k, v in six.iteritems(state) + if k != 'value' + }) + + self._value_buffer = \ + tempfile.SpooledTemporaryFile(max_size=self._max_buffer_size) + self.value = state['value'] + class FakeMultipart(BaseModel): @@ -634,6 +690,8 @@ class S3Backend(BaseBackend): def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: raise BucketAlreadyExists(bucket=bucket_name) + if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH: + raise InvalidBucketName() new_bucket = FakeBucket(name=bucket_name, region_name=region_name) self.buckets[bucket_name] = new_bucket return new_bucket @@ -663,17 +721,18 @@ class S3Backend(BaseBackend): def get_bucket_latest_versions(self, bucket_name): versions = self.get_bucket_versions(bucket_name) - maximum_version_per_key = {} + latest_modified_per_key = {} latest_versions = {} for version in versions: name = version.name + last_modified = version.last_modified version_id = version.version_id - maximum_version_per_key[name] = max( - version_id, - maximum_version_per_key.get(name, -1) + latest_modified_per_key[name] = max( + last_modified, + latest_modified_per_key.get(name, datetime.datetime.min) ) - if version_id == maximum_version_per_key[name]: + if last_modified == latest_modified_per_key[name]: latest_versions[name] = version_id return latest_versions @@ -721,20 +780,19 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) - old_key = bucket.keys.get(key_name, None) - if old_key is not None and bucket.is_versioned: - new_version_id = old_key._version_id + 1 - else: - new_version_id = 0 - new_key = FakeKey( name=key_name, value=value, storage=storage, etag=etag, is_versioned=bucket.is_versioned, - version_id=new_version_id) - bucket.keys[key_name] = new_key + version_id=str(uuid.uuid4()) if bucket.is_versioned else None) + + keys = [ + key for key in bucket.keys.getlist(key_name, []) + if key.version_id != new_key.version_id + ] + [new_key] + bucket.keys.setlist(key_name, keys) return new_key @@ -773,6 +831,9 @@ class S3Backend(BaseBackend): return key def put_bucket_tagging(self, bucket_name, tagging): + tag_keys = [tag.key for tag in tagging.tag_set.tags] + if len(tag_keys) != len(set(tag_keys)): + raise DuplicateTagKeys() bucket = self.get_bucket(bucket_name) bucket.set_tags(tagging) @@ -915,17 +976,15 @@ class S3Backend(BaseBackend): dest_bucket = self.get_bucket(dest_bucket_name) key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) - if dest_key_name != src_key_name: - key = key.copy(dest_key_name) - dest_bucket.keys[dest_key_name] = key - # By this point, the destination key must exist, or KeyError - if dest_bucket.is_versioned: - dest_bucket.keys[dest_key_name].increment_version() + new_key = key.copy(dest_key_name, dest_bucket.is_versioned) + if storage is not None: - key.set_storage_class(storage) + new_key.set_storage_class(storage) if acl is not None: - key.set_acl(acl) + new_key.set_acl(acl) + + dest_bucket.keys[dest_key_name] = new_key def set_bucket_acl(self, bucket_name, acl): bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 13e5f87d9..856178941 100755 --- a/moto/s3/responses.py +++ 
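FakeKey values now live in a tempfile.SpooledTemporaryFile, which keeps small bodies in memory and spills large ones to disk, and the __getstate__/__setstate__ pair keeps keys pickleable by snapshotting the buffer contents. A standalone sketch of both techniques, mirroring the streaming etag computation above:

    import hashlib
    import pickle
    import tempfile


    class SpooledValue(object):
        """Holds bytes in memory up to max_size, then spills to disk."""

        def __init__(self, value, max_size=16 * 1024 * 1024):
            self._max_size = max_size
            self._buffer = tempfile.SpooledTemporaryFile(max_size=max_size)
            self._buffer.write(value)

        def read(self):
            self._buffer.seek(0)
            return self._buffer.read()

        def md5(self):
            # Stream in blocks instead of loading the whole value at once.
            digest = hashlib.md5()
            self._buffer.seek(0)
            for block in iter(lambda: self._buffer.read(8192), b''):
                digest.update(block)
            return digest.hexdigest()

        # File objects are not pickleable, so snapshot the bytes instead.
        def __getstate__(self):
            state = self.__dict__.copy()
            state['value'] = self.read()
            del state['_buffer']
            return state

        def __setstate__(self, state):
            self._max_size = state['_max_size']
            self._buffer = tempfile.SpooledTemporaryFile(max_size=self._max_size)
            self._buffer.write(state['value'])


    original = SpooledValue(b'hello world')
    restored = pickle.loads(pickle.dumps(original))
    assert restored.read() == b'hello world'
    assert restored.md5() == original.md5()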
b/moto/s3/responses.py @@ -19,7 +19,7 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ FakeTag -from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url +from .utils import bucket_name_from_url, clean_key_name, metadata_from_headers, parse_region_from_url from xml.dom import minidom @@ -193,7 +193,13 @@ class ResponseObject(_TemplateEnvironmentMixin): elif 'location' in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_BUCKET_LOCATION) - return template.render(location=bucket.location) + + location = bucket.location + # us-east-1 is different - returns a None location + if location == DEFAULT_REGION_NAME: + location = None + + return template.render(location=location) elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: @@ -338,9 +344,15 @@ class ResponseObject(_TemplateEnvironmentMixin): if continuation_token or start_after: limit = continuation_token or start_after - result_keys = self._get_results_from_token(result_keys, limit) + if not delimiter: + result_keys = self._get_results_from_token(result_keys, limit) + else: + result_folders = self._get_results_from_token(result_folders, limit) - result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys) + if not delimiter: + result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys) + else: + result_folders, is_truncated, next_continuation_token = self._truncate_result(result_folders, max_keys) return template.render( bucket=bucket, @@ -358,7 +370,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def _get_results_from_token(self, result_keys, token): continuation_index = 0 for key in result_keys: - if key.name > token: + if (key.name if isinstance(key, FakeKey) else key) > token: break continuation_index += 1 return result_keys[continuation_index:] @@ -367,7 +379,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if len(result_keys) > max_keys: is_truncated = 'true' result_keys = result_keys[:max_keys] - next_continuation_token = result_keys[-1].name + item = result_keys[-1] + next_continuation_token = (item.name if isinstance(item, FakeKey) else item) else: is_truncated = 'false' next_continuation_token = None @@ -432,8 +445,19 @@ class ResponseObject(_TemplateEnvironmentMixin): else: if body: + # us-east-1, the default AWS region behaves a bit differently + # - you should not use it as a location constraint --> it fails + # - querying the location constraint returns None try: - region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint'] + forced_region = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint'] + + if forced_region == DEFAULT_REGION_NAME: + raise S3ClientError( + 'InvalidLocationConstraint', + 'The specified location-constraint is not valid' + ) + else: + region_name = forced_region except KeyError: pass @@ -709,7 +733,7 @@ class ResponseObject(_TemplateEnvironmentMixin): # Copy key # you can have a quoted ?version=abc with a version Id, so work on # we need to parse the unquoted string first - src_key = request.headers.get("x-amz-copy-source") + src_key = clean_key_name(request.headers.get("x-amz-copy-source")) if isinstance(src_key, six.binary_type): src_key = 
src_key.decode('utf-8') src_key_parsed = urlparse(src_key) @@ -1176,7 +1200,7 @@ S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """ """ S3_BUCKET_LOCATION = """ -{{ location }}""" +{% if location != None %}{{ location }}{% endif %}""" S3_BUCKET_LIFECYCLE_CONFIGURATION = """ @@ -1279,7 +1303,7 @@ S3_BUCKET_GET_VERSIONS = """ {% for key in key_list %} {{ key.name }} - {{ key.version_id }} + {% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %} {% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %} {{ key.last_modified_ISO8601 }} {{ key.etag }} diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index a72a32645..06010c411 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -27,3 +27,10 @@ class InvalidParameterException(SecretsManagerClientError): super(InvalidParameterException, self).__init__( 'InvalidParameterException', message) + + +class InvalidRequestException(SecretsManagerClientError): + def __init__(self, message): + super(InvalidRequestException, self).__init__( + 'InvalidRequestException', + message) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 1404a0ec8..44ac1ef47 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import time import json +import uuid +import datetime import boto3 @@ -9,6 +11,7 @@ from moto.core import BaseBackend, BaseModel from .exceptions import ( ResourceNotFoundException, InvalidParameterException, + InvalidRequestException, ClientError ) from .utils import random_password, secret_arn @@ -18,10 +21,6 @@ class SecretsManager(BaseModel): def __init__(self, region_name, **kwargs): self.region = region_name - self.secret_id = kwargs.get('secret_id', '') - self.version_id = kwargs.get('version_id', '') - self.version_stage = kwargs.get('version_stage', '') - self.secret_string = '' class SecretsManagerBackend(BaseBackend): @@ -29,14 +28,7 @@ class SecretsManagerBackend(BaseBackend): def __init__(self, region_name=None, **kwargs): super(SecretsManagerBackend, self).__init__() self.region = region_name - self.secret_id = kwargs.get('secret_id', '') - self.name = kwargs.get('name', '') - self.createdate = int(time.time()) - self.secret_string = '' - self.rotation_enabled = False - self.rotation_lambda_arn = '' - self.auto_rotate_after_days = 0 - self.version_id = '' + self.secrets = {} def reset(self): region_name = self.region @@ -44,36 +36,60 @@ class SecretsManagerBackend(BaseBackend): self.__init__(region_name) def _is_valid_identifier(self, identifier): - return identifier in (self.name, self.secret_id) + return identifier in self.secrets + + def _unix_time_secs(self, dt): + epoch = datetime.datetime.utcfromtimestamp(0) + return (dt - epoch).total_seconds() def get_secret_value(self, secret_id, version_id, version_stage): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException() + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ + perform the operation on a secret that's currently marked deleted." 
+ ) + + secret = self.secrets[secret_id] + response = json.dumps({ - "ARN": secret_arn(self.region, self.secret_id), - "Name": self.name, - "VersionId": "A435958A-D821-4193-B719-B7769357AER4", - "SecretString": self.secret_string, + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], + "VersionId": secret['version_id'], + "SecretString": secret['secret_string'], "VersionStages": [ "AWSCURRENT", ], - "CreatedDate": "2018-05-23 13:16:57.198000" + "CreatedDate": secret['createdate'] }) return response - def create_secret(self, name, secret_string, **kwargs): + def create_secret(self, name, secret_string, tags, **kwargs): - self.secret_string = secret_string - self.secret_id = name - self.name = name + generated_version_id = str(uuid.uuid4()) + + secret = { + 'secret_string': secret_string, + 'secret_id': name, + 'name': name, + 'createdate': int(time.time()), + 'rotation_enabled': False, + 'rotation_lambda_arn': '', + 'auto_rotate_after_days': 0, + 'version_id': generated_version_id, + 'tags': tags + } + + self.secrets[name] = secret response = json.dumps({ "ARN": secret_arn(self.region, name), - "Name": self.name, - "VersionId": "A435958A-D821-4193-B719-B7769357AER4", + "Name": name, + "VersionId": generated_version_id, }) return response @@ -82,26 +98,23 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException + secret = self.secrets[secret_id] + response = json.dumps({ - "ARN": secret_arn(self.region, self.secret_id), - "Name": self.name, + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], "Description": "", "KmsKeyId": "", - "RotationEnabled": self.rotation_enabled, - "RotationLambdaARN": self.rotation_lambda_arn, + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], "RotationRules": { - "AutomaticallyAfterDays": self.auto_rotate_after_days + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] }, "LastRotatedDate": None, "LastChangedDate": None, "LastAccessedDate": None, - "DeletedDate": None, - "Tags": [ - { - "Key": "", - "Value": "" - }, - ] + "DeletedDate": secret.get('deleted_date', None), + "Tags": secret['tags'] }) return response @@ -114,6 +127,12 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." 
+ ) + if client_request_token: token_length = len(client_request_token) if token_length < 32 or token_length > 64: @@ -141,17 +160,19 @@ class SecretsManagerBackend(BaseBackend): ) raise InvalidParameterException(msg) - self.version_id = client_request_token or '' - self.rotation_lambda_arn = rotation_lambda_arn or '' + secret = self.secrets[secret_id] + + secret['version_id'] = client_request_token or '' + secret['rotation_lambda_arn'] = rotation_lambda_arn or '' if rotation_rules: - self.auto_rotate_after_days = rotation_rules.get(rotation_days, 0) - if self.auto_rotate_after_days > 0: - self.rotation_enabled = True + secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0) + if secret['auto_rotate_after_days'] > 0: + secret['rotation_enabled'] = True response = json.dumps({ - "ARN": secret_arn(self.region, self.secret_id), - "Name": self.name, - "VersionId": self.version_id + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], + "VersionId": secret['version_id'] }) return response @@ -185,6 +206,85 @@ class SecretsManagerBackend(BaseBackend): return response + def list_secrets(self, max_results, next_token): + # TODO implement pagination and limits + + secret_list = [{ + "ARN": secret_arn(self.region, secret['secret_id']), + "DeletedDate": secret.get('deleted_date', None), + "Description": "", + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret['name'], + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "SecretVersionsToStages": { + secret['version_id']: ["AWSCURRENT"] + }, + "Tags": secret['tags'] + } for secret in self.secrets.values()] + + return secret_list, None + + def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + if recovery_window_in_days and force_delete_without_recovery: + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \ + use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays." + ) + + if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30): + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \ + RecoveryWindowInDays value must be between 7 and 30 days (inclusive)." 
+ ) + + deletion_date = datetime.datetime.utcnow() + + if force_delete_without_recovery: + secret = self.secrets.pop(secret_id, None) + else: + deletion_date += datetime.timedelta(days=recovery_window_in_days or 30) + self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date) + secret = self.secrets.get(secret_id, None) + + if not secret: + raise ResourceNotFoundException + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name, self._unix_time_secs(deletion_date) + + def restore_secret(self, secret_id): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + self.secrets[secret_id].pop('deleted_date', None) + + secret = self.secrets[secret_id] + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name + available_regions = ( boto3.session.Session().get_available_regions("secretsmanager") diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index b8b6872a8..0eb02e39b 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -4,6 +4,8 @@ from moto.core.responses import BaseResponse from .models import secretsmanager_backends +import json + class SecretsManagerResponse(BaseResponse): @@ -19,9 +21,11 @@ class SecretsManagerResponse(BaseResponse): def create_secret(self): name = self._get_param('Name') secret_string = self._get_param('SecretString') + tags = self._get_param('Tags', if_none=[]) return secretsmanager_backends[self.region].create_secret( name=name, - secret_string=secret_string + secret_string=secret_string, + tags=tags ) def get_random_password(self): @@ -62,3 +66,30 @@ class SecretsManagerResponse(BaseResponse): rotation_lambda_arn=rotation_lambda_arn, rotation_rules=rotation_rules ) + + def list_secrets(self): + max_results = self._get_int_param("MaxResults") + next_token = self._get_param("NextToken") + secret_list, next_token = secretsmanager_backends[self.region].list_secrets( + max_results=max_results, + next_token=next_token, + ) + return json.dumps(dict(SecretList=secret_list, NextToken=next_token)) + + def delete_secret(self): + secret_id = self._get_param("SecretId") + recovery_window_in_days = self._get_param("RecoveryWindowInDays") + force_delete_without_recovery = self._get_param("ForceDeleteWithoutRecovery") + arn, name, deletion_date = secretsmanager_backends[self.region].delete_secret( + secret_id=secret_id, + recovery_window_in_days=recovery_window_in_days, + force_delete_without_recovery=force_delete_without_recovery, + ) + return json.dumps(dict(ARN=arn, Name=name, DeletionDate=deletion_date)) + + def restore_secret(self): + secret_id = self._get_param("SecretId") + arn, name = secretsmanager_backends[self.region].restore_secret( + secret_id=secret_id, + ) + return json.dumps(dict(ARN=arn, Name=name)) diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py index 2cb92020a..231fea296 100644 --- a/moto/secretsmanager/utils.py +++ b/moto/secretsmanager/utils.py @@ -52,8 +52,9 @@ def random_password(password_length, exclude_characters, exclude_numbers, def secret_arn(region, secret_id): - return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-rIjad".format( - region, secret_id) + id_string = ''.join(random.choice(string.ascii_letters) for _ in range(5)) + return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-{2}".format( + region, secret_id, id_string) def _exclude_characters(password, exclude_characters): diff --git a/moto/server.py b/moto/server.py 
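The secretsmanager backend now stores secrets in a dict, tags deletions with a deleted_date instead of discarding state, and rejects reads of deleted secrets with InvalidRequestException until restore_secret clears the marker. A boto3 sketch of that lifecycle, in the style of the repository's other tests (secret name and value are illustrative):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_secretsmanager
    from nose.tools import assert_raises


    @mock_secretsmanager
    def test_delete_then_restore():
        client = boto3.client('secretsmanager', region_name='us-west-2')
        client.create_secret(Name='db-password', SecretString='hunter2')

        client.delete_secret(SecretId='db-password', RecoveryWindowInDays=7)

        # Reads fail while the secret is marked deleted...
        with assert_raises(ClientError):
            client.get_secret_value(SecretId='db-password')

        # ...and succeed again once the deletion marker is cleared.
        client.restore_secret(SecretId='db-password')
        value = client.get_secret_value(SecretId='db-password')
        assert value['SecretString'] == 'hunter2'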
index ba2470478..5ad02d383 100644 --- a/moto/server.py +++ b/moto/server.py @@ -80,10 +80,13 @@ class DomainDispatcherApplication(object): region = 'us-east-1' if service == 'dynamodb': - dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0] - # If Newer API version, use dynamodb2 - if dynamo_api_version > "20111205": - host = "dynamodb2" + if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'): + host = 'dynamodbstreams' + else: + dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0] + # If Newer API version, use dynamodb2 + if dynamo_api_version > "20111205": + host = "dynamodb2" else: host = "{service}.{region}.amazonaws.com".format( service=service, region=region) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index f3262a988..1404ded75 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -534,7 +534,7 @@ class SQSBackend(BaseBackend): break import time - time.sleep(0.001) + time.sleep(0.01) continue previous_result_count = len(result) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b4f64b14e..5ddaf8849 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - {{ requestid }} + """ @@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - {{ requestid }} + """ @@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_QUEUE_RESPONSE = """ - {{ requestid }} + """ @@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - {{ requestid }} + """ @@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - {{ requestid }} + """ @@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - {{ requestid }} + """ @@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ - {{ requestid }} + """ diff --git a/moto/ssm/models.py b/moto/ssm/models.py index f16a7d981..2f316a3ac 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -14,10 +14,12 @@ import itertools class Parameter(BaseModel): - def __init__(self, name, value, type, description, keyid, last_modified_date, version): + def __init__(self, name, value, type, description, allowed_pattern, keyid, + last_modified_date, version): self.name = name self.type = type self.description = description + self.allowed_pattern = allowed_pattern self.keyid = keyid self.last_modified_date = last_modified_date self.version = version @@ -58,6 +60,10 @@ class Parameter(BaseModel): if self.keyid: r['KeyId'] = self.keyid + + if self.allowed_pattern: + r['AllowedPattern'] = self.allowed_pattern + return r @@ -291,7 +297,8 @@ class SimpleSystemManagerBackend(BaseBackend): return self._parameters[name] return None - def put_parameter(self, name, description, value, type, keyid, overwrite): + def put_parameter(self, name, description, value, type, allowed_pattern, + keyid, overwrite): previous_parameter = self._parameters.get(name) version = 1 @@ -302,8 +309,8 @@ class SimpleSystemManagerBackend(BaseBackend): return last_modified_date = time.time() - self._parameters[name] = Parameter( - name, value, type, description, keyid, last_modified_date, version) + 
self._parameters[name] = Parameter(name, value, type, description, + allowed_pattern, keyid, last_modified_date, version) return version def add_tags_to_resource(self, resource_type, resource_id, tags): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index eb05e51b6..c47d4127a 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -160,11 +160,12 @@ class SimpleSystemManagerResponse(BaseResponse): description = self._get_param('Description') value = self._get_param('Value') type_ = self._get_param('Type') + allowed_pattern = self._get_param('AllowedPattern') keyid = self._get_param('KeyId') overwrite = self._get_param('Overwrite', False) result = self.ssm_backend.put_parameter( - name, description, value, type_, keyid, overwrite) + name, description, value, type_, allowed_pattern, keyid, overwrite) if result is None: error = { diff --git a/moto/ssm/urls.py b/moto/ssm/urls.py index d22866486..9ac327325 100644 --- a/moto/ssm/urls.py +++ b/moto/ssm/urls.py @@ -3,6 +3,7 @@ from .responses import SimpleSystemManagerResponse url_bases = [ "https?://ssm.(.+).amazonaws.com", + "https?://ssm.(.+).amazonaws.com.cn", ] url_paths = { diff --git a/setup.py b/setup.py index a1b8c5dae..7ee84cf6e 100755 --- a/setup.py +++ b/setup.py @@ -1,29 +1,44 @@ #!/usr/bin/env python from __future__ import unicode_literals +import codecs +import os +import re import setuptools from setuptools import setup, find_packages import sys +# Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 +here = os.path.abspath(os.path.dirname(__file__)) + +def read(*parts): + # intentionally *not* adding an encoding option to open, See: + # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 + with codecs.open(os.path.join(here, *parts), 'r') as fp: + return fp.read() + + install_requires = [ - "Jinja2>=2.7.3", + "Jinja2>=2.10.1", "boto>=2.36.0", - "boto3>=1.6.16", - "botocore>=1.12.13", + "boto3>=1.9.86", + "botocore>=1.12.86", "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", "werkzeug", - "pyaml", + "PyYAML", "pytz", "python-dateutil<3.0.0,>=2.1", - "python-jose<3.0.0", + "python-jose<4.0.0", "mock", "docker>=2.5.1", - "jsondiff==1.1.1", + "jsondiff==1.1.2", "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", + "idna<2.9,>=2.5", + "cfn-lint", ] extras_require = { @@ -40,9 +55,11 @@ else: setup( name='moto', - version='1.3.7', + version='1.3.8', description='A library that allows your python tests to easily' ' mock out the boto library', + long_description=read('README.md'), + long_description_content_type='text/markdown', author='Steve Pulec', author_email='spulec@gmail.com', url='https://github.com/spulec/moto', diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index f86ca2b81..d01f2dcb3 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -543,6 +543,7 @@ def test_describe_load_balancers(): ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + assert response['ResponseMetadata']['RequestId'] list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') @@ -710,6 +711,7 @@ def test_create_autoscaling_group_boto3(): 'PropagateAtLaunch': False }], VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False, ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @@ -728,13 +730,48 @@ def 
test_describe_autoscaling_groups_boto3(): MaxSize=20, DesiredCapacity=5, VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, ) + response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['AutoScalingGroups'][0][ - 'AutoScalingGroupName'].should.equal('test_asg') + group = response['AutoScalingGroups'][0] + group['AutoScalingGroupName'].should.equal('test_asg') + group['NewInstancesProtectedFromScaleIn'].should.equal(True) + group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True) + + +@mock_autoscaling +def test_describe_autoscaling_instances_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + instance_ids = [ + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ] + + response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) + for instance in response['AutoScalingInstances']: + instance['AutoScalingGroupName'].should.equal('test_asg') + instance['ProtectedFromScaleIn'].should.equal(True) @mock_autoscaling @@ -751,17 +788,21 @@ def test_update_autoscaling_group_boto3(): MaxSize=20, DesiredCapacity=5, VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, ) - response = client.update_auto_scaling_group( + _ = client.update_auto_scaling_group( AutoScalingGroupName='test_asg', MinSize=1, + NewInstancesProtectedFromScaleIn=False, ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] ) - response['AutoScalingGroups'][0]['MinSize'].should.equal(1) + group = response['AutoScalingGroups'][0] + group['MinSize'].should.equal(1) + group['NewInstancesProtectedFromScaleIn'].should.equal(False) @mock_autoscaling @@ -992,9 +1033,7 @@ def test_attach_one_instance(): 'PropagateAtLaunch': True }], VPCZoneIdentifier=mocked_networking['subnet1'], - ) - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=['test_asg'] + NewInstancesProtectedFromScaleIn=True, ) ec2 = boto3.resource('ec2', 'us-east-1') @@ -1009,7 +1048,11 @@ def test_attach_one_instance(): response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] ) - response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) + instances = response['AutoScalingGroups'][0]['Instances'] + instances.should.have.length_of(3) + for instance in instances: + instance['ProtectedFromScaleIn'].should.equal(True) + @mock_autoscaling @mock_ec2 @@ -1100,3 +1143,111 @@ def test_suspend_processes(): launch_suspended = True assert launch_suspended is True + +@mock_autoscaling +def test_set_instance_protection(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', 
+ MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instance_ids = [ + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ] + protected = instance_ids[:3] + + _ = client.set_instance_protection( + AutoScalingGroupName='test_asg', + InstanceIds=protected, + ProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + for instance in response['AutoScalingGroups'][0]['Instances']: + instance['ProtectedFromScaleIn'].should.equal( + instance['InstanceId'] in protected + ) + + +@mock_autoscaling +def test_set_desired_capacity_up_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + _ = client.set_desired_capacity( + AutoScalingGroupName='test_asg', + DesiredCapacity=10, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instances = response['AutoScalingGroups'][0]['Instances'] + instances.should.have.length_of(10) + for instance in instances: + instance['ProtectedFromScaleIn'].should.equal(True) + + +@mock_autoscaling +def test_set_desired_capacity_down_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instance_ids = [ + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ] + unprotected, protected = instance_ids[:2], instance_ids[2:] + + _ = client.set_instance_protection( + AutoScalingGroupName='test_asg', + InstanceIds=unprotected, + ProtectedFromScaleIn=False, + ) + + _ = client.set_desired_capacity( + AutoScalingGroupName='test_asg', + DesiredCapacity=1, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + group = response['AutoScalingGroups'][0] + group['DesiredCapacity'].should.equal(1) + instance_ids = {instance['InstanceId'] for instance in group['Instances']} + set(protected).should.equal(instance_ids) + set(unprotected).should_not.be.within(instance_ids) # only unprotected killed diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8ea9cc6fd..479aaaa8a 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -12,6 +12,8 @@ import sure # noqa from freezegun import freeze_time from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings +from nose.tools import assert_raises +from botocore.exceptions import ClientError _lambda_region = 'us-west-2' @@ -397,6 
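test_set_desired_capacity_down_boto3 above pins down the scale-in contract: when desired capacity drops, only instances with ProtectedFromScaleIn set to False may be terminated. A minimal sketch of that selection rule; this illustrates the contract the tests assert, not moto's actual backend code, which is not shown in this diff:

    def instances_to_terminate(instances, desired_capacity):
        """Pick scale-in victims: unprotected instances only, never protected ones."""
        excess = len(instances) - desired_capacity
        if excess <= 0:
            return []
        unprotected = [i for i in instances if not i['ProtectedFromScaleIn']]
        # Protected instances survive even if fewer than `excess` are unprotected.
        return unprotected[:excess]


    instances = [
        {'InstanceId': 'i-1', 'ProtectedFromScaleIn': False},
        {'InstanceId': 'i-2', 'ProtectedFromScaleIn': False},
        {'InstanceId': 'i-3', 'ProtectedFromScaleIn': True},
    ]
    victims = [i['InstanceId'] for i in instances_to_terminate(instances, 1)]
    assert victims == ['i-1', 'i-2']  # i-3 is protected and survives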
+399,11 @@ def test_get_function(): result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') result['Configuration']['Version'].should.equal('$LATEST') + # Test get function when can't find function name + with assert_raises(ClientError): + conn.get_function(FunctionName='junk', Qualifier='$LATEST') + + @mock_lambda @mock_s3 @@ -464,7 +471,8 @@ def test_publish(): function_list['Functions'].should.have.length_of(1) latest_arn = function_list['Functions'][0]['FunctionArn'] - conn.publish_version(FunctionName='testFunction') + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 function_list = conn.list_functions() function_list['Functions'].should.have.length_of(2) @@ -819,3 +827,87 @@ def get_function_policy(): assert isinstance(response['Policy'], str) res = json.loads(response['Policy']) assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction' + + +@mock_lambda +@mock_s3 +def test_list_versions_by_function(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 + versions = conn.list_versions_by_function(FunctionName='testFunction') + + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST' + + +@mock_lambda +@mock_s3 +def test_create_function_with_already_exists(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + assert response['FunctionName'] == 'testFunction' + + +@mock_lambda +@mock_s3 +def test_list_versions_by_function_for_nonexistent_function(): + conn = boto3.client('lambda', 'us-west-2') + versions = conn.list_versions_by_function(FunctionName='testFunction') + + assert len(versions['Versions']) == 0 diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index ec24cd911..310ac0b48 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -323,6 +323,54 @@ def test_create_job_queue(): resp.should.contain('jobQueues') len(resp['jobQueues']).should.equal(0) + # Create job queue which already exists + try: + resp = batch_client.create_job_queue( + 
jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') + + + # Create job queue with incorrect state + try: + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue2', + state='JUNK', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') + + # Create job queue with no compute env + try: + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue3', + state='JUNK', + priority=123, + computeEnvironmentOrder=[ + + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') @mock_ec2 @mock_ecs @@ -397,6 +445,17 @@ def test_update_job_queue(): len(resp['jobQueues']).should.equal(1) resp['jobQueues'][0]['priority'].should.equal(5) + batch_client.update_job_queue( + jobQueue='test_job_queue', + priority=5 + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(1) + resp['jobQueues'][0]['priority'].should.equal(5) + + @mock_ec2 @mock_ecs diff --git a/tests/test_cloudformation/fixtures/vpc_eni.py b/tests/test_cloudformation/fixtures/vpc_eni.py index ef9eb1d08..3f8eb2d03 100644 --- a/tests/test_cloudformation/fixtures/vpc_eni.py +++ b/tests/test_cloudformation/fixtures/vpc_eni.py @@ -29,6 +29,10 @@ template = { "NinjaENI": { "Description": "Elastic IP mapping to Auto-Scaling Group", "Value": {"Ref": "ENI"} + }, + "ENIIpAddress": { + "Description": "ENI's Private IP address", + "Value": {"Fn::GetAtt": ["ENI", "PrimaryPrivateIpAddress"]} } } } diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 801faf8a1..b7906632b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -266,9 +266,9 @@ def test_delete_stack_by_name(): template_body=dummy_template_json, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) @mock_cloudformation_deprecated @@ -279,9 +279,9 @@ def test_delete_stack_by_id(): template_body=dummy_template_json, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack(stack_id) - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) with assert_raises(BotoServerError): conn.describe_stacks("test_stack") @@ -296,9 +296,9 @@ def test_delete_stack_with_resource_missing_delete_attr(): template_body=dummy_template_json3, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) @mock_cloudformation_deprecated diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 9bfae6174..d05bc1b53 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py 
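The vpc_eni fixture change above adds an output that exercises Fn::GetAtt against an ENI's PrimaryPrivateIpAddress. For orientation, these fixtures declare templates as plain Python dicts; a stripped-down, hypothetical template in the same style (the literal SubnetId stands in for the VPC and subnet resources the real fixture creates):

    template = {
        "Resources": {
            "ENI": {
                "Type": "AWS::EC2::NetworkInterface",
                "Properties": {"SubnetId": "subnet-12345678"},  # hypothetical id
            }
        },
        "Outputs": {
            "ENIIpAddress": {
                "Description": "ENI's Private IP address",
                "Value": {"Fn::GetAtt": ["ENI", "PrimaryPrivateIpAddress"]},
            }
        },
    }

When the stack is created and its outputs are described, the mock resolves the Fn::GetAtt reference to the private IP it assigned to the ENI.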
@@ -184,6 +184,423 @@ dummy_import_template_json = json.dumps(dummy_import_template) dummy_redrive_template_json = json.dumps(dummy_redrive_template) +@mock_cloudformation +def test_boto3_describe_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance'].should.have.key('Region').which.should.equal('us-west-2') + usw2_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + use1_instance['StackInstance'].should.have.key('Region').which.should.equal('us-east-1') + use1_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + + +@mock_cloudformation +def test_boto3_list_stacksets_length(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_set( + StackSetName="test_stack_set2", + TemplateBody=dummy_template_yaml, + ) + stacksets = cf_conn.list_stack_sets() + stacksets.should.have.length_of(2) + + +@mock_cloudformation +def test_boto3_list_stacksets_contents(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + stacksets = cf_conn.list_stack_sets() + stacksets['Summaries'][0].should.have.key('StackSetName').which.should.equal('test_stack_set') + stacksets['Summaries'][0].should.have.key('Status').which.should.equal('ACTIVE') + + +@mock_cloudformation +def test_boto3_stop_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + list_operation = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set" + ) + list_operation['Summaries'][-1]['Status'].should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_describe_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.describe_stack_set_operation( + StackSetName="test_stack_set", 
+ OperationId=operation_id, + ) + + response['StackSetOperation']['Status'].should.equal('STOPPED') + response['StackSetOperation']['Action'].should.equal('CREATE') + + +@mock_cloudformation +def test_boto3_list_stack_set_operation_results(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.list_stack_set_operation_results( + StackSetName="test_stack_set", + OperationId=operation_id, + ) + + response['Summaries'].should.have.length_of(3) + response['Summaries'][0].should.have.key('Account').which.should.equal('123456789012') + response['Summaries'][1].should.have.key('Status').which.should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_update_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-west-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + usw1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-1', + ) + use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + 
use1_instance['StackInstance']['ParameterOverrides'].should.be.empty + + +@mock_cloudformation +def test_boto3_delete_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + cf_conn.delete_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1'], + RetainStacks=False, + ) + + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(1) + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Region'].should.equal( + 'us-west-2') + + +@mock_cloudformation +def test_boto3_create_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(2) + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Account'].should.equal( + '123456789012') + + +@mock_cloudformation +def test_boto3_create_stack_instances_with_param_overrides(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + +@mock_cloudformation +def test_update_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.update_stack_set( + StackSetName='test_stack_set', + TemplateBody=dummy_template_yaml_with_ref, + 
Parameters=param_overrides, + ) + stackset = cf_conn.describe_stack_set(StackSetName='test_stack_set') + + stackset['StackSet']['Parameters'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + stackset['StackSet']['Parameters'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + stackset['StackSet']['Parameters'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + stackset['StackSet']['Parameters'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + + +@mock_cloudformation +def test_boto3_list_stack_set_operations(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set") + list_operation['Summaries'].should.have.length_of(2) + list_operation['Summaries'][-1]['Action'].should.equal('UPDATE') + + +@mock_cloudformation +def test_boto3_delete_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.delete_stack_set(StackSetName='test_stack_set') + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['Status'].should.equal( + 'DELETED') + + +@mock_cloudformation +def test_boto3_create_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml) + + +@mock_cloudformation +@mock_s3 +def test_create_stack_set_from_s3_url(): + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + bucket = s3_conn.create_bucket(Bucket="foobar") + + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) + + cf_conn = boto3.client('cloudformation', region_name='us-west-1') + cf_conn.create_stack_set( + StackSetName='stack_from_url', + TemplateURL=key_url, + ) + cf_conn.describe_stack_set(StackSetName="stack_from_url")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_ref_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + 
cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml_with_ref) + + +@mock_cloudformation +def test_boto3_describe_stack_set_params(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['Parameters'].should.equal( + params) + @mock_cloudformation def test_boto3_create_stack(): @@ -391,11 +808,40 @@ def test_create_change_set_from_s3_url(): TemplateURL=key_url, ChangeSetName='NewChangeSet', ChangeSetType='CREATE', + Tags=[ + {'Key': 'tag-key', 'Value': 'tag-value'} + ], ) assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id'] assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId'] +@mock_cloudformation +def test_describe_change_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack['ChangeSetName'].should.equal('NewChangeSet') + stack['StackName'].should.equal('NewStack') + + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_update_template_json, + ChangeSetName='NewChangeSet2', + ChangeSetType='UPDATE', + ) + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2") + stack['ChangeSetName'].should.equal('NewChangeSet2') + stack['StackName'].should.equal('NewStack') + stack['Changes'].should.have.length_of(2) + + @mock_cloudformation def test_execute_change_set_w_arn(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') @@ -417,7 +863,7 @@ def test_execute_change_set_w_name(): ChangeSetName='NewChangeSet', ChangeSetType='CREATE', ) - cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack') + cf_conn.execute_change_set(ChangeSetName='NewChangeSet', StackName='NewStack') @mock_cloudformation @@ -486,6 +932,20 @@ def test_describe_stack_by_stack_id(): stack_by_id['StackName'].should.equal("test_stack") +@mock_cloudformation +def test_list_change_sets(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack2', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet2', + ChangeSetType='CREATE', + ) + change_set = cf_conn.list_change_sets(StackName='NewStack2')['Summaries'][0] + change_set['StackName'].should.equal('NewStack2') + change_set['ChangeSetName'].should.equal('NewChangeSet2') + + @mock_cloudformation def test_list_stacks(): cf = boto3.resource('cloudformation', region_name='us-east-1') @@ -518,6 +978,22 @@ def test_delete_stack_from_resource(): list(cf.stacks.all()).should.have.length_of(0) +@mock_cloudformation +@mock_ec2 +def test_delete_change_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + + cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(1) + cf_conn.delete_change_set(ChangeSetName='NewChangeSet', 
StackName='NewStack') + cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(0) + + @mock_cloudformation @mock_ec2 def test_delete_stack_by_name(): @@ -532,6 +1008,21 @@ def test_delete_stack_by_name(): cf_conn.describe_stacks()['Stacks'].should.have.length_of(0) +@mock_cloudformation +def test_delete_stack(): + cf = boto3.client('cloudformation', region_name='us-east-1') + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + cf.delete_stack( + StackName="test_stack", + ) + stacks = cf.list_stacks() + assert stacks['StackSummaries'][0]['StackStatus'] == 'DELETE_COMPLETE' + + @mock_cloudformation def test_describe_deleted_stack(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 2c808726f..449fde4ce 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -1952,7 +1952,12 @@ def lambda_handler(event, context): "Description": "Test function", "MemorySize": 128, "Role": "test-role", - "Runtime": "python2.7" + "Runtime": "python2.7", + "Environment": { + "Variables": { + "TEST_ENV_KEY": "test-env-val", + } + }, } } } @@ -1973,6 +1978,9 @@ def lambda_handler(event, context): result['Functions'][0]['MemorySize'].should.equal(128) result['Functions'][0]['Role'].should.equal('test-role') result['Functions'][0]['Runtime'].should.equal('python2.7') + result['Functions'][0]['Environment'].should.equal({ + "Variables": {"TEST_ENV_KEY": "test-env-val"} + }) @mock_cloudformation diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d25c69cf1..d21db2d48 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -448,8 +448,8 @@ def test_short_form_func_in_yaml_teamplate(): KeySplit: !Split [A, B] KeySub: !Sub A """ - yaml.add_multi_constructor('', yaml_tag_constructor) - template_dict = yaml.load(template) + yaml.add_multi_constructor('', yaml_tag_constructor, Loader=yaml.Loader) + template_dict = yaml.load(template, Loader=yaml.Loader) key_and_expects = [ ['KeyRef', {'Ref': 'foo'}], ['KeyB64', {'Fn::Base64': 'valueToEncode'}], diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py new file mode 100644 index 000000000..e2c3af05d --- /dev/null +++ b/tests/test_cloudformation/test_validate.py @@ -0,0 +1,115 @@ +from collections import OrderedDict +import json +import yaml +import os +import boto3 +from nose.tools import raises +import botocore + + +from moto.cloudformation.exceptions import ValidationError +from moto.cloudformation.models import FakeStack +from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export +from moto.sqs.models import Queue +from moto.s3.models import FakeBucket +from moto.cloudformation.utils import yaml_tag_constructor +from boto.cloudformation.stack import Output +from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 + +json_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-d3adb33f", + "KeyName": "dummy", + "InstanceType": "t2.micro", + "Tags": [ + { + "Key": "Description", + "Value": "Test tag" + }, + { + 
"Key": "Name", + "Value": "Name tag for tests" + } + ] + } + } + } +} + +# One resource is required +json_bad_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1" +} + +dummy_template_json = json.dumps(json_template) +dummy_bad_template_json = json.dumps(json_bad_template) + + +@mock_cloudformation +def test_boto3_json_validate_successful(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + response = cf_conn.validate_template( + TemplateBody=dummy_template_json, + ) + assert response['Description'] == "Stack 1" + assert response['Parameters'] == [] + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + +@mock_cloudformation +def test_boto3_json_invalid_missing_resource(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + try: + cf_conn.validate_template( + TemplateBody=dummy_bad_template_json, + ) + assert False + except botocore.exceptions.ClientError as e: + assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \ + ' with id Missing top level item Resources to file module does not exist' + assert True + + +yaml_template = """ + AWSTemplateFormatVersion: '2010-09-09' + Description: Simple CloudFormation Test Template + Resources: + S3Bucket: + Type: AWS::S3::Bucket + Properties: + AccessControl: PublicRead + BucketName: cf-test-bucket-1 +""" + +yaml_bad_template = """ + AWSTemplateFormatVersion: '2010-09-09' + Description: Simple CloudFormation Test Template +""" + +@mock_cloudformation +def test_boto3_yaml_validate_successful(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + response = cf_conn.validate_template( + TemplateBody=yaml_template, + ) + assert response['Description'] == "Simple CloudFormation Test Template" + assert response['Parameters'] == [] + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + +@mock_cloudformation +def test_boto3_yaml_invalid_missing_resource(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + try: + cf_conn.validate_template( + TemplateBody=yaml_bad_template, + ) + assert False + except botocore.exceptions.ClientError as e: + assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \ + ' with id Missing top level item Resources to file module does not exist' + assert True diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index f72a44762..e4e38e821 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1,14 +1,18 @@ from __future__ import unicode_literals -import boto3 import json import os +import random import uuid +import boto3 +# noinspection PyUnresolvedReferences +import sure # noqa +from botocore.exceptions import ClientError from jose import jws +from nose.tools import assert_raises from moto import mock_cognitoidp -import sure # noqa @mock_cognitoidp @@ -41,6 +45,56 @@ def test_list_user_pools(): result["UserPools"][0]["Name"].should.equal(name) +@mock_cognitoidp +def test_list_user_pools_returns_max_items(): + conn = boto3.client("cognito-idp", "us-west-2") + + # Given 10 user pools + pool_count = 10 + for i in range(pool_count): + conn.create_user_pool(PoolName=str(uuid.uuid4())) + + max_results = 5 + result = conn.list_user_pools(MaxResults=max_results) + result["UserPools"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + +@mock_cognitoidp +def 
test_list_user_pools_returns_next_tokens():
+    conn = boto3.client("cognito-idp", "us-west-2")
+
+    # Given 10 user pools
+    pool_count = 10
+    for i in range(pool_count):
+        conn.create_user_pool(PoolName=str(uuid.uuid4()))
+
+    max_results = 5
+    result = conn.list_user_pools(MaxResults=max_results)
+    result["UserPools"].should.have.length_of(max_results)
+    result.should.have.key("NextToken")
+
+    next_token = result["NextToken"]
+    result_2 = conn.list_user_pools(MaxResults=max_results, NextToken=next_token)
+    result_2["UserPools"].should.have.length_of(max_results)
+    result_2.shouldnt.have.key("NextToken")
+
+
+@mock_cognitoidp
+def test_list_user_pools_when_max_items_more_than_total_items():
+    conn = boto3.client("cognito-idp", "us-west-2")
+
+    # Given 10 user pools
+    pool_count = 10
+    for i in range(pool_count):
+        conn.create_user_pool(PoolName=str(uuid.uuid4()))
+
+    max_results = pool_count + 5
+    result = conn.list_user_pools(MaxResults=max_results)
+    result["UserPools"].should.have.length_of(pool_count)
+    result.shouldnt.have.key("NextToken")
+
+
 @mock_cognitoidp
 def test_describe_user_pool():
     conn = boto3.client("cognito-idp", "us-west-2")
@@ -140,6 +194,67 @@ def test_list_user_pool_clients():
     result["UserPoolClients"][0]["ClientName"].should.equal(client_name)
 
 
+@mock_cognitoidp
+def test_list_user_pool_clients_returns_max_items():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 user pool clients
+    client_count = 10
+    for i in range(client_count):
+        client_name = str(uuid.uuid4())
+        conn.create_user_pool_client(UserPoolId=user_pool_id,
+                                     ClientName=client_name)
+    max_results = 5
+    result = conn.list_user_pool_clients(UserPoolId=user_pool_id,
+                                         MaxResults=max_results)
+    result["UserPoolClients"].should.have.length_of(max_results)
+    result.should.have.key("NextToken")
+
+
+@mock_cognitoidp
+def test_list_user_pool_clients_returns_next_tokens():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 user pool clients
+    client_count = 10
+    for i in range(client_count):
+        client_name = str(uuid.uuid4())
+        conn.create_user_pool_client(UserPoolId=user_pool_id,
+                                     ClientName=client_name)
+    max_results = 5
+    result = conn.list_user_pool_clients(UserPoolId=user_pool_id,
+                                         MaxResults=max_results)
+    result["UserPoolClients"].should.have.length_of(max_results)
+    result.should.have.key("NextToken")
+
+    next_token = result["NextToken"]
+    result_2 = conn.list_user_pool_clients(UserPoolId=user_pool_id,
+                                           MaxResults=max_results,
+                                           NextToken=next_token)
+    result_2["UserPoolClients"].should.have.length_of(max_results)
+    result_2.shouldnt.have.key("NextToken")
+
+
+@mock_cognitoidp
+def test_list_user_pool_clients_when_max_items_more_than_total_items():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 user pool clients
+    client_count = 10
+    for i in range(client_count):
+        client_name = str(uuid.uuid4())
+        conn.create_user_pool_client(UserPoolId=user_pool_id,
+                                     ClientName=client_name)
+    max_results = client_count + 5
+    result = conn.list_user_pool_clients(UserPoolId=user_pool_id,
+                                         MaxResults=max_results)
+    result["UserPoolClients"].should.have.length_of(client_count)
+    result.shouldnt.have.key("NextToken")
+
+
 @mock_cognitoidp
 def test_describe_user_pool_client():
     conn = boto3.client("cognito-idp",
"us-west-2") @@ -264,6 +379,83 @@ def test_list_identity_providers(): result["Providers"][0]["ProviderType"].should.equal(provider_type) +@mock_cognitoidp +def test_list_identity_providers_returns_max_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, + MaxResults=max_results) + result["Providers"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + +@mock_cognitoidp +def test_list_identity_providers_returns_next_tokens(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=max_results) + result["Providers"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + next_token = result["NextToken"] + result_2 = conn.list_identity_providers(UserPoolId=user_pool_id, + MaxResults=max_results, + NextToken=next_token) + result_2["Providers"].should.have.length_of(max_results) + result_2.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_list_identity_providers_when_max_items_more_than_total_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = identity_provider_count + 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=max_results) + result["Providers"].should.have.length_of(identity_provider_count) + result.shouldnt.have.key("NextToken") + + @mock_cognitoidp def test_describe_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") @@ -292,6 +484,82 @@ def test_describe_identity_providers(): result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) +@mock_cognitoidp +def test_update_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + 
ProviderDetails={ + "thing": new_value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value) + + +@mock_cognitoidp +def test_update_identity_provider_no_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + new_value = str(uuid.uuid4()) + + with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId="foo", + ProviderName="bar", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp +def test_update_identity_provider_no_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName="foo", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + @mock_cognitoidp def test_delete_identity_providers(): conn = boto3.client("cognito-idp", "us-west-2") @@ -323,6 +591,245 @@ def test_delete_identity_providers(): caught.should.be.true +@mock_cognitoidp +def test_create_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + description = str(uuid.uuid4()) + role_arn = "arn:aws:iam:::role/my-iam-role" + precedence = random.randint(0, 100000) + + result = conn.create_group( + GroupName=group_name, + UserPoolId=user_pool_id, + Description=description, + RoleArn=role_arn, + Precedence=precedence, + ) + + result["Group"]["GroupName"].should.equal(group_name) + result["Group"]["UserPoolId"].should.equal(user_pool_id) + result["Group"]["Description"].should.equal(description) + result["Group"]["RoleArn"].should.equal(role_arn) + result["Group"]["Precedence"].should.equal(precedence) + result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") + result["Group"]["CreationDate"].should.be.a("datetime.datetime") + + +@mock_cognitoidp +def test_create_group_with_duplicate_name_raises_error(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + with assert_raises(ClientError) as cm: + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + cm.exception.operation_name.should.equal('CreateGroup') + cm.exception.response['Error']['Code'].should.equal('GroupExistsException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp +def test_get_group(): + conn = boto3.client("cognito-idp", 
"us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) + + result["Group"]["GroupName"].should.equal(group_name) + result["Group"]["UserPoolId"].should.equal(user_pool_id) + result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") + result["Group"]["CreationDate"].should.be.a("datetime.datetime") + + +@mock_cognitoidp +def test_list_groups(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.list_groups(UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name) + + +@mock_cognitoidp +def test_delete_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + with assert_raises(ClientError) as cm: + conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_cognitoidp +def test_admin_add_user_to_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + +@mock_cognitoidp +def test_admin_add_user_to_group_again_is_noop(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + +@mock_cognitoidp +def test_list_users_in_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) + + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username) + + +@mock_cognitoidp +def 
test_list_users_in_group_ignores_deleted_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + username2 = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username2) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username2, GroupName=group_name) + conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) + + result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) + + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username2) + + +@mock_cognitoidp +def test_admin_list_groups_for_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name) + + +@mock_cognitoidp +def test_admin_list_groups_for_user_ignores_deleted_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + group_name2 = str(uuid.uuid4()) + conn.create_group(GroupName=group_name2, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name2) + conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name2) + + +@mock_cognitoidp +def test_admin_remove_user_from_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.admin_remove_user_from_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) \ + ["Users"].should.have.length_of(0) + conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) \ + ["Groups"].should.have.length_of(0) + + 
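+# As with admin_add_user_to_group above, removing a user from a group when the
+# membership is already absent is expected to be a no-op rather than an error;
+# the test below only has to verify that the repeated call does not raise.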
+@mock_cognitoidp
+def test_admin_remove_user_from_group_again_is_noop():
+    conn = boto3.client("cognito-idp", "us-west-2")
+
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+    group_name = str(uuid.uuid4())
+    conn.create_group(GroupName=group_name, UserPoolId=user_pool_id)
+
+    username = str(uuid.uuid4())
+    conn.admin_create_user(UserPoolId=user_pool_id, Username=username)
+
+    conn.admin_remove_user_from_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name)
+    conn.admin_remove_user_from_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name)
+
+
 @mock_cognitoidp
 def test_admin_create_user():
     conn = boto3.client("cognito-idp", "us-west-2")
@@ -396,6 +903,62 @@ def test_list_users():
     result["Users"][0]["Username"].should.equal(username)
 
 
+@mock_cognitoidp
+def test_list_users_returns_limit_items():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 users
+    user_count = 10
+    for i in range(user_count):
+        conn.admin_create_user(UserPoolId=user_pool_id,
+                               Username=str(uuid.uuid4()))
+    max_results = 5
+    result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results)
+    result["Users"].should.have.length_of(max_results)
+    result.should.have.key("PaginationToken")
+
+
+@mock_cognitoidp
+def test_list_users_returns_pagination_tokens():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 users
+    user_count = 10
+    for i in range(user_count):
+        conn.admin_create_user(UserPoolId=user_pool_id,
+                               Username=str(uuid.uuid4()))
+
+    max_results = 5
+    result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results)
+    result["Users"].should.have.length_of(max_results)
+    result.should.have.key("PaginationToken")
+
+    next_token = result["PaginationToken"]
+    result_2 = conn.list_users(UserPoolId=user_pool_id,
+                               Limit=max_results, PaginationToken=next_token)
+    result_2["Users"].should.have.length_of(max_results)
+    result_2.shouldnt.have.key("PaginationToken")
+
+
+@mock_cognitoidp
+def test_list_users_when_limit_more_than_total_items():
+    conn = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
+
+    # Given 10 users
+    user_count = 10
+    for i in range(user_count):
+        conn.admin_create_user(UserPoolId=user_pool_id,
+                               Username=str(uuid.uuid4()))
+
+    max_results = user_count + 5
+    result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results)
+    result["Users"].should.have.length_of(user_count)
+    result.shouldnt.have.key("PaginationToken")
+
+
 @mock_cognitoidp
 def test_admin_disable_user():
     conn = boto3.client("cognito-idp", "us-west-2")
diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py
new file mode 100644
index 000000000..96c62455c
--- /dev/null
+++ b/tests/test_config/test_config.py
@@ -0,0 +1,491 @@
+from datetime import datetime, timedelta
+
+import boto3
+from botocore.exceptions import ClientError
+from nose.tools import assert_raises
+
+from moto.config import mock_config
+
+
+@mock_config
+def test_put_configuration_recorder():
+    client = boto3.client('config', region_name='us-west-2')
+
+    # Try without a name supplied:
+    with assert_raises(ClientError) as ce:
+        client.put_configuration_recorder(ConfigurationRecorder={'roleARN': 'somearn'})
+    assert ce.exception.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException'
+    assert 
'is not valid, blank string.' in ce.exception.response['Error']['Message'] + + # Try with a really long name: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={'name': 'a' * 257, 'roleARN': 'somearn'}) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message'] + + # With resource types and flags set to True: + bad_groups = [ + {'allSupported': True, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']}, + {'allSupported': False, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']}, + {'allSupported': True, 'includeGlobalResourceTypes': False, 'resourceTypes': ['item']}, + {'allSupported': False, 'includeGlobalResourceTypes': False, 'resourceTypes': []}, + {'includeGlobalResourceTypes': False, 'resourceTypes': []}, + {'includeGlobalResourceTypes': True}, + {'resourceTypes': []}, + {} + ] + + for bg in bad_groups: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'default', + 'roleARN': 'somearn', + 'recordingGroup': bg + }) + assert ce.exception.response['Error']['Code'] == 'InvalidRecordingGroupException' + assert ce.exception.response['Error']['Message'] == 'The recording group provided is not valid' + + # With an invalid Resource Type: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'default', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + # 2 good, and 2 bad: + 'resourceTypes': ['AWS::EC2::Volume', 'LOLNO', 'AWS::EC2::VPC', 'LOLSTILLNO'] + } + }) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str(ce.exception.response['Error']['Message']) + assert 'AWS::EC2::Instance' in ce.exception.response['Error']['Message'] + + # Create a proper one: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert not result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 2 + assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \ + and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes'] + + # Now update the configuration recorder: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': True, + 'includeGlobalResourceTypes': True + } + }) + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert result[0]['recordingGroup']['allSupported'] + assert result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 0 + + # With a default recording group (i.e. 
lacking one) + client.put_configuration_recorder(ConfigurationRecorder={'name': 'testrecorder', 'roleARN': 'somearn'}) + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert not result[0]['recordingGroup'].get('resourceTypes') + + # Can currently only have exactly 1 Config Recorder in an account/region: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'someotherrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + } + }) + assert ce.exception.response['Error']['Code'] == 'MaxNumberOfConfigurationRecordersExceededException' + assert "maximum number of configuration recorders: 1 is reached." in ce.exception.response['Error']['Message'] + + +@mock_config +def test_describe_configurations(): + client = boto3.client('config', region_name='us-west-2') + + # Without any configurations: + result = client.describe_configuration_recorders() + assert not result['ConfigurationRecorders'] + + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert not result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 2 + assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \ + and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes'] + + # Specify an incorrect name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorders(ConfigurationRecorderNames=['wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + # And with both a good and wrong name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorders(ConfigurationRecorderNames=['testrecorder', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_delivery_channels(): + client = boto3.client('config', region_name='us-west-2') + + # Try without a config recorder: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={}) + assert ce.exception.response['Error']['Code'] == 'NoAvailableConfigurationRecorderException' + assert ce.exception.response['Error']['Message'] == 'Configuration recorder is not available to ' \ + 'put delivery channel.' 
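+    # A delivery channel cannot exist without a configuration recorder, which
+    # is why the call above fails before one has been created.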
+ + # Create a config recorder to continue testing: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Try without a name supplied: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={}) + assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryChannelNameException' + assert 'is not valid, blank string.' in ce.exception.response['Error']['Message'] + + # Try with a really long name: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'a' * 257}) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message'] + + # Without specifying a bucket name: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel'}) + assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException' + assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.' + + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': ''}) + assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException' + assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.' + + # With an empty string for the S3 key prefix: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', 's3BucketName': 'somebucket', 's3KeyPrefix': ''}) + assert ce.exception.response['Error']['Code'] == 'InvalidS3KeyPrefixException' + assert 'empty s3 key prefix.' 
in ce.exception.response['Error']['Message'] + + # With an empty string for the SNS ARN: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', 's3BucketName': 'somebucket', 'snsTopicARN': ''}) + assert ce.exception.response['Error']['Code'] == 'InvalidSNSTopicARNException' + assert 'The sns topic arn' in ce.exception.response['Error']['Message'] + + # With an invalid delivery frequency: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'WRONG'} + }) + assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryFrequency' + assert 'WRONG' in ce.exception.response['Error']['Message'] + assert 'TwentyFour_Hours' in ce.exception.response['Error']['Message'] + + # Create a proper one: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 2 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + + # Overwrite it with another proper configuration: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'snsTopicARN': 'sometopicarn', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'} + }) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 4 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + assert result[0]['snsTopicARN'] == 'sometopicarn' + assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours' + + # Can only have 1: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel2', 's3BucketName': 'somebucket'}) + assert ce.exception.response['Error']['Code'] == 'MaxNumberOfDeliveryChannelsExceededException' + assert 'because the maximum number of delivery channels: 1 is reached.' 
in ce.exception.response['Error']['Message'] + + +@mock_config +def test_describe_delivery_channels(): + client = boto3.client('config', region_name='us-west-2') + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without any channels: + result = client.describe_delivery_channels() + assert not result['DeliveryChannels'] + + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 2 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + + # Overwrite it with another proper configuration: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'snsTopicARN': 'sometopicarn', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'} + }) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 4 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + assert result[0]['snsTopicARN'] == 'sometopicarn' + assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours' + + # Specify an incorrect name: + with assert_raises(ClientError) as ce: + client.describe_delivery_channels(DeliveryChannelNames=['wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + # And with both a good and wrong name: + with assert_raises(ClientError) as ce: + client.describe_delivery_channels(DeliveryChannelNames=['testchannel', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_start_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Without a config recorder: + with assert_raises(ClientError) as ce: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without a delivery channel: + with assert_raises(ClientError) as ce: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoAvailableDeliveryChannelException' + + # Make the delivery channel: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + + # Start it: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Verify it's enabled: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + lower_bound = (datetime.utcnow() - timedelta(minutes=5)) + assert result[0]['recording'] + assert result[0]['lastStatus'] == 'PENDING' + assert lower_bound < 
result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow() + + +@mock_config +def test_stop_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Without a config recorder: + with assert_raises(ClientError) as ce: + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Make the delivery channel for creation: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + + # Start it: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Verify it's disabled: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + lower_bound = (datetime.utcnow() - timedelta(minutes=5)) + assert not result[0]['recording'] + assert result[0]['lastStatus'] == 'PENDING' + assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStopTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow() + + +@mock_config +def test_describe_configuration_recorder_status(): + client = boto3.client('config', region_name='us-west-2') + + # Without any: + result = client.describe_configuration_recorder_status() + assert not result['ConfigurationRecordersStatus'] + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without specifying a config recorder: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert not result[0]['recording'] + + # With a proper name: + result = client.describe_configuration_recorder_status( + ConfigurationRecorderNames=['testrecorder'])['ConfigurationRecordersStatus'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert not result[0]['recording'] + + # Invalid name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorder_status(ConfigurationRecorderNames=['testrecorder', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_delete_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Delete it: + 
client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Try again -- it should be deleted: + with assert_raises(ClientError) as ce: + client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + +@mock_config +def test_delete_delivery_channel(): + client = boto3.client('config', region_name='us-west-2') + + # Need a recorder to test the constraint on recording being enabled: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # With the recorder enabled: + with assert_raises(ClientError) as ce: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + assert ce.exception.response['Error']['Code'] == 'LastDeliveryChannelDeleteFailedException' + assert 'because there is a running configuration recorder.' in ce.exception.response['Error']['Message'] + + # Stop recording: + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Try again: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + + # Verify: + with assert_raises(ClientError) as ce: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index afc919dd7..208453f0a 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -815,6 +815,16 @@ def test_scan_filter(): ) assert response['Count'] == 1 + response = table.scan( + FilterExpression=Attr('app').ne('app2') + ) + assert response['Count'] == 1 + + response = table.scan( + FilterExpression=Attr('app').ne('app1') + ) + assert response['Count'] == 0 + @mock_dynamodb2 def test_scan_filter2(): @@ -872,6 +882,26 @@ def test_scan_filter3(): ) assert response['Count'] == 1 + response = table.scan( + FilterExpression=Attr('active').ne(True) + ) + assert response['Count'] == 0 + + response = table.scan( + FilterExpression=Attr('active').ne(False) + ) + assert response['Count'] == 1 + + response = table.scan( + FilterExpression=Attr('app').ne(1) + ) + assert response['Count'] == 0 + + response = table.scan( + FilterExpression=Attr('app').ne(2) + ) + assert response['Count'] == 1 + @mock_dynamodb2 def test_scan_filter4(): @@ -919,6 +949,33 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') +@mock_dynamodb2 +def test_create_table_pay_per_request(): + client = boto3.client('dynamodb', region_name='us-east-1') + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + BillingMode="PAY_PER_REQUEST" + ) + + +@mock_dynamodb2 +def test_create_table_error_pay_per_request_with_provisioned_param(): + client = boto3.client('dynamodb', region_name='us-east-1') + + try: + client.create_table( + TableName='test1', + 
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+            KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+            ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123},
+            BillingMode="PAY_PER_REQUEST"
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ValidationException')
+    else:
+        raise RuntimeError('create_table should have raised ValidationException')
+
+
 @mock_dynamodb2
 def test_duplicate_create():
     client = boto3.client('dynamodb', region_name='us-east-1')
@@ -1000,6 +1057,11 @@ def test_delete_item():
     response = table.scan()
     assert response['Count'] == 2
 
+    # Test ReturnValues validation
+    with assert_raises(ClientError) as ex:
+        table.delete_item(Key={'client': 'client1', 'app': 'app1'},
+                          ReturnValues='ALL_NEW')
+
     # Test deletion and returning old value
     response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD')
     response['Attributes'].should.contain('client')
@@ -1246,6 +1308,81 @@ def test_update_if_not_exists():
     assert resp['Items'][0]['created_at'] == 123
 
 
+# https://github.com/spulec/moto/issues/1937
+@mock_dynamodb2
+def test_update_return_attributes():
+    dynamodb = boto3.client('dynamodb', region_name='us-east-1')
+
+    dynamodb.create_table(
+        TableName='moto-test',
+        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
+        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
+    )
+
+    def update(col, to, rv):
+        return dynamodb.update_item(
+            TableName='moto-test',
+            Key={'id': {'S': 'foo'}},
+            AttributeUpdates={col: {'Value': {'S': to}, 'Action': 'PUT'}},
+            ReturnValues=rv
+        )
+
+    r = update('col1', 'val1', 'ALL_NEW')
+    assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}}
+
+    r = update('col1', 'val2', 'ALL_OLD')
+    assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}}
+
+    r = update('col2', 'val3', 'UPDATED_NEW')
+    assert r['Attributes'] == {'col2': {'S': 'val3'}}
+
+    r = update('col2', 'val4', 'UPDATED_OLD')
+    assert r['Attributes'] == {'col2': {'S': 'val3'}}
+
+    r = update('col1', 'val5', 'NONE')
+    assert r['Attributes'] == {}
+
+    with assert_raises(ClientError) as ex:
+        r = update('col1', 'val6', 'WRONG')
+
+
+@mock_dynamodb2
+def test_put_return_attributes():
+    dynamodb = boto3.client('dynamodb', region_name='us-east-1')
+
+    dynamodb.create_table(
+        TableName='moto-test',
+        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
+        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
+    )
+
+    r = dynamodb.put_item(
+        TableName='moto-test',
+        Item={'id': {'S': 'foo'}, 'col1': {'S': 'val1'}},
+        ReturnValues='NONE'
+    )
+    assert 'Attributes' not in r
+
+    r = dynamodb.put_item(
+        TableName='moto-test',
+        Item={'id': {'S': 'foo'}, 'col1': {'S': 'val2'}},
+        ReturnValues='ALL_OLD'
+    )
+    assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}}
+
+    with assert_raises(ClientError) as ex:
+        dynamodb.put_item(
+            TableName='moto-test',
+            Item={'id': {'S': 'foo'}, 'col1': {'S': 'val3'}},
+            ReturnValues='ALL_NEW'
+        )
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
+    ex.exception.response['Error']['Message'].should.equal('Return values set to invalid value')
+
+
 @mock_dynamodb2
 def 
test_query_global_secondary_index_when_created_via_update_table_resource(): dynamodb = boto3.resource('dynamodb', region_name='us-east-1') @@ -1336,3 +1473,236 @@ def test_query_global_secondary_index_when_created_via_update_table_resource(): assert len(forum_and_subject_items) == 1 assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', 'subject': 'my pet is the cutest'} + + +@mock_dynamodb2 +def test_dynamodb_streams_1(): + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + ) + + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + assert 'LatestStreamArn' in resp['TableDescription'] + + resp = conn.delete_table(TableName='test-streams') + + assert 'StreamSpecification' in resp['TableDescription'] + + +@mock_dynamodb2 +def test_dynamodb_streams_2(): + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + TableName='test-stream-update', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + ) + + assert 'StreamSpecification' not in resp['TableDescription'] + + resp = conn.update_table( + TableName='test-stream-update', + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_IMAGE' + } + ) + + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'NEW_IMAGE' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + assert 'LatestStreamArn' in resp['TableDescription'] + +@mock_dynamodb2 +def test_condition_expressions(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match', + ExpressionAttributeNames={ + '#existing': 'existing', + '#nonexistent': 'nope', + '#match': 'match', + }, + ExpressionAttributeValues={ + ':match': {'S': 'match'} + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='NOT(attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2))', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2)', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='NOT(attribute_not_exists(#nonexistent1) AND attribute_not_exists(#nonexistent2))', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match', + ExpressionAttributeNames={ + '#existing': 'existing', + '#nonexistent': 'nope', + '#match': 'match', + }, + ExpressionAttributeValues={ + ':match': {'S': 'match2'} + } + ) + + +@mock_dynamodb2 +def test_query_gsi_with_range_key(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_hash_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_hash_key', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'gsi_range_key', + 'KeyType': 'RANGE' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 
'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + } + ) + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test2'}, + 'gsi_hash_key': {'S': 'key1'}, + } + ) + + res = dynamodb.query(TableName='test', IndexName='test_gsi', + KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key', + ExpressionAttributeValues={ + ':gsi_hash_key': {'S': 'key1'}, + ':gsi_range_key': {'S': 'range1'} + }) + res.should.have.key("Count").equal(1) + res.should.have.key("Items") + res['Items'][0].should.equal({ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + }) diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 15e5284b7..874804db0 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -750,6 +750,47 @@ def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_n returned_item = table.get_item(Key={'username': 'johndoe'}) assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_settype_item_with_conditions(): + class OrderedSet(set): + """A set with predictable iteration order""" + def __init__(self, values): + super(OrderedSet, self).__init__(values) + self.__ordered_values = values + + def __iter__(self): + return iter(self.__ordered_values) + + table = _create_user_table() + table.put_item(Item={'username': 'johndoe'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=:new_value', + ExpressionAttributeValues={ + ':new_value': OrderedSet(['hello', 'world']), + }, + ) + + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=:new_value', + ExpressionAttributeValues={ + ':new_value': set(['baz']), + }, + Expected={ + 'foo': { + 'ComparisonOperator': 'EQ', + 'AttributeValueList': [ + OrderedSet(['world', 'hello']), # Opposite order to original + ], + } + }, + ) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal(set(['baz'])) + + @mock_dynamodb2 def test_boto3_put_item_conditions_pass(): table = _create_user_table() diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py new file mode 100644 index 000000000..b60c21053 --- /dev/null +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -0,0 +1,234 @@ +from __future__ import unicode_literals, print_function + +from nose.tools import assert_raises + +import boto3 +from moto import mock_dynamodb2, mock_dynamodbstreams + + +class TestCore(): + stream_arn = None + mocks = [] + + def setup(self): + self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()] + for m in self.mocks: + m.start() + + # create a table with a stream + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', + 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + ) + self.stream_arn = resp['TableDescription']['LatestStreamArn'] + + def 
teardown(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + conn.delete_table(TableName='test-streams') + self.stream_arn = None + + for m in self.mocks: + m.stop() + + + def test_verify_stream(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + resp = conn.describe_table(TableName='test-streams') + assert 'LatestStreamArn' in resp['Table'] + + def test_describe_stream(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + assert 'StreamDescription' in resp + desc = resp['StreamDescription'] + assert desc['StreamArn'] == self.stream_arn + assert desc['TableName'] == 'test-streams' + + def test_list_streams(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.list_streams() + assert resp['Streams'][0]['StreamArn'] == self.stream_arn + + resp = conn.list_streams(TableName='no-stream') + assert not resp['Streams'] + + def test_get_shard_iterator(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON' + ) + assert 'ShardIterator' in resp + + def test_get_records_empty(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='LATEST' + ) + iterator_id = resp['ShardIterator'] + + resp = conn.get_records(ShardIterator=iterator_id) + assert 'Records' in resp + assert len(resp['Records']) == 0 + + def test_get_records_seq(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + + conn.put_item( + TableName='test-streams', + Item={ + 'id': {'S': 'entry1'}, + 'first_col': {'S': 'foo'} + } + ) + conn.put_item( + TableName='test-streams', + Item={ + 'id': {'S': 'entry1'}, + 'first_col': {'S': 'bar'}, + 'second_col': {'S': 'baz'} + } + ) + conn.delete_item( + TableName='test-streams', + Key={'id': {'S': 'entry1'}} + ) + + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON' + ) + iterator_id = resp['ShardIterator'] + + resp = conn.get_records(ShardIterator=iterator_id) + assert len(resp['Records']) == 3 + assert resp['Records'][0]['eventName'] == 'INSERT' + assert resp['Records'][1]['eventName'] == 'MODIFY' + assert resp['Records'][2]['eventName'] == 'DELETE' + + # now try fetching from the next shard iterator, it should be + # empty + resp = conn.get_records(ShardIterator=resp['NextShardIterator']) + assert len(resp['Records']) == 0 + + +class TestEdges(): + mocks = [] + + def setup(self): + self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()] + for m in self.mocks: + m.start() + + def teardown(self): + for m in self.mocks: + m.stop() + + + def test_enable_stream_on_table(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + resp = conn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', + 
'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1} + ) + assert 'StreamSpecification' not in resp['TableDescription'] + + resp = conn.update_table( + TableName='test-streams', + StreamSpecification={ + 'StreamViewType': 'KEYS_ONLY' + } + ) + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'KEYS_ONLY' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + + # now try to enable it again + with assert_raises(conn.exceptions.ResourceInUseException): + resp = conn.update_table( + TableName='test-streams', + StreamSpecification={ + 'StreamViewType': 'OLD_IMAGES' + } + ) + + def test_stream_with_range_key(self): + dyn = boto3.client('dynamodb', region_name='us-east-1') + + resp = dyn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'color', 'KeyType': 'RANGE'}], + AttributeDefinitions=[{'AttributeName': 'id', + 'AttributeType': 'S'}, + {'AttributeName': 'color', + 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamViewType': 'NEW_IMAGES' + } + ) + stream_arn = resp['TableDescription']['LatestStreamArn'] + + streams = boto3.client('dynamodbstreams', region_name='us-east-1') + resp = streams.describe_stream(StreamArn=stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = streams.get_shard_iterator( + StreamArn=stream_arn, + ShardId=shard_id, + ShardIteratorType='LATEST' + ) + iterator_id = resp['ShardIterator'] + + dyn.put_item( + TableName='test-streams', + Item={'id': {'S': 'row1'}, 'color': {'S': 'blue'}} + ) + dyn.put_item( + TableName='test-streams', + Item={'id': {'S': 'row2'}, 'color': {'S': 'green'}} + ) + + resp = streams.get_records(ShardIterator=iterator_id) + assert len(resp['Records']) == 2 + assert resp['Records'][0]['eventName'] == 'INSERT' + assert resp['Records'][1]['eventName'] == 'INSERT' + diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index a8d4d1b67..fd7234511 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -258,11 +258,11 @@ def test_ami_filters(): amis_by_name = conn.get_all_images(filters={'name': imageA.name}) set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) - amis_by_public = conn.get_all_images(filters={'is-public': True}) + amis_by_public = conn.get_all_images(filters={'is-public': 'true'}) set([ami.id for ami in amis_by_public]).should.contain(imageB.id) len(amis_by_public).should.equal(35) - amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) + amis_by_nonpublic = conn.get_all_images(filters={'is-public': 'false'}) set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) len(amis_by_nonpublic).should.equal(1) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 442e41dde..8f4a00b13 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -589,6 +589,18 @@ def test_volume_tag_escaping(): dict(snaps[0].tags).should.equal({'key': ''}) +@mock_ec2 +def test_volume_property_hidden_when_no_tags_exist(): + ec2_client = boto3.client('ec2', region_name='us-east-1') + + volume_response = ec2_client.create_volume( + Size=10, + AvailabilityZone='us-east-1a' + ) + + volume_response.get('Tags').should.equal(None) + + 
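A minimal standalone sketch of the decorator pattern all of these EC2 tests rely on, assuming only that moto and boto3 are installed (the test name below is illustrative, not part of the suite):

    import boto3
    from moto import mock_ec2

    @mock_ec2
    def test_volume_sketch():
        # Inside the decorated function, boto3 talks to moto's in-memory
        # EC2 backend rather than real AWS, so no credentials are needed.
        client = boto3.client('ec2', region_name='us-east-1')
        volume = client.create_volume(Size=10, AvailabilityZone='us-east-1a')
        assert volume['Size'] == 10
        # As test_volume_property_hidden_when_no_tags_exist asserts above,
        # a volume created without tags carries no 'Tags' key at all.
        assert volume.get('Tags') is None
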
@freeze_time @mock_ec2 def test_copy_snapshot(): @@ -602,26 +614,26 @@ def test_copy_snapshot(): create_snapshot_response = ec2_client.create_snapshot( VolumeId=volume_response['VolumeId'] ) - + copy_snapshot_response = dest_ec2_client.copy_snapshot( SourceSnapshotId=create_snapshot_response['SnapshotId'], SourceRegion="eu-west-1" ) - + ec2 = boto3.resource('ec2', region_name='eu-west-1') dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') - + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) - + attribs = ['data_encryption_key_id', 'encrypted', 'kms_key_id', 'owner_alias', 'owner_id', 'progress', 'state', 'state_message', 'tags', 'volume_id', 'volume_size'] - + for attrib in attribs: getattr(source, attrib).should.equal(getattr(dest, attrib)) - + # Copy from non-existent source ID. with assert_raises(ClientError) as cm: create_snapshot_error = ec2_client.create_snapshot( diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 828f9d917..70e78ae12 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -36,7 +36,8 @@ def test_elastic_network_interfaces(): all_enis.should.have.length_of(1) eni = all_enis[0] eni.groups.should.have.length_of(0) - eni.private_ip_addresses.should.have.length_of(0) + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.startswith('10.').should.be.true with assert_raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) @@ -354,9 +355,13 @@ def test_elastic_network_interfaces_cloudformation(): ) ec2_conn = boto.ec2.connect_to_region("us-west-1") eni = ec2_conn.get_all_network_interfaces()[0] + eni.private_ip_addresses.should.have.length_of(1) stack = conn.describe_stacks()[0] resources = stack.describe_resources() cfn_eni = [resource for resource in resources if resource.resource_type == 'AWS::EC2::NetworkInterface'][0] cfn_eni.physical_resource_id.should.equal(eni.id) + + outputs = {output.key: output.value for output in stack.outputs} + outputs['ENIIpAddress'].should.equal(eni.private_ip_addresses[0].private_ip_address) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 84b4fbd7d..c0f0eea4d 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1254,3 +1254,18 @@ def test_create_instance_ebs_optimized(): ) instance.load() instance.ebs_optimized.should.be(False) + +@mock_ec2 +def test_run_multiple_instances_in_same_command(): + instance_count = 4 + client = boto3.client('ec2', region_name='us-east-1') + client.run_instances(ImageId='ami-1234abcd', + MinCount=instance_count, + MaxCount=instance_count) + reservations = client.describe_instances()['Reservations'] + + reservations[0]['Instances'].should.have.length_of(instance_count) + + instances = reservations[0]['Instances'] + for i in range(0, instance_count): + instances[i]['AmiLaunchIndex'].should.be(i) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index fd2ec105e..9c92c949e 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -1,8 +1,9 @@ from __future__ import unicode_literals import boto +import boto3 import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -173,3 +174,43 @@ def test_network_acl_tagging(): 
if na.id == network_acl.id) test_network_acl.tags.should.have.length_of(1) test_network_acl.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_new_subnet_in_new_vpc_associates_with_default_network_acl(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + new_vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + new_vpc.reload() + + subnet = ec2.create_subnet(VpcId=new_vpc.id, CidrBlock='10.0.0.0/24') + subnet.reload() + + new_vpcs_default_network_acl = next(iter(new_vpc.network_acls.all()), None) + new_vpcs_default_network_acl.reload() + new_vpcs_default_network_acl.vpc_id.should.equal(new_vpc.id) + new_vpcs_default_network_acl.associations.should.have.length_of(1) + new_vpcs_default_network_acl.associations[0]['SubnetId'].should.equal(subnet.id) + + +@mock_ec2 +def test_default_network_acl_default_entries(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + unique_entries = [] + for entry in default_network_acl.entries: + entry['CidrBlock'].should.equal('0.0.0.0/0') + entry['Protocol'].should.equal('-1') + entry['RuleNumber'].should.be.within([100, 32767]) + entry['RuleAction'].should.be.within(['allow', 'deny']) + assert type(entry['Egress']) is bool + if entry['RuleAction'] == 'allow': + entry['RuleNumber'].should.be.equal(100) + else: + entry['RuleNumber'].should.be.equal(32767) + if entry not in unique_entries: + unique_entries.append(entry) + + unique_entries.should.have.length_of(4) diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index a2bd1d061..190f3b1f1 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -54,7 +54,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): }, 'EbsOptimized': False, 'WeightedCapacity': 2.0, - 'SpotPrice': '0.13' + 'SpotPrice': '0.13', }, { 'ImageId': 'ami-123', 'KeyName': 'my-key', @@ -148,6 +148,48 @@ def test_create_diversified_spot_fleet(): instances[0]['InstanceId'].should.contain("i-") +@mock_ec2 +def test_create_spot_fleet_request_with_tag_spec(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + tag_spec = [ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'tag-1', + 'Value': 'foo', + }, + { + 'Key': 'tag-2', + 'Value': 'bar', + }, + ] + }, + ] + config = spot_config(subnet_id) + config['LaunchSpecifications'][0]['TagSpecifications'] = tag_spec + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=config + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_config = spot_fleet_requests[0]['SpotFleetRequestConfig'] + spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0][ + 'ResourceType'].should.equal('instance') + for tag in tag_spec[0]['Tags']: + spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0]['Tags'].should.contain(tag) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = conn.describe_instances(InstanceIds=[i['InstanceId'] for i in instance_res['ActiveInstances']]) + for instance in instances['Reservations'][0]['Instances']: + for tag in tag_spec[0]['Tags']: + instance['Tags'].should.contain(tag) + + @mock_ec2 def test_cancel_spot_fleet_request(): conn = boto3.client("ec2", 
region_name='us-west-2') diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index c92a4f81f..2294979ba 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -5,6 +5,7 @@ import itertools import boto import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError from boto.ec2.instance import Reservation import sure # noqa @@ -451,3 +452,31 @@ def test_create_snapshot_with_tags(): }] assert snapshot['Tags'] == expected_tags + + +@mock_ec2 +def test_create_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # create tag with empty resource + with assert_raises(ClientError) as ex: + client.create_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') + + +@mock_ec2 +def test_delete_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # delete tag with empty resource + with assert_raises(ClientError) as ex: + client.delete_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 1f98791b3..082499a72 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -89,7 +89,8 @@ def test_vpc_peering_connections_delete(): verdict.should.equal(True) all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(0) + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('deleted') with assert_raises(EC2ResponseError) as cm: conn.delete_vpc_peering_connection("pcx-1234abcd") diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index a0e8318da..b147c4159 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -47,6 +47,15 @@ def test_list_clusters(): 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') +@mock_ecs +def test_describe_clusters(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.describe_clusters(clusters=["some-cluster"]) + response['failures'].should.contain({ + 'arn': 'arn:aws:ecs:us-east-1:012345678910:cluster/some-cluster', + 'reason': 'MISSING' + }) + @mock_ecs def test_delete_cluster(): client = boto3.client('ecs', region_name='us-east-1') @@ -379,23 +388,32 @@ def test_list_services(): cluster='test_ecs_cluster', serviceName='test_ecs_service1', taskDefinition='test_ecs_task', + schedulingStrategy='REPLICA', desiredCount=2 ) _ = client.create_service( cluster='test_ecs_cluster', serviceName='test_ecs_service2', taskDefinition='test_ecs_task', + schedulingStrategy='DAEMON', desiredCount=2 ) - response = client.list_services( + unfiltered_response = client.list_services( cluster='test_ecs_cluster' ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( + len(unfiltered_response['serviceArns']).should.equal(2) + unfiltered_response['serviceArns'][0].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( + unfiltered_response['serviceArns'][1].should.equal( 
'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2')
+    filtered_response = client.list_services(
+        cluster='test_ecs_cluster',
+        schedulingStrategy='REPLICA'
+    )
+    len(filtered_response['serviceArns']).should.equal(1)
+    filtered_response['serviceArns'][0].should.equal(
+        'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
 
 
 @mock_ecs
 def test_describe_services():
@@ -925,6 +943,65 @@ def test_update_container_instances_state():
         status='test_status').should.throw(Exception)
 
+
+@mock_ec2
+@mock_ecs
+def test_update_container_instances_state_by_arn():
+    ecs_client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+    _ = ecs_client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    instance_to_create = 3
+    test_instance_arns = []
+    for i in range(0, instance_to_create):
+        test_instance = ec2.create_instances(
+            ImageId="ami-1234abcd",
+            MinCount=1,
+            MaxCount=1,
+        )[0]
+
+        instance_id_document = json.dumps(
+            ec2_utils.generate_instance_identity_document(test_instance)
+        )
+
+        response = ecs_client.register_container_instance(
+            cluster=test_cluster_name,
+            instanceIdentityDocument=instance_id_document)
+
+        test_instance_arns.append(response['containerInstance']['containerInstanceArn'])
+
+    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
+                                                           containerInstances=test_instance_arns,
+                                                           status='DRAINING')
+    len(response['failures']).should.equal(0)
+    len(response['containerInstances']).should.equal(instance_to_create)
+    response_statuses = [ci['status'] for ci in response['containerInstances']]
+    for status in response_statuses:
+        status.should.equal('DRAINING')
+    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
+                                                           containerInstances=test_instance_arns,
+                                                           status='DRAINING')
+    len(response['failures']).should.equal(0)
+    len(response['containerInstances']).should.equal(instance_to_create)
+    response_statuses = [ci['status'] for ci in response['containerInstances']]
+    for status in response_statuses:
+        status.should.equal('DRAINING')
+    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
+                                                           containerInstances=test_instance_arns,
+                                                           status='ACTIVE')
+    len(response['failures']).should.equal(0)
+    len(response['containerInstances']).should.equal(instance_to_create)
+    response_statuses = [ci['status'] for ci in response['containerInstances']]
+    for status in response_statuses:
+        status.should.equal('ACTIVE')
+    ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name,
+                                                                 containerInstances=test_instance_arns,
+                                                                 status='test_status').should.throw(Exception)
+
+
 @mock_ec2
 @mock_ecs
 def test_run_task():
diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py
index 80630c5b8..a9d90ec32 100644
--- a/tests/test_events/test_events.py
+++ b/tests/test_events/test_events.py
@@ -1,5 +1,4 @@
 import random
-
 import boto3
 import json
 
@@ -7,7 +6,6 @@ from moto.events import mock_events
 from botocore.exceptions import ClientError
 from nose.tools import assert_raises
 
-
 RULES = [
     {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
     {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
@@ -109,6 +107,13 @@ def test_enable_disable_rule():
     rule = client.describe_rule(Name=rule_name)
     assert(rule['State'] == 'ENABLED')
 
+    # Test invalid name
+    try:
+        client.enable_rule(Name='junk')
+        raise RuntimeError('enable_rule should have raised ResourceNotFoundException')
+    except ClientError as ce:
+        assert ce.response['Error']['Code'] == 
'ResourceNotFoundException' + @mock_events def test_list_rule_names_by_target(): diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index bc23ff712..1cd6f9e62 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -3,7 +3,9 @@ import base64 import boto import boto3 +import os import sure # noqa +import sys from boto.exception import BotoServerError from botocore.exceptions import ClientError from moto import mock_iam, mock_iam_deprecated @@ -11,9 +13,23 @@ from moto.iam.models import aws_managed_policies from nose.tools import assert_raises, assert_equals from nose.tools import raises +from datetime import datetime from tests.helpers import requires_boto_gte +MOCK_CERT = """-----BEGIN CERTIFICATE----- +MIIBpzCCARACCQCY5yOdxCTrGjANBgkqhkiG9w0BAQsFADAXMRUwEwYDVQQKDAxt +b3RvIHRlc3RpbmcwIBcNMTgxMTA1MTkwNTIwWhgPMjI5MjA4MTkxOTA1MjBaMBcx +FTATBgNVBAoMDG1vdG8gdGVzdGluZzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC +gYEA1Jn3g2h7LD3FLqdpcYNbFXCS4V4eDpuTCje9vKFcC3pi/01147X3zdfPy8Mt +ZhKxcREOwm4NXykh23P9KW7fBovpNwnbYsbPqj8Hf1ZaClrgku1arTVhEnKjx8zO +vaR/bVLCss4uE0E0VM1tJn/QGQsfthFsjuHtwx8uIWz35tUCAwEAATANBgkqhkiG +9w0BAQsFAAOBgQBWdOQ7bDc2nWkUhFjZoNIZrqjyNdjlMUndpwREVD7FQ/DuxJMj +FyDHrtlrS80dPUQWNYHw++oACDpWO01LGLPPrGmuO/7cOdojPEd852q5gd+7W9xt +8vUH+pBa6IBLbvBp+szli51V3TLSWcoyy4ceJNQU2vCkTLoFdS0RLd/7tQ== +-----END CERTIFICATE-----""" + + @mock_iam_deprecated() def test_get_all_server_certs(): conn = boto.connect_iam() @@ -108,6 +124,10 @@ def test_create_role_and_instance_profile(): conn.list_roles().roles[0].role_name.should.equal('my-role') + # Test with an empty path: + profile = conn.create_instance_profile('my-other-profile') + profile.path.should.equal('/') + @mock_iam_deprecated() def test_remove_role_from_instance_profile(): @@ -283,8 +303,18 @@ def test_create_policy_versions(): PolicyDocument='{"some":"policy"}') version = conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument='{"some":"policy"}', + SetAsDefault=True) version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + version.get('PolicyVersion').get('VersionId').should.equal("v2") + conn.delete_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + VersionId="v1") + version = conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument='{"some":"policy"}') + version.get('PolicyVersion').get('VersionId').should.equal("v3") + @mock_iam def test_get_policy(): @@ -380,6 +410,19 @@ def test_get_user(): conn.get_user('my-user') +@mock_iam() +def test_update_user(): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.update_user(UserName='my-user') + conn.create_user(UserName='my-user') + conn.update_user(UserName='my-user', NewPath='/new-path/', NewUserName='new-user') + response = conn.get_user(UserName='new-user') + response['User'].get('Path').should.equal('/new-path/') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.get_user(UserName='my-user') + + @mock_iam_deprecated() def test_get_current_user(): """If no user is specific, IAM returns the current user""" @@ -536,6 +579,14 @@ def test_generate_credential_report(): result['generate_credential_report_response'][ 'generate_credential_report_result']['state'].should.equal('COMPLETE') +@mock_iam +def test_boto3_generate_credential_report(): + conn = 
boto3.client('iam', region_name='us-east-1') + result = conn.generate_credential_report() + result['State'].should.equal('STARTED') + result = conn.generate_credential_report() + result['State'].should.equal('COMPLETE') + @mock_iam_deprecated() def test_get_credential_report(): @@ -552,6 +603,20 @@ def test_get_credential_report(): report.should.match(r'.*my-user.*') +@mock_iam +def test_boto3_get_credential_report(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + with assert_raises(ClientError): + conn.get_credential_report() + result = conn.generate_credential_report() + while result['State'] != 'COMPLETE': + result = conn.generate_credential_report() + result = conn.get_credential_report() + report = result['Content'].decode('utf-8') + report.should.match(r'.*my-user.*') + + @requires_boto_gte('2.39') @mock_iam_deprecated() def test_managed_policy(): @@ -695,66 +760,536 @@ def test_update_access_key(): resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') +@mock_iam +def test_get_access_key_last_used(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.get_access_key_last_used(AccessKeyId='non-existent-key-id') + create_key_response = client.create_access_key(UserName=username)['AccessKey'] + resp = client.get_access_key_last_used(AccessKeyId=create_key_response['AccessKeyId']) + + datetime.strftime(resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d").should.equal(datetime.strftime( + datetime.utcnow(), + "%Y-%m-%d" + )) + resp["UserName"].should.equal(create_key_response["UserName"]) + + @mock_iam def test_get_account_authorization_details(): import json + test_policy = json.dumps({ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "s3:ListBucket", + "Resource": "*", + "Effect": "Allow", + } + ] + }) + conn = boto3.client('iam', region_name='us-east-1') conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") - conn.create_user(Path='/', UserName='testCloudAuxUser') - conn.create_group(Path='/', GroupName='testCloudAuxGroup') + conn.create_user(Path='/', UserName='testUser') + conn.create_group(Path='/', GroupName='testGroup') conn.create_policy( - PolicyName='testCloudAuxPolicy', + PolicyName='testPolicy', Path='/', - PolicyDocument=json.dumps({ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "s3:ListBucket", - "Resource": "*", - "Effect": "Allow", - } - ] - }), - Description='Test CloudAux Policy' + PolicyDocument=test_policy, + Description='Test Policy' ) + # Attach things to the user and group: + conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy) + conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy) + + conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + + conn.add_user_to_group(UserName='testUser', GroupName='testGroup') + + # Add things to the role: + conn.create_instance_profile(InstanceProfileName='ipn') + conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role') + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + 
conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy) + conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + result = conn.get_account_authorization_details(Filter=['Role']) - len(result['RoleDetailList']) == 1 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 + assert len(result['RoleDetailList']) == 1 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 0 + assert len(result['RoleDetailList'][0]['InstanceProfileList']) == 1 + assert len(result['RoleDetailList'][0]['Tags']) == 2 + assert len(result['RoleDetailList'][0]['RolePolicyList']) == 1 + assert len(result['RoleDetailList'][0]['AttachedManagedPolicies']) == 1 + assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['User']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 1 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 1 + assert len(result['UserDetailList'][0]['GroupList']) == 1 + assert len(result['UserDetailList'][0]['AttachedManagedPolicies']) == 1 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 0 + assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['Group']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 1 - len(result['Policies']) == 0 + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 1 + assert len(result['GroupDetailList'][0]['GroupPolicyList']) == 1 + assert len(result['GroupDetailList'][0]['AttachedManagedPolicies']) == 1 + assert len(result['Policies']) == 0 + assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 1 + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 1 + assert len(result['Policies'][0]['PolicyVersionList']) == 1 # Check for greater than 1 since this should always be greater than one but might change. 
# See iam/aws_managed_policies.py
     result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy'])
-    len(result['RoleDetailList']) == 0
-    len(result['UserDetailList']) == 0
-    len(result['GroupDetailList']) == 0
-    len(result['Policies']) > 1
+    assert len(result['RoleDetailList']) == 0
+    assert len(result['UserDetailList']) == 0
+    assert len(result['GroupDetailList']) == 0
+    assert len(result['Policies']) > 1
 
     result = conn.get_account_authorization_details()
-    len(result['RoleDetailList']) == 1
-    len(result['UserDetailList']) == 1
-    len(result['GroupDetailList']) == 1
-    len(result['Policies']) > 1
+    assert len(result['RoleDetailList']) == 1
+    assert len(result['UserDetailList']) == 1
+    assert len(result['GroupDetailList']) == 1
+    assert len(result['Policies']) > 1
+
+
+@mock_iam
+def test_signing_certs():
+    client = boto3.client('iam', region_name='us-east-1')
+
+    # Create the IAM user first:
+    client.create_user(UserName='testing')
+
+    # Upload the cert:
+    resp = client.upload_signing_certificate(UserName='testing', CertificateBody=MOCK_CERT)['Certificate']
+    cert_id = resp['CertificateId']
+
+    assert resp['UserName'] == 'testing'
+    assert resp['Status'] == 'Active'
+    assert resp['CertificateBody'] == MOCK_CERT
+    assert resp['CertificateId']
+
+    # Upload the cert with an invalid body:
+    with assert_raises(ClientError) as ce:
+        client.upload_signing_certificate(UserName='testing', CertificateBody='notacert')
+    assert ce.exception.response['Error']['Code'] == 'MalformedCertificate'
+
+    # Upload with an invalid user:
+    with assert_raises(ClientError):
+        client.upload_signing_certificate(UserName='notauser', CertificateBody=MOCK_CERT)
+
+    # Update:
+    client.update_signing_certificate(UserName='testing', CertificateId=cert_id, Status='Inactive')
+
+    with assert_raises(ClientError):
+        client.update_signing_certificate(UserName='notauser', CertificateId=cert_id, Status='Inactive')
+
+    with assert_raises(ClientError) as ce:
+        client.update_signing_certificate(UserName='testing', CertificateId='x' * 32, Status='Inactive')
+
+    assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format(
+        id='x' * 32)
+
+    # List the certs:
+    resp = client.list_signing_certificates(UserName='testing')['Certificates']
+    assert len(resp) == 1
+    assert resp[0]['CertificateBody'] == MOCK_CERT
+    assert resp[0]['Status'] == 'Inactive'  # Changed with the update call above.
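+    # An illustrative extra check (assuming moto, like IAM itself, keeps the
+    # certificate id stable between upload and list): the listed certificate
+    # should be the one uploaded above.
+    assert resp[0]['CertificateId'] == cert_id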
+ + with assert_raises(ClientError): + client.list_signing_certificates(UserName='notauser') + + # Delete: + client.delete_signing_certificate(UserName='testing', CertificateId=cert_id) + + with assert_raises(ClientError): + client.delete_signing_certificate(UserName='notauser', CertificateId=cert_id) + + +@mock_iam() +def test_create_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response['SAMLProviderArn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") + + +@mock_iam() +def test_get_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + saml_provider_create = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.get_saml_provider( + SAMLProviderArn=saml_provider_create['SAMLProviderArn'] + ) + response['SAMLMetadataDocument'].should.equal('a' * 1024) + + +@mock_iam() +def test_list_saml_providers(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.list_saml_providers() + response['SAMLProviderList'][0]['Arn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") + + +@mock_iam() +def test_delete_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + saml_provider_create = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.list_saml_providers() + len(response['SAMLProviderList']).should.equal(1) + conn.delete_saml_provider( + SAMLProviderArn=saml_provider_create['SAMLProviderArn'] + ) + response = conn.list_saml_providers() + len(response['SAMLProviderList']).should.equal(0) + conn.create_user(UserName='testing') + + cert_id = '123456789012345678901234' + with assert_raises(ClientError) as ce: + conn.delete_signing_certificate(UserName='testing', CertificateId=cert_id) + + assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format( + id=cert_id) + + # Verify that it's not in the list: + resp = conn.list_signing_certificates(UserName='testing') + assert not resp['Certificates'] + + +@mock_iam() +def test_tag_role(): + """Tests both the tag_role and get_role_tags capability""" + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") + + # Get without tags: + role = conn.get_role(RoleName='my-role')['Role'] + assert not role.get('Tags') + + # With proper tag values: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + + # Get role: + role = conn.get_role(RoleName='my-role')['Role'] + assert len(role['Tags']) == 2 + assert role['Tags'][0]['Key'] == 'somekey' + assert role['Tags'][0]['Value'] == 'somevalue' + assert role['Tags'][1]['Key'] == 'someotherkey' + assert role['Tags'][1]['Value'] == 'someothervalue' + + # Same -- but for list_role_tags: + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert role['Tags'][0]['Key'] == 'somekey' + assert role['Tags'][0]['Value'] == 'somevalue' + assert role['Tags'][1]['Key'] == 'someotherkey' + assert role['Tags'][1]['Value'] == 'someothervalue' + assert not tags['IsTruncated'] + assert not tags.get('Marker') + + # Test pagination: + tags = 
conn.list_role_tags(RoleName='my-role', MaxItems=1) + assert len(tags['Tags']) == 1 + assert tags['IsTruncated'] + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == 'somevalue' + assert tags['Marker'] == '1' + + tags = conn.list_role_tags(RoleName='my-role', Marker=tags['Marker']) + assert len(tags['Tags']) == 1 + assert tags['Tags'][0]['Key'] == 'someotherkey' + assert tags['Tags'][0]['Value'] == 'someothervalue' + assert not tags['IsTruncated'] + assert not tags.get('Marker') + + # Test updating an existing tag: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somenewvalue' + } + ]) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == 'somenewvalue' + + # Empty is good: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': '' + } + ]) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == '' + + # Test creating tags with invalid values: + # With more than 50 tags: + with assert_raises(ClientError) as ce: + too_many_tags = list(map(lambda x: {'Key': str(x), 'Value': str(x)}, range(0, 51))) + conn.tag_role(RoleName='my-role', Tags=too_many_tags) + assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ + in ce.exception.response['Error']['Message'] + + # With a duplicate tag: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': ''}, {'Key': '0', 'Value': ''}]) + assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \ + in ce.exception.response['Error']['Message'] + + # Duplicate tag with different casing: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': 'a', 'Value': ''}, {'Key': 'A', 'Value': ''}]) + assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \ + in ce.exception.response['Error']['Message'] + + # With a really big key: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0' * 129, 'Value': ''}]) + assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message'] + + # With a really big value: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': '0' * 257}]) + assert 'Member must have length less than or equal to 256.' 
in ce.exception.response['Error']['Message'] + + # With an invalid character: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': 'NOWAY!', 'Value': ''}]) + assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \ + in ce.exception.response['Error']['Message'] + + # With a role that doesn't exist: + with assert_raises(ClientError): + conn.tag_role(RoleName='notarole', Tags=[{'Key': 'some', 'Value': 'value'}]) + + +@mock_iam +def test_untag_role(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") + + # With proper tag values: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + + # Remove them: + conn.untag_role(RoleName='my-role', TagKeys=['somekey']) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 1 + assert tags['Tags'][0]['Key'] == 'someotherkey' + assert tags['Tags'][0]['Value'] == 'someothervalue' + + # And again: + conn.untag_role(RoleName='my-role', TagKeys=['someotherkey']) + tags = conn.list_role_tags(RoleName='my-role') + assert not tags['Tags'] + + # Test removing tags with invalid values: + # With more than 50 tags: + with assert_raises(ClientError) as ce: + conn.untag_role(RoleName='my-role', TagKeys=[str(x) for x in range(0, 51)]) + assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ + in ce.exception.response['Error']['Message'] + assert 'tagKeys' in ce.exception.response['Error']['Message'] + + # With a really big key: + with assert_raises(ClientError) as ce: + conn.untag_role(RoleName='my-role', TagKeys=['0' * 129]) + assert 'Member must have length less than or equal to 128.' 
in ce.exception.response['Error']['Message']
+    assert 'tagKeys' in ce.exception.response['Error']['Message']
+
+    # With an invalid character:
+    with assert_raises(ClientError) as ce:
+        conn.untag_role(RoleName='my-role', TagKeys=['NOWAY!'])
+    assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \
+        in ce.exception.response['Error']['Message']
+    assert 'tagKeys' in ce.exception.response['Error']['Message']
+
+    # With a role that doesn't exist:
+    with assert_raises(ClientError):
+        conn.untag_role(RoleName='notarole', TagKeys=['somevalue'])
+
+
+@mock_iam()
+def test_update_role_description():
+    conn = boto3.client('iam', region_name='us-east-1')
+
+    with assert_raises(ClientError):
+        conn.delete_role(RoleName="my-role")
+
+    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
+    response = conn.update_role_description(RoleName="my-role", Description="test")
+
+    assert response['Role']['RoleName'] == 'my-role'
+
+
+@mock_iam()
+def test_update_role():
+    conn = boto3.client('iam', region_name='us-east-1')
+
+    with assert_raises(ClientError):
+        conn.delete_role(RoleName="my-role")
+
+    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
+    response = conn.update_role(RoleName="my-role", Description="test")
+    assert len(response.keys()) == 1
+
+
+@mock_iam()
+def test_list_entities_for_policy():
+    import json
+    test_policy = json.dumps({
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Action": "s3:ListBucket",
+                "Resource": "*",
+                "Effect": "Allow",
+            }
+        ]
+    })
+
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
+    conn.create_user(Path='/', UserName='testUser')
+    conn.create_group(Path='/', GroupName='testGroup')
+    conn.create_policy(
+        PolicyName='testPolicy',
+        Path='/',
+        PolicyDocument=test_policy,
+        Description='Test Policy'
+    )
+
+    # Attach things to the user and group:
+    conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy)
+    conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy)
+
+    conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')
+    conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')
+
+    conn.add_user_to_group(UserName='testUser', GroupName='testGroup')
+
+    # Add things to the role:
+    conn.create_instance_profile(InstanceProfileName='ipn')
+    conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role')
+    conn.tag_role(RoleName='my-role', Tags=[
+        {
+            'Key': 'somekey',
+            'Value': 'somevalue'
+        },
+        {
+            'Key': 'someotherkey',
+            'Value': 'someothervalue'
+        }
+    ])
+    conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy)
+    conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy')
+
+    response = conn.list_entities_for_policy(
+        PolicyArn='arn:aws:iam::123456789012:policy/testPolicy',
EntityFilter='Role' + ) + assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='User', + ) + assert response['PolicyUsers'] == [{'UserName': 'testUser'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='Group', + ) + assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='LocalManagedPolicy', + ) + assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] + assert response['PolicyUsers'] == [{'UserName': 'testUser'}] + assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] + + +@mock_iam() +def test_create_role_no_path(): + conn = boto3.client('iam', region_name='us-east-1') + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test') + resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 5c6effd7a..826d2c56b 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -5,6 +5,8 @@ import sure # noqa import boto3 from moto import mock_iot +from botocore.exceptions import ClientError +from nose.tools import assert_raises @mock_iot @@ -261,6 +263,96 @@ def test_certs(): res.should.have.key('certificates').which.should.have.length_of(0) +@mock_iot +def test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated 
(not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + @mock_iot def test_certs_create_inactive(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -309,6 +401,47 @@ def test_policy(): @mock_iot def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + # attaching again does nothing if the policy is already attached to the certificate + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + with assert_raises(ClientError) as e: + client.detach_policy(policyName=policy_name, target=cert_arn) + e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_iot +def test_principal_policy_deprecated(): client = boto3.client('iot', region_name='ap-northeast-1') policy_name = 'my-policy' doc = '{}' diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8bccae27a..e7ce9f74b 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,16 +1,18 @@ from __future__ import unicode_literals import os, re - import boto3 import boto.kms +import botocore.exceptions from
boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException + +from moto.kms.exceptions import NotFoundException as MotoNotFoundException import sure # noqa from moto import mock_kms, mock_kms_deprecated from nose.tools import assert_raises from freezegun import freeze_time -from datetime import datetime, timedelta -from dateutil.tz import tzlocal +from datetime import datetime +from dateutil.tz import tzutc @mock_kms_deprecated @@ -128,7 +130,7 @@ def test_enable_key_rotation_via_arn(): def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.enable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -143,7 +145,7 @@ def test_enable_key_rotation_with_alias_name_should_fail(): alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) conn.enable_key_rotation.when.called_with( - 'alias/my-alias').should.throw(JSONResponseError) + 'alias/my-alias').should.throw(NotFoundException) @mock_kms_deprecated @@ -172,6 +174,7 @@ def test_encrypt(): conn = boto.kms.connect_to_region("us-west-2") response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') + response['KeyId'].should.equal('key_id') @mock_kms_deprecated @@ -185,14 +188,14 @@ def test_decrypt(): def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.disable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.get_key_rotation_status.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -278,7 +281,7 @@ def test_put_key_policy_via_alias_should_not_update(): target_key_id=key['KeyMetadata']['KeyId']) conn.put_key_policy.when.called_with( - 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + 'alias/my-key-alias', 'default', 'new policy').should.throw(NotFoundException) policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') policy['Policy'].should.equal('my policy') @@ -598,9 +601,9 @@ def test__assert_valid_key_id(): import uuid _assert_valid_key_id.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(MotoNotFoundException) _assert_valid_key_id.when.called_with( - str(uuid.uuid4())).should_not.throw(JSONResponseError) + str(uuid.uuid4())).should_not.throw(MotoNotFoundException) @mock_kms_deprecated @@ -608,9 +611,9 @@ def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy _assert_default_policy.when.called_with( - "not-default").should.throw(JSONResponseError) + "not-default").should.throw(MotoNotFoundException) _assert_default_policy.when.called_with( - "default").should_not.throw(JSONResponseError) + "default").should_not.throw(MotoNotFoundException) @mock_kms @@ -661,7 +664,7 @@ def test_schedule_key_deletion(): KeyId=key['KeyMetadata']['KeyId'] ) assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzutc()) else: # Can't manipulate time in server mode response = 
client.schedule_key_deletion( @@ -686,7 +689,7 @@ def test_schedule_key_deletion_custom(): PendingWindowInDays=7 ) assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzutc()) else: # Can't manipulate time in server mode response = client.schedule_key_deletion( @@ -717,3 +720,265 @@ def test_cancel_key_deletion(): assert result["KeyMetadata"]["Enabled"] == False assert result["KeyMetadata"]["KeyState"] == 'Disabled' assert 'DeletionDate' not in result["KeyMetadata"] + + +@mock_kms +def test_update_key_description(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='old_description') + key_id = key['KeyMetadata']['KeyId'] + + result = client.update_key_description(KeyId=key_id, Description='new_description') + assert 'ResponseMetadata' in result + + +@mock_kms +def test_tag_resource(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + + keyid = response['KeyId'] + response = client.tag_resource( + KeyId=keyid, + Tags=[ + { + 'TagKey': 'string', + 'TagValue': 'string' + }, + ] + ) + + # Shouldn't have any data, just header + assert len(response.keys()) == 1 + + +@mock_kms +def test_list_resource_tags(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + + keyid = response['KeyId'] + response = client.tag_resource( + KeyId=keyid, + Tags=[ + { + 'TagKey': 'string', + 'TagValue': 'string' + }, + ] + ) + + response = client.list_resource_tags(KeyId=keyid) + assert response['Tags'][0]['TagKey'] == 'string' + assert response['Tags'][0]['TagValue'] == 'string' + + +@mock_kms +def test_generate_data_key_sizes(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128' + ) + resp3 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=64 + ) + + assert len(resp1['Plaintext']) == 32 + assert len(resp2['Plaintext']) == 16 + assert len(resp3['Plaintext']) == 64 + + +@mock_kms +def test_generate_data_key_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.decrypt( + CiphertextBlob=resp1['CiphertextBlob'] + ) + + assert resp1['Plaintext'] == resp2['Plaintext'] + + +@mock_kms +def test_generate_data_key_invalid_size_params(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_257' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128', + NumberOfBytes=16 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + 
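# A NumberOfBytes above the 1024-byte maximum that KMS documents for generate_data_key should be rejected as well: +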
client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=2048 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + +@mock_kms +def test_generate_data_key_invalid_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId='alias/randomnonexistantkey', + KeySpec='AES_256' + ) + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + '4', + KeySpec='AES_256' + ) + + +@mock_kms +def test_generate_data_key_without_plaintext_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key_without_plaintext( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + + assert 'Plaintext' not in resp1 + + +@mock_kms +def test_enable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_enable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_cancel_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.cancel_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_schedule_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.schedule_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_rotation_status_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_rotation_status( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_policy( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02', + PolicyName='default' + ) + + +@mock_kms +def test_list_key_policies_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.list_key_policies( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_put_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with 
assert_raises(client.exceptions.NotFoundException): + client.put_key_policy( + KeyId='00000000-0000-0000-0000-000000000000', + PolicyName='default', + Policy='new policy' + ) + + diff --git a/tests/test_packages/__init__.py b/tests/test_packages/__init__.py new file mode 100644 index 000000000..bf582e0b3 --- /dev/null +++ b/tests/test_packages/__init__.py @@ -0,0 +1,8 @@ +from __future__ import unicode_literals + +import logging +# Disable extra logging for tests +logging.getLogger('boto').setLevel(logging.CRITICAL) +logging.getLogger('boto3').setLevel(logging.CRITICAL) +logging.getLogger('botocore').setLevel(logging.CRITICAL) +logging.getLogger('nose').setLevel(logging.CRITICAL) diff --git a/tests/test_packages/test_httpretty.py b/tests/test_packages/test_httpretty.py new file mode 100644 index 000000000..48277a2de --- /dev/null +++ b/tests/test_packages/test_httpretty.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from moto.packages.httpretty.core import HTTPrettyRequest, fake_gethostname, fake_gethostbyname + + +def test_parse_querystring(): + + core = HTTPrettyRequest(headers='test test HTTP/1.1') + + qs = 'test test' + response = core.parse_querystring(qs) + + assert response == {} + +def test_parse_request_body(): + core = HTTPrettyRequest(headers='test test HTTP/1.1') + + qs = 'test' + response = core.parse_request_body(qs) + + assert response == 'test' + +def test_fake_gethostname(): + + response = fake_gethostname() + + assert response == 'localhost' + +def test_fake_gethostbyname(): + + host = 'test' + response = fake_gethostbyname(host=host) + + assert response == '127.0.0.1' \ No newline at end of file diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 76217b9d9..d730f8dcf 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -98,6 +98,16 @@ def test_rrset(): rrsets.should.have.length_of(1) rrsets[0].resource_records[0].should.equal('5.6.7.8') + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "TXT") + change.add_value("foo") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid) + rrsets.should.have.length_of(2) + rrsets[0].resource_records[0].should.equal('5.6.7.8') + rrsets[1].resource_records[0].should.equal('foo') + changes = ResourceRecordSets(conn, zoneid) changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") changes.commit() @@ -164,8 +174,8 @@ def test_alias_rrset(): rrsets = conn.get_all_rrsets(zoneid, type="A") rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] rrset_records.should.have.length_of(2) - rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) - rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) + rrset_records.should.contain(('foo.alias.testdns.aws.com.', 'foo.testdns.aws.com')) + rrset_records.should.contain(('bar.alias.testdns.aws.com.', 'bar.testdns.aws.com')) rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') rrsets = conn.get_all_rrsets(zoneid, type="CNAME") rrsets.should.have.length_of(1) @@ -520,12 +530,12 @@ def test_change_resource_record_sets_crud_valid(): # Create A Record.
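+ # Record names below carry a trailing dot: moto, like Route53 itself, now returns record set names as fully qualified domain names.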
a_record_endpoint_payload = { - 'Comment': 'create A record prod.redis.db', + 'Comment': 'Create A record prod.redis.db', 'Changes': [ { 'Action': 'CREATE', 'ResourceRecordSet': { - 'Name': 'prod.redis.db', + 'Name': 'prod.redis.db.', 'Type': 'A', 'TTL': 10, 'ResourceRecords': [{ @@ -540,20 +550,20 @@ def test_change_resource_record_sets_crud_valid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(1) a_record_detail = response['ResourceRecordSets'][0] - a_record_detail['Name'].should.equal('prod.redis.db') + a_record_detail['Name'].should.equal('prod.redis.db.') a_record_detail['Type'].should.equal('A') a_record_detail['TTL'].should.equal(10) a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) - # Update type to CNAME + # Update A Record. cname_record_endpoint_payload = { - 'Comment': 'Update to CNAME prod.redis.db', + 'Comment': 'Update A record prod.redis.db', 'Changes': [ { 'Action': 'UPSERT', 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', + 'Name': 'prod.redis.db.', + 'Type': 'A', 'TTL': 60, 'ResourceRecords': [{ 'Value': '192.168.1.1' @@ -567,8 +577,8 @@ def test_change_resource_record_sets_crud_valid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(1) cname_record_detail = response['ResourceRecordSets'][0] - cname_record_detail['Name'].should.equal('prod.redis.db') - cname_record_detail['Type'].should.equal('CNAME') + cname_record_detail['Name'].should.equal('prod.redis.db.') + cname_record_detail['Type'].should.equal('A') cname_record_detail['TTL'].should.equal(60) cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) @@ -688,12 +698,12 @@ def test_list_resource_record_sets_name_type_filters(): # record_type, record_name all_records = [ - ('A', 'a.a.db'), - ('A', 'a.b.db'), - ('A', 'b.b.db'), - ('CNAME', 'b.b.db'), - ('CNAME', 'b.c.db'), - ('CNAME', 'c.c.db') + ('A', 'a.a.db.'), + ('A', 'a.b.db.'), + ('A', 'b.b.db.'), + ('CNAME', 'b.b.db.'), + ('CNAME', 'b.c.db.'), + ('CNAME', 'c.c.db.') ] for record_type, record_name in all_records: create_resource_record_set(record_type, record_name) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6e339abb6..6af23849c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -8,6 +8,7 @@ from functools import wraps from gzip import GzipFile from io import BytesIO import zlib +import pickle import json import boto @@ -65,6 +66,50 @@ class MyModel(object): s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) +@mock_s3 +def test_keys_are_pickleable(): + """Keys must be pickleable due to boto3 implementation details.""" + key = s3model.FakeKey('name', b'data!') + assert key.value == b'data!' + + pickled = pickle.dumps(key) + loaded = pickle.loads(pickled) + assert loaded.value == key.value + + +@mock_s3 +def test_append_to_value__basic(): + key = s3model.FakeKey('name', b'data!') + assert key.value == b'data!' + assert key.size == 5 + + key.append_to_value(b' And even more data') + assert key.value == b'data! And even more data' + assert key.size == 24 + + +@mock_s3 +def test_append_to_value__nothing_added(): + key = s3model.FakeKey('name', b'data!') + assert key.value == b'data!' + assert key.size == 5 + + key.append_to_value(b'') + assert key.value == b'data!' 
+ assert key.size == 5 + + +@mock_s3 +def test_append_to_value__empty_key(): + key = s3model.FakeKey('name', b'') + assert key.value == b'' + assert key.size == 0 + + key.append_to_value(b'stuff') + assert key.value == b'stuff' + assert key.size == 5 + + @mock_s3 def test_my_model_save(): # Create Bucket so that test can run @@ -373,6 +418,22 @@ def test_copy_key(): "new-key").get_contents_as_string().should.equal(b"some value") +@mock_s3_deprecated +def test_copy_key_with_unicode(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-unicode-💩-key" + key.set_contents_from_string("some value") + + bucket.copy_key('new-key', 'foobar', 'the-unicode-💩-key') + + bucket.get_key( + "the-unicode-💩-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") + + @mock_s3_deprecated def test_copy_key_with_version(): conn = boto.connect_s3('the_key', 'the_secret') @@ -383,7 +444,12 @@ def test_copy_key_with_version(): key.set_contents_from_string("some value") key.set_contents_from_string("another value") - bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') + key = [ + key.version_id + for key in bucket.get_all_versions() + if not key.is_latest + ][0] + bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id=key) bucket.get_key( "the-key").get_contents_as_string().should.equal(b"another value") @@ -757,16 +823,19 @@ def test_key_version(): bucket = conn.create_bucket('foobar') bucket.configure_versioning(versioning=True) + versions = [] + key = Key(bucket) key.key = 'the-key' key.version_id.should.be.none key.set_contents_from_string('some string') - key.version_id.should.equal('0') + versions.append(key.version_id) key.set_contents_from_string('some string') - key.version_id.should.equal('1') + versions.append(key.version_id) + set(versions).should.have.length_of(2) key = bucket.get_key('the-key') - key.version_id.should.equal('1') + key.version_id.should.equal(versions[-1]) @mock_s3_deprecated @@ -775,23 +844,25 @@ def test_list_versions(): bucket = conn.create_bucket('foobar') bucket.configure_versioning(versioning=True) + key_versions = [] + key = Key(bucket, 'the-key') key.version_id.should.be.none key.set_contents_from_string("Version 1") - key.version_id.should.equal('0') + key_versions.append(key.version_id) key.set_contents_from_string("Version 2") - key.version_id.should.equal('1') + key_versions.append(key.version_id) + key_versions.should.have.length_of(2) versions = list(bucket.list_versions()) - versions.should.have.length_of(2) versions[0].name.should.equal('the-key') - versions[0].version_id.should.equal('0') + versions[0].version_id.should.equal(key_versions[0]) versions[0].get_contents_as_string().should.equal(b"Version 1") versions[1].name.should.equal('the-key') - versions[1].version_id.should.equal('1') + versions[1].version_id.should.equal(key_versions[1]) versions[1].get_contents_as_string().should.equal(b"Version 2") key = Key(bucket, 'the2-key') @@ -977,6 +1048,15 @@ def test_bucket_location(): bucket.get_location().should.equal("us-west-2") +@mock_s3 +def test_bucket_location_us_east_1(): + cli = boto3.client('s3') + bucket_name = 'mybucket' + # No LocationConstraint ==> us-east-1 + cli.create_bucket(Bucket=bucket_name) + cli.get_bucket_location(Bucket=bucket_name)['LocationConstraint'].should.equal(None) + + @mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() @@ -1163,6 +1243,30 
@@ def test_boto3_list_keys_xml_escaped(): assert 'Owner' not in resp['Contents'][0] +@mock_s3 +def test_boto3_list_objects_v2_common_prefix_pagination(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + max_keys = 1 + keys = ['test/{i}/{i}'.format(i=i) for i in range(3)] + for key in keys: + s3.put_object(Bucket='mybucket', Key=key, Body=b'v') + + prefixes = [] + args = {"Bucket": 'mybucket', "Delimiter": "/", "Prefix": "test/", "MaxKeys": max_keys} + resp = {"IsTruncated": True} + while resp.get("IsTruncated", False): + if "NextContinuationToken" in resp: + args["ContinuationToken"] = resp["NextContinuationToken"] + resp = s3.list_objects_v2(**args) + if "CommonPrefixes" in resp: + assert len(resp["CommonPrefixes"]) == max_keys + prefixes.extend(i["Prefix"] for i in resp["CommonPrefixes"]) + + assert prefixes == [k[:k.rindex('/') + 1] for k in keys] + + @mock_s3 def test_boto3_list_objects_v2_truncated_response(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1300,6 +1404,16 @@ def test_bucket_create_duplicate(): exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') +@mock_s3 +def test_bucket_create_force_us_east_1(): + s3 = boto3.resource('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ + 'LocationConstraint': 'us-east-1', + }) + exc.exception.response['Error']['Code'].should.equal('InvalidLocationConstraint') + + @mock_s3 def test_boto3_bucket_create_eu_central(): s3 = boto3.resource('s3', region_name='eu-central-1') @@ -1379,16 +1493,22 @@ def test_boto3_head_object_with_versioning(): s3.Object('blah', 'hello.txt').put(Body=old_content) s3.Object('blah', 'hello.txt').put(Body=new_content) + versions = list(s3.Bucket('blah').object_versions.all()) + latest = list(filter(lambda item: item.is_latest, versions))[0] + oldest = list(filter(lambda item: not item.is_latest, versions))[0] + head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( Bucket='blah', Key='hello.txt') - head_object['VersionId'].should.equal('1') + head_object['VersionId'].should.equal(latest.id) head_object['ContentLength'].should.equal(len(new_content)) old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt', VersionId='0') - old_head_object['VersionId'].should.equal('0') + Bucket='blah', Key='hello.txt', VersionId=oldest.id) + old_head_object['VersionId'].should.equal(oldest.id) old_head_object['ContentLength'].should.equal(len(old_content)) + old_head_object['VersionId'].should_not.equal(head_object['VersionId']) + @mock_s3 def test_boto3_copy_object_with_versioning(): @@ -1403,9 +1523,6 @@ def test_boto3_copy_object_with_versioning(): obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] - # Versions should be the same - obj1_version.should.equal(obj2_version) - client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] @@ -1413,6 +1530,23 @@ def test_boto3_copy_object_with_versioning(): obj2_version_new.should_not.equal(obj2_version) +@mock_s3 +def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='src', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + 
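# The source bucket deliberately stays unversioned; versioning is enabled only on 'dest' below. +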
client.create_bucket(Bucket='dest', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='dest', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='src', Key='test', Body=b'content') + + obj2_version_new = client.copy_object(CopySource={'Bucket': 'src', 'Key': 'test'}, Bucket='dest', Key='test') \ + .get('VersionId') + + # VersionId should be present in the response + obj2_version_new.should_not.equal(None) + + @mock_s3 def test_boto3_deleted_versionings_list(): client = boto3.client('s3', region_name='us-east-1') @@ -1553,6 +1687,24 @@ def test_boto3_put_bucket_tagging(): }) resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + # With duplicate tag keys: + with assert_raises(ClientError) as err: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagOne", + "Value": "ValueOneAgain" + } + ] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidTag") + e.response["Error"]["Message"].should.equal("Cannot provide multiple Tags with the same key") @mock_s3 def test_boto3_get_bucket_tagging(): @@ -2385,6 +2537,75 @@ def test_boto3_list_object_versions(): response['Body'].read().should.equal(items[-1]) +@mock_s3 +def test_boto3_list_object_versions_with_versioning_disabled(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + + # One object version should be returned + len(response['Versions']).should.equal(1) + response['Versions'][0]['Key'].should.equal(key) + + # The version id should be the string null + response['Versions'][0]['VersionId'].should.equal('null') + + # Test latest object version is returned + response = s3.get_object(Bucket=bucket_name, Key=key) + response['Body'].read().should.equal(items[-1]) + + +@mock_s3 +def test_boto3_list_object_versions_with_versioning_enabled_late(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + items = (six.b('v1'), six.b('v2')) + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=six.b('v1') + ) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=six.b('v2') + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + + # Two object versions should be returned + len(response['Versions']).should.equal(2) + keys = set([item['Key'] for item in response['Versions']]) + keys.should.equal({key}) + + # There should still be a null version id. 
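+ # The first object was uploaded before versioning was enabled, so it keeps the literal 'null' version id, mirroring real S3 behavior.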
+ versionsId = set([item['VersionId'] for item in response['Versions']]) + versionsId.should.contain('null') + + # Test latest object version is returned + response = s3.get_object(Bucket=bucket_name, Key=key) + response['Body'].read().should.equal(items[-1]) + @mock_s3 def test_boto3_bad_prefix_list_object_versions(): s3 = boto3.client('s3', region_name='us-east-1') @@ -2441,18 +2662,25 @@ def test_boto3_delete_markers(): Bucket=bucket_name, Key=key ) - e.response['Error']['Code'].should.equal('404') + e.exception.response['Error']['Code'].should.equal('NoSuchKey') + + response = s3.list_object_versions( + Bucket=bucket_name + ) + response['Versions'].should.have.length_of(2) + response['DeleteMarkers'].should.have.length_of(1) s3.delete_object( Bucket=bucket_name, Key=key, - VersionId='2' + VersionId=response['DeleteMarkers'][0]['VersionId'] ) response = s3.get_object( Bucket=bucket_name, Key=key ) response['Body'].read().should.equal(items[-1]) + response = s3.list_object_versions( Bucket=bucket_name ) @@ -2461,10 +2689,8 @@ def test_boto3_delete_markers(): # We've asserted there is only 2 records so one is newest, one is oldest latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] - # Double check ordering of version ID's - latest['VersionId'].should.equal('1') - oldest['VersionId'].should.equal('0') + latest['VersionId'].should_not.equal(oldest['VersionId']) # Double check the name is still unicode latest['Key'].should.equal('key-with-versions-and-unicode-ó') @@ -2509,12 +2735,12 @@ def test_boto3_multiple_delete_markers(): s3.delete_object( Bucket=bucket_name, Key=key, - VersionId='2' + VersionId=response['DeleteMarkers'][0]['VersionId'] ) s3.delete_object( Bucket=bucket_name, Key=key, - VersionId='3' + VersionId=response['DeleteMarkers'][1]['VersionId'] ) response = s3.get_object( @@ -2530,8 +2756,7 @@ def test_boto3_multiple_delete_markers(): oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] # Double check ordering of version ID's - latest['VersionId'].should.equal('1') - oldest['VersionId'].should.equal('0') + latest['VersionId'].should_not.equal(oldest['VersionId']) # Double check the name is still unicode latest['Key'].should.equal('key-with-versions-and-unicode-ó') @@ -2581,3 +2806,17 @@ TEST_XML = """\ """ + +@mock_s3 +def test_boto3_bucket_name_too_long(): + s3 = boto3.client('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket='x'*64) + exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_boto3_bucket_name_too_short(): + s3 = boto3.client('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket='x'*2) + exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 9c8252a04..b179a2329 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -15,6 +15,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. 
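+ # Werkzeug's test client may otherwise send the request without a Content-Length header, which is what triggers those complaints.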
return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 434110e87..f6238dd28 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -13,6 +13,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index ec384a660..81ce93cc3 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -6,6 +6,8 @@ from moto import mock_secretsmanager from botocore.exceptions import ClientError import sure # noqa import string +import pytz +from datetime import datetime import unittest from nose.tools import assert_raises @@ -34,17 +36,139 @@ def test_get_secret_that_does_not_match(): with assert_raises(ClientError): result = conn.get_secret_value(SecretId='i-dont-match') + +@mock_secretsmanager +def test_get_secret_value_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + @mock_secretsmanager def test_create_secret(): conn = boto3.client('secretsmanager', region_name='us-east-1') result = conn.create_secret(Name='test-secret', SecretString="foosecret") - assert result['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad') + assert result['ARN'] assert result['Name'] == 'test-secret' secret = conn.get_secret_value(SecretId='test-secret') assert secret['SecretString'] == 'foosecret' +@mock_secretsmanager +def test_create_secret_with_tags(): + conn = boto3.client('secretsmanager', region_name='us-east-1') + secret_name = 'test-secret-with-tags' + + result = conn.create_secret( + Name=secret_name, + SecretString="foosecret", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}] + ) + assert result['ARN'] + assert result['Name'] == secret_name + secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value['SecretString'] == 'foosecret' + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details['Tags'] == [{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}] + + +@mock_secretsmanager +def test_delete_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + assert deleted_secret['ARN'] + assert deleted_secret['Name'] == 'test-secret' + assert deleted_secret['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + + secret_details = conn.describe_secret(SecretId='test-secret') + + assert secret_details['ARN'] + assert secret_details['Name'] == 'test-secret' + assert secret_details['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + +@mock_secretsmanager +def test_delete_secret_force(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + 
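# ForceDeleteWithoutRecovery should delete the secret immediately instead of scheduling a recovery window. +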
conn.create_secret(Name='test-secret', + SecretString='foosecret') + + result = conn.delete_secret(SecretId='test-secret', ForceDeleteWithoutRecovery=True) + + assert result['ARN'] + assert result['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + assert result['Name'] == 'test-secret' + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + +@mock_secretsmanager +def test_delete_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='i-dont-exist', ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_fails_with_both_force_delete_flag_and_recovery_window_flag(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=1, ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_short(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=6) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=31) + + +@mock_secretsmanager +def test_delete_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret') + + @mock_secretsmanager def test_get_random_password_default_length(): conn = boto3.client('secretsmanager', region_name='us-west-2') @@ -159,10 +283,17 @@ def test_describe_secret(): conn.create_secret(Name='test-secret', SecretString='foosecret') + conn.create_secret(Name='test-secret-2', + SecretString='barsecret') + secret_description = conn.describe_secret(SecretId='test-secret') + secret_description_2 = conn.describe_secret(SecretId='test-secret-2') + assert secret_description # Returned dict is not empty - assert secret_description['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad') + assert secret_description['Name'] == ('test-secret') + assert secret_description['ARN'] != '' # Test arn not empty + assert secret_description_2['Name'] == ('test-secret-2') + assert secret_description_2['ARN'] != '' # Test arn not empty @mock_secretsmanager def test_describe_secret_that_does_not_exist(): @@ -180,6 +311,82 @@ def test_describe_secret_that_does_not_match(): with assert_raises(ClientError): result = conn.get_secret_value(SecretId='i-dont-match') + +@mock_secretsmanager +def test_list_secrets_empty(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + secrets = conn.list_secrets() + + assert secrets['SecretList'] == [] + + +@mock_secretsmanager +def test_list_secrets(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + 
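+ # The second secret carries a tag so the listing below can verify Tags are returned per entry.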
+ conn.create_secret(Name='test-secret-2', + SecretString='barsecret', + Tags=[{ + 'Key': 'a', + 'Value': '1' + }]) + + secrets = conn.list_secrets() + + assert secrets['SecretList'][0]['ARN'] is not None + assert secrets['SecretList'][0]['Name'] == 'test-secret' + assert secrets['SecretList'][1]['ARN'] is not None + assert secrets['SecretList'][1]['Name'] == 'test-secret-2' + assert secrets['SecretList'][1]['Tags'] == [{ + 'Key': 'a', + 'Value': '1' + }] + + +@mock_secretsmanager +def test_restore_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + described_secret_before = conn.describe_secret(SecretId='test-secret') + assert described_secret_before['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + described_secret_after = conn.describe_secret(SecretId='test-secret') + assert 'DeletedDate' not in described_secret_after + + +@mock_secretsmanager +def test_restore_secret_that_is_not_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + +@mock_secretsmanager +def test_restore_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.restore_secret(SecretId='i-dont-exist') + + @mock_secretsmanager def test_rotate_secret(): secret_name = 'test-secret' @@ -190,9 +397,7 @@ def test_rotate_secret(): rotated_secret = conn.rotate_secret(SecretId=secret_name) assert rotated_secret - assert rotated_secret['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad' - ) + assert rotated_secret['ARN'] != '' # Test arn not empty assert rotated_secret['Name'] == secret_name assert rotated_secret['VersionId'] != '' @@ -216,6 +421,20 @@ def test_rotate_secret_enable_rotation(): assert rotated_description['RotationEnabled'] is True assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='test-secret') + + @mock_secretsmanager def test_rotate_secret_that_does_not_exist(): conn = boto3.client('secretsmanager', 'us-west-2') diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index e573f9b67..d0f495f57 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -82,11 +82,20 @@ def test_create_secret(): headers={ "X-Amz-Target": "secretsmanager.CreateSecret"}, ) + res_2 = test_client.post('/', + data={"Name": "test-secret-2", + "SecretString": "bar-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) json_data = json.loads(res.data.decode("utf-8")) - assert json_data['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad') + assert json_data['ARN'] != '' assert json_data['Name'] 
== 'test-secret' + + json_data_2 = json.loads(res_2.data.decode("utf-8")) + assert json_data_2['ARN'] != '' + assert json_data_2['Name'] == 'test-secret-2' @mock_secretsmanager def test_describe_secret(): @@ -107,12 +116,30 @@ def test_describe_secret(): "X-Amz-Target": "secretsmanager.DescribeSecret" }, ) + + create_secret_2 = test_client.post('/', + data={"Name": "test-secret-2", + "SecretString": "barsecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret_2 = test_client.post('/', + data={"SecretId": "test-secret-2"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) json_data = json.loads(describe_secret.data.decode("utf-8")) assert json_data # Returned dict is not empty - assert json_data['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' - ) + assert json_data['ARN'] != '' + assert json_data['Name'] == 'test-secret' + + json_data_2 = json.loads(describe_secret_2.data.decode("utf-8")) + assert json_data_2 # Returned dict is not empty + assert json_data_2['ARN'] != '' + assert json_data_2['Name'] == 'test-secret-2' @mock_secretsmanager def test_describe_secret_that_does_not_exist(): @@ -179,9 +206,7 @@ def test_rotate_secret(): json_data = json.loads(rotate_secret.data.decode("utf-8")) assert json_data # Returned dict is not empty - assert json_data['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' - ) + assert json_data['ARN'] != '' assert json_data['Name'] == 'test-secret' assert json_data['VersionId'] == client_request_token diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9beb9a3fa..d53ae50f7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -416,7 +416,9 @@ def test_send_receive_message_timestamps(): conn.create_queue(QueueName="test-queue") queue = sqs.Queue("test-queue") - queue.send_message(MessageBody="derp") + response = queue.send_message(MessageBody="derp") + assert response['ResponseMetadata']['RequestId'] + messages = conn.receive_message( QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index f8ef3a237..77d439d83 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -277,6 +277,18 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') response['Parameters'][0]['Version'].should.equal(2) +@mock_ssm +def test_put_parameter_china(): + client = boto3.client('ssm', region_name='cn-north-1') + + response = client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response['Version'].should.equal(1) + @mock_ssm def test_get_parameter(): @@ -319,13 +331,15 @@ def test_describe_parameters(): Name='test', Description='A test parameter', Value='value', - Type='String') + Type='String', + AllowedPattern=r'.*') response = client.describe_parameters() len(response['Parameters']).should.equal(1) response['Parameters'][0]['Name'].should.equal('test') response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['AllowedPattern'].should.equal(r'.*') @mock_ssm diff --git a/tox.ini b/tox.ini index 0f3f1466a..1fea4d81d 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,11 @@ envlist = py27, py36 [testenv] +setenv = + BOTO_CONFIG=/dev/null + AWS_SECRET_ACCESS_KEY=foobar_secret + AWS_ACCESS_KEY_ID=foobar_key + AWS_DEFAULT_REGION=us-east-1 deps = -r{toxinidir}/requirements.txt 
-r{toxinidir}/requirements-dev.txt