diff --git a/README.md b/README.md index 3fbee44f8..a6926a58f 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | S3 | @mock_s3 | core endpoints done | |------------------------------------------------------------------------------| +| SecretsManager | @mock_secretsmanager | basic endpoints done +|------------------------------------------------------------------------------| | SES | @mock_ses | all endpoints done | |------------------------------------------------------------------------------| | SNS | @mock_sns | all endpoints done | diff --git a/moto/__init__.py b/moto/__init__.py index 5e6f71b7a..0ce5e54d1 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -34,6 +34,7 @@ from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa from .ses import mock_ses, mock_ses_deprecated # flake8: noqa +from .secretsmanager import mock_secretsmanager # flake8: noqa from .sns import mock_sns, mock_sns_deprecated # flake8: noqa from .sqs import mock_sqs, mock_sqs_deprecated # flake8: noqa from .sts import mock_sts, mock_sts_deprecated # flake8: noqa diff --git a/moto/backends.py b/moto/backends.py index 496af13e1..cd8fe174f 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -32,6 +32,7 @@ from moto.redshift import redshift_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends +from moto.secretsmanager import secretsmanager_backends from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends @@ -77,6 +78,7 @@ BACKENDS = { 's3': s3_backends, 's3bucket_path': s3_backends, 'ses': ses_backends, + 'secretsmanager': secretsmanager_backends, 'sns': sns_backends, 'sqs': sqs_backends, 'ssm': ssm_backends, diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 19018158d..c4059a06b 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -96,6 +96,7 @@ NAME_TYPE_MAP = { "AWS::ElasticBeanstalk::Application": "ApplicationName", "AWS::ElasticBeanstalk::Environment": "EnvironmentName", "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName", + "AWS::ElasticLoadBalancingV2::TargetGroup": "Name", "AWS::RDS::DBInstance": "DBInstanceIdentifier", "AWS::S3::Bucket": "BucketName", "AWS::SNS::Topic": "TopicName", @@ -244,6 +245,22 @@ def resource_name_property_from_type(resource_type): return NAME_TYPE_MAP.get(resource_type) +def generate_resource_name(resource_type, stack_name, logical_id): + if resource_type == "AWS::ElasticLoadBalancingV2::TargetGroup": + # Target group names need to be less than 32 characters, so when cloudformation creates a name for you + # it makes sure to stay under that limit + name_prefix = '{0}-{1}'.format(stack_name, logical_id) + my_random_suffix = random_suffix() + truncated_name_prefix = name_prefix[0:32 - (len(my_random_suffix) + 1)] + # if the truncated name ends in a dash, we'll end up with a double dash in the final name, which is + # not allowed + if truncated_name_prefix.endswith('-'): + truncated_name_prefix = truncated_name_prefix[:-1] + return '{0}-{1}'.format(truncated_name_prefix, my_random_suffix) + else: + return '{0}-{1}-{2}'.format(stack_name, logical_id, random_suffix()) + + def 
parse_resource(logical_id, resource_json, resources_map): resource_type = resource_json['Type'] resource_class = resource_class_from_type(resource_type) @@ -258,15 +275,12 @@ def parse_resource(logical_id, resource_json, resources_map): if 'Properties' not in resource_json: resource_json['Properties'] = dict() if resource_name_property not in resource_json['Properties']: - resource_json['Properties'][resource_name_property] = '{0}-{1}-{2}'.format( - resources_map.get('AWS::StackName'), - logical_id, - random_suffix()) + resource_json['Properties'][resource_name_property] = generate_resource_name( + resource_type, resources_map.get('AWS::StackName'), logical_id) resource_name = resource_json['Properties'][resource_name_property] else: - resource_name = '{0}-{1}-{2}'.format(resources_map.get('AWS::StackName'), - logical_id, - random_suffix()) + resource_name = generate_resource_name(resource_type, resources_map.get('AWS::StackName'), logical_id) + return resource_class, resource_json, resource_name diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index c4aa10237..db6bf04a3 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -706,7 +706,9 @@ class DynamoDBBackend(BaseBackend): gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create - table.global_indexes = gsis_by_name.values() + # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other + # parts of the codebase + table.global_indexes = list(gsis_by_name.values()) return table def put_item(self, table_name, item_attrs, expected=None, overwrite=False): diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 0009ff131..4aecfcf78 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -11,6 +11,29 @@ def try_parse_int(value, default=None): return default +def parse_sg_attributes_from_dict(sg_attributes): + ip_protocol = sg_attributes.get('IpProtocol', [None])[0] + from_port = sg_attributes.get('FromPort', [None])[0] + to_port = sg_attributes.get('ToPort', [None])[0] + + ip_ranges = [] + ip_ranges_tree = sg_attributes.get('IpRanges') or {} + for ip_range_idx in sorted(ip_ranges_tree.keys()): + ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) + + source_groups = [] + source_group_ids = [] + groups_tree = sg_attributes.get('Groups') or {} + for group_idx in sorted(groups_tree.keys()): + group_dict = groups_tree[group_idx] + if 'GroupId' in group_dict: + source_group_ids.append(group_dict['GroupId'][0]) + elif 'GroupName' in group_dict: + source_groups.append(group_dict['GroupName'][0]) + + return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids + + class SecurityGroups(BaseResponse): def _process_rules_from_querystring(self): @@ -29,28 +52,17 @@ class SecurityGroups(BaseResponse): d = d[subkey] d[key_splitted[-1]] = value + if 'IpPermissions' not in querytree: + # Handle single rule syntax + ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree) + yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, + source_groups, source_group_ids) + ip_permissions = querytree.get('IpPermissions') or {} for ip_permission_idx in sorted(ip_permissions.keys()): ip_permission = ip_permissions[ip_permission_idx] - ip_protocol = ip_permission.get('IpProtocol', [None])[0] - from_port = ip_permission.get('FromPort', [None])[0] - to_port = ip_permission.get('ToPort', [None])[0] - - ip_ranges = [] 
- ip_ranges_tree = ip_permission.get('IpRanges') or {} - for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) - - source_groups = [] - source_group_ids = [] - groups_tree = ip_permission.get('Groups') or {} - for group_idx in sorted(groups_tree.keys()): - group_dict = groups_tree[group_idx] - if 'GroupId' in group_dict: - source_group_ids.append(group_dict['GroupId'][0]) - elif 'GroupName' in group_dict: - source_groups.append(group_dict['GroupName'][0]) + ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission) yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 3c51cd03f..9e32a84b6 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -177,7 +177,7 @@ class Task(BaseObject): class Service(BaseObject): - def __init__(self, cluster, service_name, task_definition, desired_count): + def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None): self.cluster_arn = cluster.arn self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format( service_name) @@ -199,7 +199,7 @@ class Service(BaseObject): 'updatedAt': datetime.now(pytz.utc), } ] - self.load_balancers = [] + self.load_balancers = load_balancers if load_balancers is not None else [] self.pending_count = 0 @property @@ -652,7 +652,7 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("Could not find task {} on cluster {}".format( task_str, cluster_name)) - def create_service(self, cluster_str, service_name, task_definition_str, desired_count): + def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None): cluster_name = cluster_str.split('/')[-1] if cluster_name in self.clusters: cluster = self.clusters[cluster_name] @@ -660,10 +660,12 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) task_definition = self.describe_task_definition(task_definition_str) desired_count = desired_count if desired_count is not None else 0 + service = Service(cluster, service_name, - task_definition, desired_count) + task_definition, desired_count, load_balancers) cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) self.services[cluster_service_pair] = service + return service def list_services(self, cluster_str): diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index e81e04145..9455d7a28 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -153,8 +153,9 @@ class EC2ContainerServiceResponse(BaseResponse): service_name = self._get_param('serviceName') task_definition_str = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') + load_balancers = self._get_param('loadBalancers') service = self.ecs_backend.create_service( - cluster_str, service_name, task_definition_str, desired_count) + cluster_str, service_name, task_definition_str, desired_count, load_balancers) return json.dumps({ 'service': service.response_object }) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 8921581d3..3925fa95d 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -124,10 +124,7 @@ class FakeTargetGroup(BaseModel): elbv2_backend = elbv2_backends[region_name] - # per cloudformation docs: - # The target group name should be shorter than 22 characters because - # AWS CloudFormation uses the target 
group name to create the name of the load balancer. - name = properties.get('Name', resource_name[:22]) + name = properties.get('Name') vpc_id = properties.get("VpcId") protocol = properties.get('Protocol') port = properties.get("Port") @@ -437,7 +434,7 @@ class ELBv2Backend(BaseBackend): def create_target_group(self, name, **kwargs): if len(name) > 32: raise InvalidTargetGroupNameError( - "Target group name '%s' cannot be longer than '22' characters" % name + "Target group name '%s' cannot be longer than '32' characters" % name ) if not re.match('^[a-zA-Z0-9\-]+$', name): raise InvalidTargetGroupNameError( diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 8c6e291ef..26515dfd2 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -168,3 +168,13 @@ class InvalidNotificationEvent(S3ClientError): "InvalidArgument", "The event is not supported for notifications", *args, **kwargs) + + +class InvalidStorageClass(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidStorageClass, self).__init__( + "InvalidStorageClass", + "The storage class you specified is not valid", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index 9e58fdb47..431c9c988 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -15,11 +15,12 @@ from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \ - InvalidNotificationDestination, MalformedXML + InvalidNotificationDestination, MalformedXML, InvalidStorageClass from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 UPLOAD_PART_MIN_SIZE = 5242880 +STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"] class FakeDeleteMarker(BaseModel): @@ -67,8 +68,10 @@ class FakeKey(BaseModel): def set_tagging(self, tagging): self._tagging = tagging - def set_storage_class(self, storage_class): - self._storage_class = storage_class + def set_storage_class(self, storage): + if storage is not None and storage not in STORAGE_CLASS: + raise InvalidStorageClass(storage=storage) + self._storage_class = storage def set_acl(self, acl): self.acl = acl @@ -676,6 +679,8 @@ class S3Backend(BaseBackend): def set_key(self, bucket_name, key_name, value, storage=None, etag=None): key_name = clean_key_name(key_name) + if storage is not None and storage not in STORAGE_CLASS: + raise InvalidStorageClass(storage=storage) bucket = self.get_bucket(bucket_name) diff --git a/moto/secretsmanager/__init__.py b/moto/secretsmanager/__init__.py new file mode 100644 index 000000000..c7fbb2869 --- /dev/null +++ b/moto/secretsmanager/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import secretsmanager_backends +from ..core.models import base_decorator + +secretsmanager_backend = secretsmanager_backends['us-east-1'] +mock_secretsmanager = base_decorator(secretsmanager_backends) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py new file mode 100644 index 000000000..fb09d20e4 --- /dev/null +++ b/moto/secretsmanager/models.py @@ -0,0 +1,49 @@ +from __future__ import unicode_literals + +import time +import json + +import boto3 + +from moto.core import BaseBackend, BaseModel + + +class SecretsManager(BaseModel): + + def __init__(self, region_name, **kwargs): + self.secret_id = kwargs.get('secret_id', '') + self.version_id = 
kwargs.get('version_id', '') + self.version_stage = kwargs.get('version_stage', '') + + + class SecretsManagerBackend(BaseBackend): + + def __init__(self, region_name=None, **kwargs): + super(SecretsManagerBackend, self).__init__() + self.region = region_name + self.secret_id = kwargs.get('secret_id', '') + self.createdate = int(time.time()) + + def get_secret_value(self, secret_id, version_id, version_stage): + + response = json.dumps({ + "ARN": self.secret_arn(), + "Name": self.secret_id, + "VersionId": "A435958A-D821-4193-B719-B7769357AER4", + "SecretString": "mysecretstring", + "VersionStages": [ + "AWSCURRENT", + ], + "CreatedDate": "2018-05-23 13:16:57.198000" + }) + + return response + + def secret_arn(self): + return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-rIjad".format( + self.region, self.secret_id) + + +available_regions = boto3.session.Session().get_available_regions("secretsmanager") +secretsmanager_backends = {region: SecretsManagerBackend(region_name=region) for region in available_regions} diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py new file mode 100644 index 000000000..144a254ec --- /dev/null +++ b/moto/secretsmanager/responses.py @@ -0,0 +1,17 @@ +from __future__ import unicode_literals + +from moto.core.responses import BaseResponse + +from .models import secretsmanager_backends + + +class SecretsManagerResponse(BaseResponse): + + def get_secret_value(self): + secret_id = self._get_param('SecretId') + version_id = self._get_param('VersionId') + version_stage = self._get_param('VersionStage') + return secretsmanager_backends[self.region].get_secret_value( + secret_id=secret_id, + version_id=version_id, + version_stage=version_stage) diff --git a/moto/secretsmanager/urls.py b/moto/secretsmanager/urls.py new file mode 100644 index 000000000..9e39e7263 --- /dev/null +++ b/moto/secretsmanager/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import SecretsManagerResponse + +url_bases = [ + "https?://secretsmanager.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': SecretsManagerResponse.dispatch, +} diff --git a/moto/sns/models.py b/moto/sns/models.py index ebdf5cd16..41e83aba4 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -24,6 +24,7 @@ from .utils import make_arn_for_topic, make_arn_for_subscription DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_PAGE_SIZE = 100 +MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB class Topic(BaseModel): @@ -327,6 +328,9 @@ class SNSBackend(BaseBackend): # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError('Subject must be less than 100 characters') + if len(message) > MAXIMUM_MESSAGE_LENGTH: + raise InvalidParameterValue("An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long") + try: topic = self.get_topic(arn) message_id = topic.publish(message, subject=subject, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 93188001f..ab8f25856 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,17 +1,17 @@ from __future__ import unicode_literals, print_function +from decimal import Decimal + import six import boto import boto3 -from boto3.dynamodb.conditions import Attr +from boto3.dynamodb.conditions import Attr, Key import sure # noqa import requests -from pytest import raises from moto import mock_dynamodb2, mock_dynamodb2_deprecated from
moto.dynamodb2 import dynamodb_backend2 from boto.exception import JSONResponseError from botocore.exceptions import ClientError -from boto3.dynamodb.conditions import Key from tests.helpers import requires_boto_gte import tests.backport_assert_raises @@ -1119,7 +1119,7 @@ def test_update_item_on_map(): }) # Test nested value for a nonexistent attribute. - with raises(client.exceptions.ConditionalCheckFailedException): + with assert_raises(client.exceptions.ConditionalCheckFailedException): table.update_item(Key={ 'forum_name': 'the-key', 'subject': '123' @@ -1200,3 +1200,95 @@ def test_update_if_not_exists(): resp = table.scan() # Still the original value assert resp['Items'][0]['created_at'] == 123 + + +@mock_dynamodb2 +def test_query_global_secondary_index_when_created_via_update_table_resource(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'user_id', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N', + }, + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + ) + table = dynamodb.Table('users') + table.update( + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + ], + GlobalSecondaryIndexUpdates=[ + {'Create': + { + 'IndexName': 'forum_name_index', + 'KeySchema': [ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + } + } + ] + ) + + next_user_id = 1 + for my_forum_name in ['cats', 'dogs']: + for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: + table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) + next_user_id += 1 + + # get all the cat users + forum_only_query_response = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + ) + forum_only_items = forum_only_query_response['Items'] + assert len(forum_only_items) == 3 + for item in forum_only_items: + assert item['forum_name'] == 'cats' + + # query all cat users with a particular subject + forum_and_subject_query_results = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + FilterExpression=Attr('subject').eq('my pet is the cutest'), + ) + forum_and_subject_items = forum_and_subject_query_results['Items'] + assert len(forum_and_subject_items) == 1 + assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', + 'subject': 'my pet is the cutest'} diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 0d7565a31..d843087a6 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -689,6 +689,31 @@ def test_authorize_and_revoke_in_bulk(): sg01.ip_permissions_egress.shouldnt.contain(ip_permission) +@mock_ec2 +def test_security_group_ingress_without_multirule(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + 
sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Fails + assert len(sg.ip_permissions) == 1 + + +@mock_ec2 +def test_security_group_ingress_without_multirule_after_reload(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Also Fails + sg_after = ec2.SecurityGroup(sg.id) + assert len(sg_after.ip_permissions) == 1 + + @mock_ec2_deprecated def test_get_all_security_groups_filter_with_same_vpc_id(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index d2cfd3724..7ff088676 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -8,7 +8,7 @@ import json from moto.ec2 import utils as ec2_utils from uuid import UUID -from moto import mock_cloudformation +from moto import mock_cloudformation, mock_elbv2 from moto import mock_ecs from moto import mock_ec2 from nose.tools import assert_raises @@ -2015,3 +2015,62 @@ def _fetch_container_instance_resources(container_instance_description): registered_resources['PORTS'] = \ [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] return remaining_resources, registered_resources + + +@mock_ecs +def test_create_service_load_balancing(): + client = boto3.client('ecs', region_name='us-east-1') + client.create_cluster( + clusterName='test_ecs_cluster' + ) + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + loadBalancers=[ + { + 'targetGroupArn': 'test_target_group_arn', + 'loadBalancerName': 'test_load_balancer_name', + 'containerName': 'test_container_name', + 'containerPort': 123 + } + ] + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(1) + response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( + 'test_target_group_arn') + response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( + 'test_load_balancer_name') + response['service']['loadBalancers'][0]['containerName'].should.equal( + 'test_container_name') + response['service']['loadBalancers'][0]['containerPort'].should.equal(123) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index ce092976a..b58345fdb 100644 --- 
a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals + +import json import os import boto3 import botocore @@ -6,7 +8,7 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises import sure # noqa -from moto import mock_elbv2, mock_ec2, mock_acm +from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation from moto.elbv2 import elbv2_backends @@ -416,6 +418,7 @@ def test_create_target_group_and_listeners(): response = conn.describe_target_groups() response.get('TargetGroups').should.have.length_of(0) + @mock_elbv2 @mock_ec2 def test_create_target_group_without_non_required_parameters(): @@ -454,6 +457,7 @@ def test_create_target_group_without_non_required_parameters(): target_group = response.get('TargetGroups')[0] target_group.should_not.be.none + @mock_elbv2 @mock_ec2 def test_create_invalid_target_group(): @@ -1105,6 +1109,50 @@ def test_describe_invalid_target_group(): conn.describe_target_groups(Names=['invalid']) +@mock_elbv2 +@mock_ec2 +def test_describe_target_groups_no_arguments(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + assert len(conn.describe_target_groups()['TargetGroups']) == 1 + + @mock_elbv2 def test_describe_account_limits(): client = boto3.client('elbv2', region_name='eu-central-1') @@ -1473,3 +1521,68 @@ def test_modify_listener_http_to_https(): {'Type': 'forward', 'TargetGroupArn': target_group_arn} ] ) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_create_target_groups_through_cloudformation(): + cfn_conn = boto3.client('cloudformation', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + + # test that setting a name manually as well as letting cloudformation create a name both work + # this is a special case because test groups have a name length limit of 22 characters, and must be unique + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": 
"AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + } + } + template_json = json.dumps(template) + cfn_conn.create_stack( + StackName="test-stack", + TemplateBody=template_json, + ) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response['TargetGroups'] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 + assert len( + [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] + ) == 2 diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py new file mode 100644 index 000000000..c4c83a285 --- /dev/null +++ b/tests/test_s3/test_s3_storageclass.py @@ -0,0 +1,106 @@ +from __future__ import unicode_literals + +import boto +import boto3 +from boto.exception import S3CreateError, S3ResponseError +from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule + +import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises + +from moto import mock_s3_deprecated, mock_s3 + + +@mock_s3 +def test_s3_storage_class_standard(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD") + + +@mock_s3 +def test_s3_storage_class_infrequent_access(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA") + + D = s3.list_objects(Bucket="Bucket") + + D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA") + + +@mock_s3 +def test_s3_storage_class_copy(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + # second object is originally of storage class REDUCED_REDUNDANCY + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2") + + s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA") + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + + # checks that a copied object can be properly copied + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA") + + +@mock_s3 +def test_s3_invalid_copied_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY") + + # Try to copy an object with an invalid storage class + with assert_raises(ClientError) as err: + 
s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + + +@mock_s3 +def test_s3_invalid_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # Try to add an object with an invalid storage class + with assert_raises(ClientError) as err: + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + +@mock_s3 +def test_s3_default_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + # tests that the default storage class is still STANDARD + list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + + + diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py new file mode 100644 index 000000000..df4f0f69e --- /dev/null +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals + +import boto3 + +from moto import mock_secretsmanager +import sure # noqa + +@mock_secretsmanager +def test_get_secret_value(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + result = conn.get_secret_value(SecretId='java-util-test-password') + assert result['SecretString'] == 'mysecretstring' diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py new file mode 100644 index 000000000..142e9fe7d --- /dev/null +++ b/tests/test_secretsmanager/test_server.py @@ -0,0 +1,27 @@ +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_secretsmanager + +''' +Test the different server responses +''' + + +@mock_secretsmanager +def test_get_secret_value(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + res = test_client.post('/', + data={"SecretId": "test", "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['SecretString'] == "mysecretstring" diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 65d2f25cc..3d598d406 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -10,6 +10,7 @@ import sure # noqa import responses from botocore.exceptions import ClientError +from nose.tools import assert_raises from moto import mock_sns, mock_sqs @@ -308,6 +309,20 @@ def test_publish_subject(): raise RuntimeError('Should have raised an InvalidParameter exception') +@mock_sns +def test_publish_message_too_long(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + with assert_raises(ClientError): + topic.publish( + Message="".join(["." for i in range(0, 262145)])) + + # message short enough - does not raise an error + topic.publish( + Message="".join(["." 
for i in range(0, 262144)])) + + def _setup_filter_policy_test(filter_policy): sns = boto3.resource('sns', region_name='us-east-1') topic = sns.create_topic(Name='some-topic')
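
A minimal usage sketch of the new @mock_secretsmanager decorator, mirroring the added test. The region and SecretId values below are arbitrary: this first cut of SecretsManagerBackend accepts them but always returns the hard-coded placeholder SecretString.

import boto3
from moto import mock_secretsmanager


@mock_secretsmanager
def read_secret():
    # Region is arbitrary; the backend keeps one instance per available region.
    client = boto3.client('secretsmanager', region_name='us-west-2')
    # SecretId/VersionStage are accepted but not yet used to look anything up.
    return client.get_secret_value(SecretId='any-secret-name')


assert read_secret()['SecretString'] == 'mysecretstring'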
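
The new generate_resource_name helper in moto/cloudformation/parsing.py keeps auto-generated target group names inside the 32-character limit. The sketch below repeats the same truncation arithmetic outside moto so it is easy to follow; the 12-character suffix is a made-up stand-in for random_suffix(), whose length the new ELBv2 test comment assumes to be 12.

def truncated_target_group_name(stack_name, logical_id, suffix):
    # Same idea as generate_resource_name: '<prefix>-<suffix>' must fit in 32
    # characters, and a prefix that gets cut on a dash is trimmed again so the
    # final name never contains a double dash.
    prefix = '{0}-{1}'.format(stack_name, logical_id)
    truncated = prefix[0:32 - (len(suffix) + 1)]
    if truncated.endswith('-'):
        truncated = truncated[:-1]
    return '{0}-{1}'.format(truncated, suffix)


name = truncated_target_group_name('test-stack', 'testGroup1', 'f3a0c2d1e4b5')
assert name == 'test-stack-testGrou-f3a0c2d1e4b5'
assert len(name) == 32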
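
parse_sg_attributes_from_dict is the piece of _process_rules_from_querystring that was factored out so the same parsing handles both the single-rule querystring form and each IpPermissions.N entry. A sketch of calling it directly on a hand-built dict shaped like one parsed IpPermissions entry (values are lists because querystring keys can repeat, and the nested indices are the string segments split out of keys such as IpPermissions.1.IpRanges.1.CidrIp):

from moto.ec2.responses.security_groups import parse_sg_attributes_from_dict

ip_permission_tree = {
    'IpProtocol': ['tcp'],
    'FromPort': ['22'],
    'ToPort': ['22'],
    'IpRanges': {'1': {'CidrIp': ['192.168.0.1/32']}},
}

(ip_protocol, from_port, to_port,
 ip_ranges, source_groups, source_group_ids) = parse_sg_attributes_from_dict(ip_permission_tree)

assert (ip_protocol, from_port, to_port) == ('tcp', '22', '22')
assert ip_ranges == ['192.168.0.1/32']
assert source_groups == [] and source_group_ids == []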
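
The dynamodb2 change wraps gsis_by_name.values() in list() because on Python 3 .values() returns a view object rather than a list. A short illustration of why the explicit copy matters:

import json

gsis_by_name = {'forum_name_index': {'IndexName': 'forum_name_index'}}

# On Python 3, .values() is a dict_values view; indexing it or passing it to
# json.dumps() fails, so the patched backend code materialises a real list.
global_indexes = list(gsis_by_name.values())

assert isinstance(global_indexes, list)
assert json.dumps(global_indexes)  # serialises fine once it is a real list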