Merge branch 'master' into redshift-copy-grants

commit 8a0cf49b7d
@@ -3,7 +3,7 @@ import logging
 # logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = 'moto'
-__version__ = '1.2.0',
+__version__ = '1.2.0'
 
 from .acm import mock_acm  # flake8: noqa
 from .apigateway import mock_apigateway, mock_apigateway_deprecated  # flake8: noqa
@@ -1,12 +1,11 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-import datetime
 import requests
+import time
 
 from moto.packages.responses import responses
 from moto.core import BaseBackend, BaseModel
-from moto.core.utils import iso_8601_datetime_with_milliseconds
 from .utils import create_id
 from .exceptions import StageNotFoundException
 
@@ -20,8 +19,7 @@ class Deployment(BaseModel, dict):
         self['id'] = deployment_id
         self['stageName'] = name
         self['description'] = description
-        self['createdDate'] = iso_8601_datetime_with_milliseconds(
-            datetime.datetime.now())
+        self['createdDate'] = int(time.time())
 
 
 class IntegrationResponse(BaseModel, dict):
@@ -300,7 +298,7 @@ class RestAPI(BaseModel):
         self.region_name = region_name
         self.name = name
         self.description = description
-        self.create_date = datetime.datetime.utcnow()
+        self.create_date = int(time.time())
 
         self.deployments = {}
         self.stages = {}
@@ -313,7 +311,7 @@ class RestAPI(BaseModel):
             "id": self.id,
             "name": self.name,
             "description": self.description,
-            "createdDate": iso_8601_datetime_with_milliseconds(self.create_date),
+            "createdDate": int(time.time()),
         }
 
     def add_child(self, path, parent_id=None):
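
Note on the three apigateway hunks above: ISO-8601 string timestamps are replaced with integer epoch seconds, matching the epoch representation the real API Gateway returns for createdDate (botocore turns the epoch value back into a datetime on the client side). A rough sketch of the two representations (plain Python, not moto's actual helper):

    import time
    import datetime

    created_epoch = int(time.time())  # new form: e.g. 1516450000

    # old form (approximately what iso_8601_datetime_with_milliseconds produced):
    created_iso = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
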
@@ -104,7 +104,7 @@ class _DockerDataVolumeContext:
 
             # It doesn't exist so we need to create it
             self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256)
-            container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True)
+            container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: {'bind': '/tmp/data', 'mode': 'rw'}}, detach=True)
             try:
                 tar_bytes = zip2tar(self._lambda_func.code_bytes)
                 container.put_archive('/tmp/data', tar_bytes)
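
The volumes argument now uses the mapping form documented for docker-py's containers.run(): each key is a volume name or host path, each value a dict with 'bind' (container mount point) and 'mode' ('rw' or 'ro'). A self-contained sketch, assuming a local Docker daemon, an available 'alpine' image, and a hypothetical volume name:

    import docker

    client = docker.from_env()
    # Mount the named volume 'lambda-code' read-write at /tmp/data in the container.
    container = client.containers.run(
        'alpine', 'sleep 100',
        volumes={'lambda-code': {'bind': '/tmp/data', 'mode': 'rw'}},
        detach=True,
    )
    container.stop()
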
@@ -309,7 +309,7 @@ class LambdaFunction(BaseModel):
         finally:
             if container:
                 try:
-                    exit_code = container.wait(timeout=300)
+                    exit_code = container.wait(timeout=300)['StatusCode']
                 except requests.exceptions.ReadTimeout:
                     exit_code = -1
                     container.stop()
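
The new ['StatusCode'] subscript tracks a breaking change in the Docker SDK for Python 3.0: Container.wait() now returns the API response as a dict rather than a bare integer exit code. A minimal sketch, again assuming a local Docker daemon:

    import docker

    client = docker.from_env()
    container = client.containers.run('alpine', 'true', detach=True)
    result = container.wait(timeout=300)   # e.g. {'StatusCode': 0}
    exit_code = result['StatusCode']       # 0 for a clean exit
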
@@ -345,6 +345,10 @@ class BaseResponse(_TemplateEnvironmentMixin):
             if is_tracked(name) or not name.startswith(param_prefix):
                 continue
 
+            if len(name) > len(param_prefix) and \
+                    not name[len(param_prefix):].startswith('.'):
+                continue
+
             match = self.param_list_regex.search(name[len(param_prefix):]) if len(name) > len(param_prefix) else None
             if match:
                 prefix = param_prefix + match.group(1)
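
The added guard stops a parameter prefix from matching names that merely share leading characters: once the name is longer than the prefix, the next character must be the '.' separator. A standalone sketch of the rule with hypothetical parameter names (not moto's actual parser):

    def belongs_to_prefix(name, prefix):
        # A longer name only belongs to the prefix when a '.' separator follows it.
        return name == prefix or name.startswith(prefix + '.')

    belongs_to_prefix('SpotFleetRequestConfig.SpotPrice', 'SpotFleetRequestConfig')  # True
    belongs_to_prefix('SpotFleetRequestConfigData', 'SpotFleetRequestConfig')        # False
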
@@ -2943,7 +2943,7 @@ class SpotFleetRequest(TaggedEC2Resource):
             'Properties']['SpotFleetRequestConfigData']
         ec2_backend = ec2_backends[region_name]
 
-        spot_price = properties['SpotPrice']
+        spot_price = properties.get('SpotPrice')
         target_capacity = properties['TargetCapacity']
         iam_fleet_role = properties['IamFleetRole']
         allocation_strategy = properties['AllocationStrategy']
@@ -2977,7 +2977,8 @@ class SpotFleetRequest(TaggedEC2Resource):
                 launch_spec_index += 1
         else:  # lowestPrice
             cheapest_spec = sorted(
-                self.launch_specs, key=lambda spec: float(spec.spot_price))[0]
+                # FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present
+                self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0]
             weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity)
             weight_map[cheapest_spec] = int(
                 weight_so_far // cheapest_spec.weighted_capacity)
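
The or '+inf' fallback works because Python's float() parses infinity literals, so launch specs without a SpotPrice sort behind every priced spec and are never chosen as cheapest. A quick demonstration:

    prices = ['0.05', None, '0.01']
    # None falls back to float('+inf') and therefore sorts last:
    sorted(prices, key=lambda p: float(p or '+inf'))
    # ['0.01', '0.05', None]
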
@@ -40,7 +40,7 @@ class SpotFleets(BaseResponse):
 
     def request_spot_fleet(self):
        spot_config = self._get_dict_param("SpotFleetRequestConfig.")
-        spot_price = spot_config['spot_price']
+        spot_price = spot_config.get('spot_price')
        target_capacity = spot_config['target_capacity']
        iam_fleet_role = spot_config['iam_fleet_role']
        allocation_strategy = spot_config['allocation_strategy']
@@ -78,7 +78,9 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
                 <spotFleetRequestId>{{ request.id }}</spotFleetRequestId>
                 <spotFleetRequestState>{{ request.state }}</spotFleetRequestState>
                 <spotFleetRequestConfig>
+                    {% if request.spot_price %}
                     <spotPrice>{{ request.spot_price }}</spotPrice>
+                    {% endif %}
                     <targetCapacity>{{ request.target_capacity }}</targetCapacity>
                     <iamFleetRole>{{ request.iam_fleet_role }}</iamFleetRole>
                     <allocationStrategy>{{ request.allocation_strategy }}</allocationStrategy>
@@ -93,7 +95,9 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
                             <iamInstanceProfile><arn>{{ launch_spec.iam_instance_profile }}</arn></iamInstanceProfile>
                             <keyName>{{ launch_spec.key_name }}</keyName>
                             <monitoring><enabled>{{ launch_spec.monitoring }}</enabled></monitoring>
+                            {% if launch_spec.spot_price %}
                             <spotPrice>{{ launch_spec.spot_price }}</spotPrice>
+                            {% endif %}
                             <userData>{{ launch_spec.user_data }}</userData>
                             <weightedCapacity>{{ launch_spec.weighted_capacity }}</weightedCapacity>
                             <groupSet>
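
Both template hunks use the same Jinja idiom: wrapping the element in {% if %} omits the tag entirely when spot_price is unset, instead of rendering an empty or 'None' value. A minimal standalone example:

    from jinja2 import Template

    t = Template("<cfg>{% if price %}<spotPrice>{{ price }}</spotPrice>{% endif %}</cfg>")
    t.render(price='0.05')  # '<cfg><spotPrice>0.05</spotPrice></cfg>'
    t.render(price=None)    # '<cfg></cfg>'
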
@@ -486,6 +486,10 @@ class ELBv2Backend(BaseBackend):
         arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self))
         listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions)
         balancer.listeners[listener.arn] = listener
+        for action in default_actions:
+            if action['target_group_arn'] in self.target_groups.keys():
+                target_group = self.target_groups[action['target_group_arn']]
+                target_group.load_balancer_arns.append(load_balancer_arn)
         return listener
 
     def describe_load_balancers(self, arns, names):
@@ -108,3 +108,24 @@ class ResourceNotFoundFaultError(RedshiftClientError):
         msg = message
         super(ResourceNotFoundFaultError, self).__init__(
             'ResourceNotFoundFault', msg)
+
+
+class SnapshotCopyDisabledFaultError(RedshiftClientError):
+    def __init__(self, cluster_identifier):
+        super(SnapshotCopyDisabledFaultError, self).__init__(
+            'SnapshotCopyDisabledFault',
+            "Cannot modify retention period because snapshot copy is disabled on Cluster {0}.".format(cluster_identifier))
+
+
+class SnapshotCopyAlreadyDisabledFaultError(RedshiftClientError):
+    def __init__(self, cluster_identifier):
+        super(SnapshotCopyAlreadyDisabledFaultError, self).__init__(
+            'SnapshotCopyAlreadyDisabledFault',
+            "Snapshot Copy is already disabled on Cluster {0}.".format(cluster_identifier))
+
+
+class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError):
+    def __init__(self, cluster_identifier):
+        super(SnapshotCopyAlreadyEnabledFaultError, self).__init__(
+            'SnapshotCopyAlreadyEnabledFault',
+            "Snapshot Copy is already enabled on Cluster {0}.".format(cluster_identifier))
@@ -4,6 +4,7 @@ import copy
 import datetime
 
 import boto.redshift
+from botocore.exceptions import ClientError
 from moto.compat import OrderedDict
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_with_milliseconds
@@ -18,8 +19,11 @@ from .exceptions import (
     InvalidParameterValueError,
     InvalidSubnetError,
     ResourceNotFoundFaultError,
+    SnapshotCopyAlreadyDisabledFaultError,
+    SnapshotCopyAlreadyEnabledFaultError,
+    SnapshotCopyDisabledFaultError,
     SnapshotCopyGrantAlreadyExistsFaultError,
-    SnapshotCopyGrantNotFoundFaultError
+    SnapshotCopyGrantNotFoundFaultError,
 )
 
 
@@ -196,7 +200,7 @@ class Cluster(TaggableResourceMixin, BaseModel):
         return self.cluster_identifier
 
     def to_json(self):
-        return {
+        json_response = {
             "MasterUsername": self.master_username,
             "MasterUserPassword": "****",
             "ClusterVersion": self.cluster_version,
@@ -233,6 +237,12 @@ class Cluster(TaggableResourceMixin, BaseModel):
             "Tags": self.tags
         }
+
+        try:
+            json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status
+        except AttributeError:
+            pass
+        return json_response
 
 
 class SnapshotCopyGrant(TaggableResourceMixin, BaseModel):
 
@@ -435,6 +445,43 @@ class RedshiftBackend(BaseBackend):
         self.__dict__ = {}
         self.__init__(ec2_backend, region_name)
 
+    def enable_snapshot_copy(self, **kwargs):
+        cluster_identifier = kwargs['cluster_identifier']
+        cluster = self.clusters[cluster_identifier]
+        if not hasattr(cluster, 'cluster_snapshot_copy_status'):
+            if cluster.encrypted == 'true' and kwargs['snapshot_copy_grant_name'] is None:
+                raise ClientError(
+                    'InvalidParameterValue',
+                    'SnapshotCopyGrantName is required for Snapshot Copy '
+                    'on KMS encrypted clusters.'
+                )
+            status = {
+                'DestinationRegion': kwargs['destination_region'],
+                'RetentionPeriod': kwargs['retention_period'],
+                'SnapshotCopyGrantName': kwargs['snapshot_copy_grant_name'],
+            }
+            cluster.cluster_snapshot_copy_status = status
+            return cluster
+        else:
+            raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier)
+
+    def disable_snapshot_copy(self, **kwargs):
+        cluster_identifier = kwargs['cluster_identifier']
+        cluster = self.clusters[cluster_identifier]
+        if hasattr(cluster, 'cluster_snapshot_copy_status'):
+            del cluster.cluster_snapshot_copy_status
+            return cluster
+        else:
+            raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier)
+
+    def modify_snapshot_copy_retention_period(self, cluster_identifier, retention_period):
+        cluster = self.clusters[cluster_identifier]
+        if hasattr(cluster, 'cluster_snapshot_copy_status'):
+            cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period
+            return cluster
+        else:
+            raise SnapshotCopyDisabledFaultError(cluster_identifier)
+
     def create_cluster(self, **cluster_kwargs):
         cluster_identifier = cluster_kwargs['cluster_identifier']
         cluster = Cluster(self, **cluster_kwargs)
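
In the backend methods above, "snapshot copy enabled" is modeled purely as the presence of the cluster_snapshot_copy_status attribute: hasattr() and del act as the on/off switch, and to_json() only emits the key when the attribute exists. A minimal sketch of the pattern (a hypothetical class, not moto code):

    class FakeCluster(object):
        pass

    cluster = FakeCluster()
    hasattr(cluster, 'cluster_snapshot_copy_status')   # False: copy disabled
    cluster.cluster_snapshot_copy_status = {'RetentionPeriod': 7}
    hasattr(cluster, 'cluster_snapshot_copy_status')   # True: copy enabled
    del cluster.cluster_snapshot_copy_status           # disabled again
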
@@ -550,3 +550,58 @@ class RedshiftResponse(BaseResponse):
                 }
             }
         })
+
+    def enable_snapshot_copy(self):
+        snapshot_copy_kwargs = {
+            'cluster_identifier': self._get_param('ClusterIdentifier'),
+            'destination_region': self._get_param('DestinationRegion'),
+            'retention_period': self._get_param('RetentionPeriod', 7),
+            'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'),
+        }
+        cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs)
+
+        return self.get_response({
+            "EnableSnapshotCopyResponse": {
+                "EnableSnapshotCopyResult": {
+                    "Cluster": cluster.to_json()
+                },
+                "ResponseMetadata": {
+                    "RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
+                }
+            }
+        })
+
+    def disable_snapshot_copy(self):
+        snapshot_copy_kwargs = {
+            'cluster_identifier': self._get_param('ClusterIdentifier'),
+        }
+        cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs)
+
+        return self.get_response({
+            "DisableSnapshotCopyResponse": {
+                "DisableSnapshotCopyResult": {
+                    "Cluster": cluster.to_json()
+                },
+                "ResponseMetadata": {
+                    "RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
+                }
+            }
+        })
+
+    def modify_snapshot_copy_retention_period(self):
+        snapshot_copy_kwargs = {
+            'cluster_identifier': self._get_param('ClusterIdentifier'),
+            'retention_period': self._get_param('RetentionPeriod'),
+        }
+        cluster = self.redshift_backend.modify_snapshot_copy_retention_period(**snapshot_copy_kwargs)
+
+        return self.get_response({
+            "ModifySnapshotCopyRetentionPeriodResponse": {
+                "ModifySnapshotCopyRetentionPeriodResult": {
+                    "Clusters": [cluster.to_json()]
+                },
+                "ResponseMetadata": {
+                    "RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
+                }
+            }
+        })
@@ -18,10 +18,9 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi
     MalformedACLError
 from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \
     FakeTag
-from .utils import bucket_name_from_url, metadata_from_headers
+from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
 from xml.dom import minidom
 
-REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com'
 DEFAULT_REGION_NAME = 'us-east-1'
 
 
@@ -128,10 +128,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
         parsed_url = urlparse(full_url)
         querystring = parse_qs(parsed_url.query, keep_blank_values=True)
         method = request.method
-        region_name = DEFAULT_REGION_NAME
-        region_match = re.search(REGION_URL_REGEX, full_url)
-        if region_match:
-            region_name = region_match.groups()[0]
+        region_name = parse_region_from_url(full_url)
 
         bucket_name = self.parse_bucket_name_from_url(request, full_url)
         if not bucket_name:
|
|||||||
# HEAD (which the real API responds with), and instead
|
# HEAD (which the real API responds with), and instead
|
||||||
# raises NoSuchBucket, leading to inconsistency in
|
# raises NoSuchBucket, leading to inconsistency in
|
||||||
# error response between real and mocked responses.
|
# error response between real and mocked responses.
|
||||||
return 404, {}, "Not Found"
|
return 404, {}, ""
|
||||||
return 200, {}, ""
|
return 200, {}, ""
|
||||||
|
|
||||||
def _bucket_response_get(self, bucket_name, querystring, headers):
|
def _bucket_response_get(self, bucket_name, querystring, headers):
|
||||||
|
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+import logging
 
 from boto.s3.key import Key
 import re
@@ -6,6 +7,10 @@ import six
 from six.moves.urllib.parse import urlparse, unquote
 import sys
 
+
+log = logging.getLogger(__name__)
+
+
 bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com")
 
 
@@ -27,6 +32,20 @@ def bucket_name_from_url(url):
     return None
 
 
+REGION_URL_REGEX = re.compile(
+    r'^https?://(s3[-\.](?P<region1>.+)\.amazonaws\.com/(.+)|'
+    r'(.+)\.s3-(?P<region2>.+)\.amazonaws\.com)/?')
+
+
+def parse_region_from_url(url):
+    match = REGION_URL_REGEX.search(url)
+    if match:
+        region = match.group('region1') or match.group('region2')
+    else:
+        region = 'us-east-1'
+    return region
+
+
 def metadata_from_headers(headers):
     metadata = {}
     meta_regex = re.compile(
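
The new helper distinguishes path-style URLs (region captured by the region1 group) from virtual-hosted-style URLs (region2), defaulting to us-east-1 when neither matches. Example calls, mirroring the tests added at the end of this commit:

    from moto.s3.utils import parse_region_from_url

    parse_region_from_url('https://s3-eu-west-1.amazonaws.com/some-bucket')  # 'eu-west-1' (path style)
    parse_region_from_url('https://some-bucket.s3-eu-west-1.amazonaws.com')  # 'eu-west-1' (virtual-hosted)
    parse_region_from_url('https://s3.amazonaws.com/some-bucket')            # 'us-east-1' fallback
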
@@ -8,7 +8,7 @@ freezegun
 flask
 boto>=2.45.0
 boto3>=1.4.4
-botocore>=1.5.77
+botocore>=1.8.36
 six>=1.9
 prompt-toolkit==1.0.14
 click==6.7
@@ -1,2 +1,8 @@
+[nosetests]
+verbosity=1
+detailed-errors=1
+with-coverage=1
+cover-package=moto
+
 [bdist_wheel]
 universal=1
setup.py (2 changed lines):
@@ -22,7 +22,7 @@ install_requires = [
     "mock",
     "docker>=2.5.1",
     "jsondiff==1.1.1",
-    "aws-xray-sdk>=0.93",
+    "aws-xray-sdk<0.96,>=0.93",
 ]
 
 extras_require = {
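
The new pin is a compound PEP 440 specifier: both clauses must hold, so 0.93.x through 0.95.x releases are accepted and 0.96+ is excluded. This can be checked with the packaging library (the specifier implementation pip uses):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('<0.96,>=0.93')
    '0.95' in spec   # True
    '0.96' in spec   # False
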
@@ -2156,6 +2156,78 @@ def test_stack_spot_fleet():
     launch_spec['WeightedCapacity'].should.equal(2.0)
 
 
+@mock_cloudformation()
+@mock_ec2()
+def test_stack_spot_fleet_should_figure_out_default_price():
+    conn = boto3.client('ec2', 'us-east-1')
+
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    subnet = conn.create_subnet(
+        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
+    subnet_id = subnet['SubnetId']
+
+    spot_fleet_template = {
+        'Resources': {
+            "SpotFleet1": {
+                "Type": "AWS::EC2::SpotFleet",
+                "Properties": {
+                    "SpotFleetRequestConfigData": {
+                        "IamFleetRole": "arn:aws:iam::123456789012:role/fleet",
+                        "TargetCapacity": 6,
+                        "AllocationStrategy": "diversified",
+                        "LaunchSpecifications": [
+                            {
+                                "EbsOptimized": "false",
+                                "InstanceType": 't2.small',
+                                "ImageId": "ami-1234",
+                                "SubnetId": subnet_id,
+                                "WeightedCapacity": "2",
+                            },
+                            {
+                                "EbsOptimized": "true",
+                                "InstanceType": 't2.large',
+                                "ImageId": "ami-1234",
+                                "Monitoring": {"Enabled": "true"},
+                                "SecurityGroups": [{"GroupId": "sg-123"}],
+                                "SubnetId": subnet_id,
+                                "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"},
+                                "WeightedCapacity": "4",
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+    }
+    spot_fleet_template_json = json.dumps(spot_fleet_template)
+
+    cf_conn = boto3.client('cloudformation', 'us-east-1')
+    stack_id = cf_conn.create_stack(
+        StackName='test_stack',
+        TemplateBody=spot_fleet_template_json,
+    )['StackId']
+
+    stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
+    stack_resources['StackResourceSummaries'].should.have.length_of(1)
+    spot_fleet_id = stack_resources[
+        'StackResourceSummaries'][0]['PhysicalResourceId']
+
+    spot_fleet_requests = conn.describe_spot_fleet_requests(
+        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
+    len(spot_fleet_requests).should.equal(1)
+    spot_fleet_request = spot_fleet_requests[0]
+    spot_fleet_request['SpotFleetRequestState'].should.equal("active")
+    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
+
+    assert 'SpotPrice' not in spot_fleet_config
+    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
+    launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
+    launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]
+
+    assert 'SpotPrice' not in launch_spec1
+    assert 'SpotPrice' not in launch_spec2
+
+
 @mock_ec2
 @mock_elbv2
 @mock_cloudformation
@@ -5,10 +5,11 @@ from nose.tools import assert_raises
 
 from moto.ec2 import ec2_backends
 import boto
+import boto3
 from boto.exception import EC2ResponseError
 import sure  # noqa
 
-from moto import mock_ec2_deprecated
+from moto import mock_ec2_deprecated, mock_ec2
 
 
 @mock_ec2_deprecated
@@ -579,3 +580,25 @@ def test_volume_tag_escaping():
 
     snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
     dict(snaps[0].tags).should.equal({'key': '</closed>'})
+
+
+@mock_ec2
+def test_search_for_many_snapshots():
+    ec2_client = boto3.client('ec2', region_name='eu-west-1')
+
+    volume_response = ec2_client.create_volume(
+        AvailabilityZone='eu-west-1a', Size=10
+    )
+
+    snapshot_ids = []
+    for i in range(1, 20):
+        create_snapshot_response = ec2_client.create_snapshot(
+            VolumeId=volume_response['VolumeId']
+        )
+        snapshot_ids.append(create_snapshot_response['SnapshotId'])
+
+    snapshots_response = ec2_client.describe_snapshots(
+        SnapshotIds=snapshot_ids
+    )
+
+    assert len(snapshots_response['Snapshots']) == len(snapshot_ids)
@@ -316,3 +316,30 @@ def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate():
         SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
     spot_fleet_config['TargetCapacity'].should.equal(1)
     spot_fleet_config['FulfilledCapacity'].should.equal(2.0)
+
+
+@mock_ec2
+def test_create_spot_fleet_without_spot_price():
+    conn = boto3.client("ec2", region_name='us-west-2')
+    subnet_id = get_subnet_id(conn)
+
+    # remove prices to force a fallback to ondemand price
+    spot_config_without_price = spot_config(subnet_id)
+    del spot_config_without_price['SpotPrice']
+    for spec in spot_config_without_price['LaunchSpecifications']:
+        del spec['SpotPrice']
+
+    spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId']
+    spot_fleet_requests = conn.describe_spot_fleet_requests(
+        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
+    len(spot_fleet_requests).should.equal(1)
+    spot_fleet_request = spot_fleet_requests[0]
+    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
+
+    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
+    launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
+    launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]
+
+    # AWS will figure out the price
+    assert 'SpotPrice' not in launch_spec1
+    assert 'SpotPrice' not in launch_spec2
@@ -340,6 +340,10 @@ def test_create_target_group_and_listeners():
                    'Type': 'forward'}])
     http_listener_arn = listener.get('ListenerArn')
 
+    response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn,
+                                           Names=['a-target'])
+    response.get('TargetGroups').should.have.length_of(1)
+
     # And another with SSL
     response = conn.create_listener(
         LoadBalancerArn=load_balancer_arn,
@@ -1081,3 +1081,98 @@ def test_tagged_resource_not_found_error():
         ResourceName='bad:arn'
     ).should.throw(ClientError, "Tagging is not supported for this type of resource")
 
+
+@mock_redshift
+def test_enable_snapshot_copy():
+    client = boto3.client('redshift', region_name='us-east-1')
+    client.create_cluster(
+        ClusterIdentifier='test',
+        ClusterType='single-node',
+        DBName='test',
+        Encrypted=True,
+        MasterUsername='user',
+        MasterUserPassword='password',
+        NodeType='ds2.xlarge',
+    )
+    client.enable_snapshot_copy(
+        ClusterIdentifier='test',
+        DestinationRegion='us-west-2',
+        RetentionPeriod=3,
+        SnapshotCopyGrantName='copy-us-east-1-to-us-west-2'
+    )
+    response = client.describe_clusters(ClusterIdentifier='test')
+    cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus']
+    cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3)
+    cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2')
+    cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2')
+
+
+@mock_redshift
+def test_enable_snapshot_copy_unencrypted():
+    client = boto3.client('redshift', region_name='us-east-1')
+    client.create_cluster(
+        ClusterIdentifier='test',
+        ClusterType='single-node',
+        DBName='test',
+        MasterUsername='user',
+        MasterUserPassword='password',
+        NodeType='ds2.xlarge',
+    )
+    client.enable_snapshot_copy(
+        ClusterIdentifier='test',
+        DestinationRegion='us-west-2',
+    )
+    response = client.describe_clusters(ClusterIdentifier='test')
+    cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus']
+    cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7)
+    cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2')
+
+
+@mock_redshift
+def test_disable_snapshot_copy():
+    client = boto3.client('redshift', region_name='us-east-1')
+    client.create_cluster(
+        DBName='test',
+        ClusterIdentifier='test',
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        MasterUsername='user',
+        MasterUserPassword='password',
+    )
+    client.enable_snapshot_copy(
+        ClusterIdentifier='test',
+        DestinationRegion='us-west-2',
+        RetentionPeriod=3,
+        SnapshotCopyGrantName='copy-us-east-1-to-us-west-2',
+    )
+    client.disable_snapshot_copy(
+        ClusterIdentifier='test',
+    )
+    response = client.describe_clusters(ClusterIdentifier='test')
+    response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus')
+
+
+@mock_redshift
+def test_modify_snapshot_copy_retention_period():
+    client = boto3.client('redshift', region_name='us-east-1')
+    client.create_cluster(
+        DBName='test',
+        ClusterIdentifier='test',
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        MasterUsername='user',
+        MasterUserPassword='password',
+    )
+    client.enable_snapshot_copy(
+        ClusterIdentifier='test',
+        DestinationRegion='us-west-2',
+        RetentionPeriod=3,
+        SnapshotCopyGrantName='copy-us-east-1-to-us-west-2',
+    )
+    client.modify_snapshot_copy_retention_period(
+        ClusterIdentifier='test',
+        RetentionPeriod=5,
+    )
+    response = client.describe_clusters(ClusterIdentifier='test')
+    cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus']
+    cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5)
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 from sure import expect
-from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore
+from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url
 
 
 def test_base_url():
@@ -53,3 +53,21 @@ def test_versioned_key_store():
     d.setlist('key', [[1], [2]])
     d['key'].should.have.length_of(1)
     d.getlist('key').should.be.equal([[1], [2]])
+
+
+def test_parse_region_from_url():
+    expected = 'us-west-2'
+    for url in ['http://s3-us-west-2.amazonaws.com/bucket',
+                'http://s3.us-west-2.amazonaws.com/bucket',
+                'http://bucket.s3-us-west-2.amazonaws.com',
+                'https://s3-us-west-2.amazonaws.com/bucket',
+                'https://s3.us-west-2.amazonaws.com/bucket',
+                'https://bucket.s3-us-west-2.amazonaws.com']:
+        parse_region_from_url(url).should.equal(expected)
+
+    expected = 'us-east-1'
+    for url in ['http://s3.amazonaws.com/bucket',
+                'http://bucket.s3.amazonaws.com',
+                'https://s3.amazonaws.com/bucket',
+                'https://bucket.s3.amazonaws.com']:
+        parse_region_from_url(url).should.equal(expected)