Merge pull request #26 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2020-01-24 08:48:41 +00:00 committed by GitHub
commit b223cbc11b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 281 additions and 154 deletions

View File

@ -283,14 +283,14 @@ def test_describe_instances_allowed():
]
}
access_key = ...
# create access key for an IAM user/assumed role that has the policy above.
# create access key for an IAM user/assumed role that has the policy above.
# this part should call __exactly__ 4 AWS actions, so that authentication and authorization starts exactly after this
client = boto3.client('ec2', region_name='us-east-1',
aws_access_key_id=access_key['AccessKeyId'],
aws_secret_access_key=access_key['SecretAccessKey'])
# if the IAM principal whose access key is used, does not have the permission to describe instances, this will fail
# if the IAM principal whose access key is used, does not have the permission to describe instances, this will fail
instances = client.describe_instances()['Reservations'][0]['Instances']
assert len(instances) == 0
```
@ -310,16 +310,16 @@ You need to ensure that the mocks are actually in place. Changes made to recent
have altered some of the mock behavior. In short, you need to ensure that you _always_ do the following:
1. Ensure that your tests have dummy environment variables set up:
export AWS_ACCESS_KEY_ID='testing'
export AWS_SECRET_ACCESS_KEY='testing'
export AWS_SECURITY_TOKEN='testing'
export AWS_SESSION_TOKEN='testing'
1. __VERY IMPORTANT__: ensure that you have your mocks set up __BEFORE__ your `boto3` client is established.
1. __VERY IMPORTANT__: ensure that you have your mocks set up __BEFORE__ your `boto3` client is established.
This can typically happen if you import a module that has a `boto3` client instantiated outside of a function.
See the pesky imports section below on how to work around this.
### Example on usage?
If you are a user of [pytest](https://pytest.org/en/latest/), you can leverage [pytest fixtures](https://pytest.org/en/latest/fixture.html#fixture)
to help set up your mocks and other AWS resources that you would need.
@ -354,7 +354,7 @@ def cloudwatch(aws_credentials):
... etc.
```
In the code sample above, all of the AWS/mocked fixtures take in a parameter of `aws_credentials`,
In the code sample above, all of the AWS/mocked fixtures take in a parameter of `aws_credentials`,
which sets the proper fake environment variables. The fake environment variables are used so that `botocore` doesn't try to locate real
credentials on your system.
@ -364,7 +364,7 @@ def test_create_bucket(s3):
# s3 is a fixture defined above that yields a boto3 s3 client.
# Feel free to instantiate another boto3 S3 client -- Keep note of the region though.
s3.create_bucket(Bucket="somebucket")
result = s3.list_buckets()
assert len(result['Buckets']) == 1
assert result['Buckets'][0]['Name'] == 'somebucket'
@ -373,7 +373,7 @@ def test_create_bucket(s3):
### What about those pesky imports?
Recall earlier, it was mentioned that mocks should be established __BEFORE__ the clients are set up. One way
to avoid import issues is to make use of local Python imports -- i.e. import the module inside of the unit
test you want to run vs. importing at the top of the file.
test you want to run vs. importing at the top of the file.
Example:
```python
@ -381,12 +381,12 @@ def test_something(s3):
from some.package.that.does.something.with.s3 import some_func # <-- Local import for unit test
# ^^ Importing here ensures that the mock has been established.
sume_func() # The mock has been established from the "s3" pytest fixture, so this function that uses
some_func() # The mock has been established from the "s3" pytest fixture, so this function that uses
# a package-level S3 client will properly use the mock and not reach out to AWS.
```
### Other caveats
For Tox, Travis CI, and other build systems, you might need to also perform a `touch ~/.aws/credentials`
For Tox, Travis CI, and other build systems, you might need to also perform a `touch ~/.aws/credentials`
command before running the tests. As long as that file is present (empty preferably) and the environment
variables above are set, you should be good to go.

View File

@ -76,7 +76,7 @@ Currently implemented Services:
+---------------------------+-----------------------+------------------------------------+
| Logs | @mock_logs | basic endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Organizations | @mock_organizations | some core edpoints done |
| Organizations | @mock_organizations | some core endpoints done |
+---------------------------+-----------------------+------------------------------------+
| Polly | @mock_polly | all endpoints done |
+---------------------------+-----------------------+------------------------------------+

View File

@ -53,9 +53,6 @@ try:
except ImportError:
from backports.tempfile import TemporaryDirectory
# The lambci container is returning a special escape character for the "RequestID" fields. Unicode 033:
# _stderr_regex = re.compile(r"START|END|REPORT RequestId: .*")
_stderr_regex = re.compile(r"\033\[\d+.*")
_orig_adapter_send = requests.adapters.HTTPAdapter.send
docker_3 = docker.__version__[0] >= "3"
@ -385,7 +382,7 @@ class LambdaFunction(BaseModel):
try:
# TODO: I believe we can keep the container running and feed events as needed
# also need to hook it up to the other services so it can make kws/s3 etc calls
# Should get invoke_id /RequestId from invovation
# Should get invoke_id /RequestId from invocation
env_vars = {
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
"AWS_LAMBDA_FUNCTION_NAME": self.function_name,
@ -453,14 +450,9 @@ class LambdaFunction(BaseModel):
if exit_code != 0:
raise Exception("lambda invoke failed output: {}".format(output))
# strip out RequestId lines (TODO: This will return an additional '\n' in the response)
output = os.linesep.join(
[
line
for line in self.convert(output).splitlines()
if not _stderr_regex.match(line)
]
)
# We only care about the response from the lambda
# Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25
output = output.splitlines()[-1]
return output, False
except BaseException as e:
traceback.print_exc()

View File

@ -108,7 +108,9 @@ class CognitoIdpUserPool(BaseModel):
return user_pool_json
def create_jwt(self, client_id, username, expires_in=60 * 60, extra_data={}):
def create_jwt(
self, client_id, username, token_use, expires_in=60 * 60, extra_data={}
):
now = int(time.time())
payload = {
"iss": "https://cognito-idp.{}.amazonaws.com/{}".format(
@ -116,7 +118,7 @@ class CognitoIdpUserPool(BaseModel):
),
"sub": self.users[username].id,
"aud": client_id,
"token_use": "id",
"token_use": token_use,
"auth_time": now,
"exp": now + expires_in,
}
@ -125,7 +127,10 @@ class CognitoIdpUserPool(BaseModel):
return jws.sign(payload, self.json_web_key, algorithm="RS256"), expires_in
def create_id_token(self, client_id, username):
id_token, expires_in = self.create_jwt(client_id, username)
extra_data = self.get_user_extra_data_by_client_id(client_id, username)
id_token, expires_in = self.create_jwt(
client_id, username, "id", extra_data=extra_data
)
self.id_tokens[id_token] = (client_id, username)
return id_token, expires_in
@ -135,10 +140,7 @@ class CognitoIdpUserPool(BaseModel):
return refresh_token
def create_access_token(self, client_id, username):
extra_data = self.get_user_extra_data_by_client_id(client_id, username)
access_token, expires_in = self.create_jwt(
client_id, username, extra_data=extra_data
)
access_token, expires_in = self.create_jwt(client_id, username, "access")
self.access_tokens[access_token] = (client_id, username)
return access_token, expires_in

View File

@ -977,10 +977,8 @@ class OpLessThan(Op):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
# In python3 None is not a valid comparator when using < or > so must be handled specially
if lhs and rhs:
if lhs is not None and rhs is not None:
return lhs < rhs
elif lhs is None and rhs:
return True
else:
return False
@ -992,10 +990,8 @@ class OpGreaterThan(Op):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
# In python3 None is not a valid comparator when using < or > so must be handled specially
if lhs and rhs:
if lhs is not None and rhs is not None:
return lhs > rhs
elif lhs and rhs is None:
return True
else:
return False
@ -1025,10 +1021,8 @@ class OpLessThanOrEqual(Op):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
# In python3 None is not a valid comparator when using < or > so must be handled specially
if lhs and rhs:
if lhs is not None and rhs is not None:
return lhs <= rhs
elif lhs is None and rhs or lhs is None and rhs is None:
return True
else:
return False
@ -1040,10 +1034,8 @@ class OpGreaterThanOrEqual(Op):
lhs = self.lhs.expr(item)
rhs = self.rhs.expr(item)
# In python3 None is not a valid comparator when using < or > so must be handled specially
if lhs and rhs:
if lhs is not None and rhs is not None:
return lhs >= rhs
elif lhs and rhs is None or lhs is None and rhs is None:
return True
else:
return False

View File

@ -457,7 +457,7 @@ class Item(BaseModel):
)
if not old_list.is_list():
raise ParamValidationError
old_list.value.extend(new_value["L"])
old_list.value.extend([DynamoType(v) for v in new_value["L"]])
value = old_list
return value

View File

@ -104,7 +104,7 @@ class SecurityGroups(BaseResponse):
if self.is_not_dryrun("GrantSecurityGroupIngress"):
for args in self._process_rules_from_querystring():
self.ec2_backend.authorize_security_group_ingress(*args)
return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE
return AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE
def create_security_group(self):
name = self._get_param("GroupName")
@ -158,7 +158,7 @@ class SecurityGroups(BaseResponse):
if self.is_not_dryrun("RevokeSecurityGroupIngress"):
for args in self._process_rules_from_querystring():
self.ec2_backend.revoke_security_group_ingress(*args)
return REVOKE_SECURITY_GROUP_INGRESS_REPONSE
return REVOKE_SECURITY_GROUP_INGRESS_RESPONSE
CREATE_SECURITY_GROUP_RESPONSE = """<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
@ -265,12 +265,12 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = (
</DescribeSecurityGroupsResponse>"""
)
AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupIngressResponse>"""
REVOKE_SECURITY_GROUP_INGRESS_REPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupIngressResponse>"""

View File

@ -118,6 +118,7 @@ class TaskDefinition(BaseObject):
revision,
container_definitions,
region_name,
network_mode=None,
volumes=None,
tags=None,
):
@ -132,6 +133,10 @@ class TaskDefinition(BaseObject):
self.volumes = []
else:
self.volumes = volumes
if network_mode is None:
self.network_mode = "bridge"
else:
self.network_mode = network_mode
@property
def response_object(self):
@ -553,7 +558,7 @@ class EC2ContainerServiceBackend(BaseBackend):
raise Exception("{0} is not a cluster".format(cluster_name))
def register_task_definition(
self, family, container_definitions, volumes, tags=None
self, family, container_definitions, volumes=None, network_mode=None, tags=None
):
if family in self.task_definitions:
last_id = self._get_last_task_definition_revision_id(family)
@ -562,7 +567,13 @@ class EC2ContainerServiceBackend(BaseBackend):
self.task_definitions[family] = {}
revision = 1
task_definition = TaskDefinition(
family, revision, container_definitions, self.region_name, volumes, tags
family,
revision,
container_definitions,
self.region_name,
volumes=volumes,
network_mode=network_mode,
tags=tags,
)
self.task_definitions[family][revision] = task_definition

View File

@ -62,8 +62,13 @@ class EC2ContainerServiceResponse(BaseResponse):
container_definitions = self._get_param("containerDefinitions")
volumes = self._get_param("volumes")
tags = self._get_param("tags")
network_mode = self._get_param("networkMode")
task_definition = self.ecs_backend.register_task_definition(
family, container_definitions, volumes, tags
family,
container_definitions,
volumes=volumes,
network_mode=network_mode,
tags=tags,
)
return json.dumps({"taskDefinition": task_definition.response_object})

View File

@ -103,7 +103,7 @@ class LogsResponse(BaseResponse):
(
events,
next_backward_token,
next_foward_token,
next_forward_token,
) = self.logs_backend.get_log_events(
log_group_name,
log_stream_name,
@ -117,7 +117,7 @@ class LogsResponse(BaseResponse):
{
"events": events,
"nextBackwardToken": next_backward_token,
"nextForwardToken": next_foward_token,
"nextForwardToken": next_forward_token,
}
)

View File

@ -986,7 +986,7 @@ class RDS2Backend(BaseBackend):
)
if option_group_kwargs["engine_name"] not in valid_option_group_engines.keys():
raise RDSClientError(
"InvalidParameterValue", "Invalid DB engine: non-existant"
"InvalidParameterValue", "Invalid DB engine: non-existent"
)
if (
option_group_kwargs["major_engine_version"]

View File

@ -367,14 +367,14 @@ class RDS2Response(BaseResponse):
def modify_db_parameter_group(self):
db_parameter_group_name = self._get_param("DBParameterGroupName")
db_parameter_group_parameters = self._get_db_parameter_group_paramters()
db_parameter_group_parameters = self._get_db_parameter_group_parameters()
db_parameter_group = self.backend.modify_db_parameter_group(
db_parameter_group_name, db_parameter_group_parameters
)
template = self.response_template(MODIFY_DB_PARAMETER_GROUP_TEMPLATE)
return template.render(db_parameter_group=db_parameter_group)
def _get_db_parameter_group_paramters(self):
def _get_db_parameter_group_parameters(self):
parameter_group_parameters = defaultdict(dict)
for param_name, value in self.querystring.items():
if not param_name.startswith("Parameters.Parameter"):

View File

@ -271,6 +271,7 @@ LIST_RRSET_RESPONSE = """<ListResourceRecordSetsResponse xmlns="https://route53.
{{ record_set.to_xml() }}
{% endfor %}
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
</ListResourceRecordSetsResponse>"""
CHANGE_RRSET_RESPONSE = """<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">

View File

@ -1482,7 +1482,7 @@ S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date }}</CreationDate>
<CreationDate>{{ bucket.creation_date.isoformat() }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
@ -1869,7 +1869,6 @@ S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>

View File

@ -183,6 +183,7 @@ class Queue(BaseModel):
"MaximumMessageSize",
"MessageRetentionPeriod",
"QueueArn",
"RedrivePolicy",
"ReceiveMessageWaitTimeSeconds",
"VisibilityTimeout",
]

View File

@ -20,8 +20,8 @@ import jinja2
from prompt_toolkit import (
prompt
)
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.shortcuts import print_tokens
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.shortcuts import print_formatted_text
from botocore import xform_name
from botocore.session import Session
@ -149,12 +149,12 @@ def append_mock_dict_to_backends_py(service):
with open(path) as f:
lines = [_.replace('\n', '') for _ in f.readlines()]
if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)):
if any(_ for _ in lines if re.match(".*\"{}\": {}_backends.*".format(service, service), _)):
return
filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)]
filtered_lines = [_ for _ in lines if re.match(".*\".*\":.*_backends.*", _)]
last_elem_line_index = lines.index(filtered_lines[-1])
new_line = " '{}': {}_backends,".format(service, get_escaped_service(service))
new_line = " \"{}\": {}_backends,".format(service, get_escaped_service(service))
prev_line = lines[last_elem_line_index]
if not prev_line.endswith('{') and not prev_line.endswith(','):
lines[last_elem_line_index] += ','

View File

@ -43,7 +43,7 @@ install_requires = [
"python-jose<4.0.0",
"mock",
"docker>=2.5.1",
"jsondiff==1.1.2",
"jsondiff>=1.1.2",
"aws-xray-sdk!=0.96,>=0.93",
"responses>=0.9.0",
"idna<2.9,>=2.5",

View File

@ -706,14 +706,14 @@ def test_create_autoscaling_group_boto3():
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propogate-tag-value",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
},
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "not-propogated-tag-key",
"Value": "not-propogate-tag-value",
"Value": "not-propagate-tag-value",
"PropagateAtLaunch": False,
},
],
@ -744,14 +744,14 @@ def test_create_autoscaling_group_from_instance():
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propogate-tag-value",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
},
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "not-propogated-tag-key",
"Value": "not-propogate-tag-value",
"Value": "not-propagate-tag-value",
"PropagateAtLaunch": False,
},
],
@ -1062,7 +1062,7 @@ def test_detach_one_instance_decrement():
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propogate-tag-value",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
@ -1116,7 +1116,7 @@ def test_detach_one_instance():
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propogate-tag-value",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
@ -1169,7 +1169,7 @@ def test_attach_one_instance():
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propogate-tag-value",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],

View File

@ -58,8 +58,7 @@ def lambda_handler(event, context):
volume_id = event.get('volume_id')
vol = ec2.Volume(volume_id)
print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size))
return event
return {{'id': vol.id, 'state': vol.state, 'size': vol.size}}
""".format(
base_url="motoserver:5000"
if settings.TEST_SERVER_MODE
@ -181,27 +180,9 @@ if settings.TEST_SERVER_MODE:
Payload=json.dumps(in_data),
)
result["StatusCode"].should.equal(202)
msg = "get volume details for %s\nVolume - %s state=%s, size=%s\n%s" % (
vol.id,
vol.id,
vol.state,
vol.size,
json.dumps(in_data).replace(
" ", ""
), # Makes the tests pass as the result is missing the whitespace
)
log_result = base64.b64decode(result["LogResult"]).decode("utf-8")
# The Docker lambda invocation will return an additional '\n', so need to replace it:
log_result = log_result.replace("\n\n", "\n")
log_result.should.equal(msg)
payload = result["Payload"].read().decode("utf-8")
# The Docker lambda invocation will return an additional '\n', so need to replace it:
payload = payload.replace("\n\n", "\n")
payload.should.equal(msg)
actual_payload = json.loads(result["Payload"].read().decode("utf-8"))
expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size}
actual_payload.should.equal(expected_payload)
@mock_logs

View File

@ -1142,11 +1142,13 @@ def test_token_legitimacy():
id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256"))
id_claims["iss"].should.equal(issuer)
id_claims["aud"].should.equal(client_id)
id_claims["token_use"].should.equal("id")
for k, v in outputs["additional_fields"].items():
id_claims[k].should.equal(v)
access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256"))
access_claims["iss"].should.equal(issuer)
access_claims["aud"].should.equal(client_id)
for k, v in outputs["additional_fields"].items():
access_claims[k].should.equal(v)
access_claims["token_use"].should.equal("access")
@mock_cognitoidp

View File

@ -1719,6 +1719,32 @@ def test_scan_filter4():
assert response["Count"] == 0
@mock_dynamodb2
def test_scan_filter_should_not_return_non_existing_attributes():
table_name = "my-table"
item = {"partitionKey": "pk-2", "my-attr": 42}
# Create table
res = boto3.resource("dynamodb", region_name="us-east-1")
res.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "partitionKey", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
table = res.Table(table_name)
# Insert items
table.put_item(Item={"partitionKey": "pk-1"})
table.put_item(Item=item)
# Verify a few operations
# Assert we only find the item that has this attribute
table.scan(FilterExpression=Attr("my-attr").lt(43))["Items"].should.equal([item])
table.scan(FilterExpression=Attr("my-attr").lte(42))["Items"].should.equal([item])
table.scan(FilterExpression=Attr("my-attr").gte(42))["Items"].should.equal([item])
table.scan(FilterExpression=Attr("my-attr").gt(41))["Items"].should.equal([item])
# Sanity check that we can't find the item if the FE is wrong
table.scan(FilterExpression=Attr("my-attr").gt(43))["Items"].should.equal([])
@mock_dynamodb2
def test_bad_scan_filter():
client = boto3.client("dynamodb", region_name="us-east-1")
@ -2505,6 +2531,48 @@ def test_condition_expressions():
)
@mock_dynamodb2
def test_condition_expression_numerical_attribute():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="my-table",
KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "partitionKey", "AttributeType": "S"}],
)
table = dynamodb.Table("my-table")
table.put_item(Item={"partitionKey": "pk-pos", "myAttr": 5})
table.put_item(Item={"partitionKey": "pk-neg", "myAttr": -5})
# try to update the item we put in the table using numerical condition expression
# Specifically, verify that we can compare with a zero-value
# First verify that > and >= work on positive numbers
update_numerical_con_expr(
key="pk-pos", con_expr="myAttr > :zero", res="6", table=table
)
update_numerical_con_expr(
key="pk-pos", con_expr="myAttr >= :zero", res="7", table=table
)
# Second verify that < and <= work on negative numbers
update_numerical_con_expr(
key="pk-neg", con_expr="myAttr < :zero", res="-4", table=table
)
update_numerical_con_expr(
key="pk-neg", con_expr="myAttr <= :zero", res="-3", table=table
)
def update_numerical_con_expr(key, con_expr, res, table):
table.update_item(
Key={"partitionKey": key},
UpdateExpression="ADD myAttr :one",
ExpressionAttributeValues={":zero": 0, ":one": 1},
ConditionExpression=con_expr,
)
table.get_item(Key={"partitionKey": key})["Item"]["myAttr"].should.equal(
Decimal(res)
)
@mock_dynamodb2
def test_condition_expression__attr_doesnt_exist():
client = boto3.client("dynamodb", region_name="us-east-1")
@ -3489,6 +3557,58 @@ def test_update_supports_nested_list_append_onto_another_list():
)
@mock_dynamodb2
def test_update_supports_list_append_maps():
client = boto3.client("dynamodb", region_name="us-west-1")
client.create_table(
AttributeDefinitions=[
{"AttributeName": "id", "AttributeType": "S"},
{"AttributeName": "rid", "AttributeType": "S"},
],
TableName="TestTable",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
{"AttributeName": "rid", "KeyType": "RANGE"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
client.put_item(
TableName="TestTable",
Item={
"id": {"S": "nested_list_append"},
"rid": {"S": "range_key"},
"a": {"L": [{"M": {"b": {"S": "bar1"}}}]},
},
)
# Update item using list_append expression
client.update_item(
TableName="TestTable",
Key={"id": {"S": "nested_list_append"}, "rid": {"S": "range_key"}},
UpdateExpression="SET a = list_append(a, :i)",
ExpressionAttributeValues={":i": {"L": [{"M": {"b": {"S": "bar2"}}}]}},
)
# Verify item is appended to the existing list
result = client.query(
TableName="TestTable",
KeyConditionExpression="id = :i AND begins_with(rid, :r)",
ExpressionAttributeValues={
":i": {"S": "nested_list_append"},
":r": {"S": "range_key"},
},
)["Items"]
result.should.equal(
[
{
"a": {"L": [{"M": {"b": {"S": "bar1"}}}, {"M": {"b": {"S": "bar2"}}}]},
"rid": {"S": "range_key"},
"id": {"S": "nested_list_append"},
}
]
)
@mock_dynamodb2
def test_update_catches_invalid_list_append_operation():
client = boto3.client("dynamodb", region_name="us-east-1")

View File

@ -236,8 +236,8 @@ def test_route_table_associations():
@mock_ec2_deprecated
def test_route_table_replace_route_table_association():
"""
Note: Boto has deprecated replace_route_table_assocation (which returns status)
and now uses replace_route_table_assocation_with_assoc (which returns association ID).
Note: Boto has deprecated replace_route_table_association (which returns status)
and now uses replace_route_table_association_with_assoc (which returns association ID).
"""
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")

View File

@ -77,7 +77,7 @@ def test_describe_repositories():
response = client.describe_repositories()
len(response["repositories"]).should.equal(2)
respository_arns = [
repository_arns = [
"arn:aws:ecr:us-east-1:012345678910:repository/test_repository1",
"arn:aws:ecr:us-east-1:012345678910:repository/test_repository0",
]
@ -86,9 +86,9 @@ def test_describe_repositories():
response["repositories"][0]["repositoryArn"],
response["repositories"][1]["repositoryArn"],
]
).should.equal(set(respository_arns))
).should.equal(set(repository_arns))
respository_uris = [
repository_uris = [
"012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1",
"012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0",
]
@ -97,7 +97,7 @@ def test_describe_repositories():
response["repositories"][0]["repositoryUri"],
response["repositories"][1]["repositoryUri"],
]
).should.equal(set(respository_uris))
).should.equal(set(repository_uris))
@mock_ecr
@ -108,7 +108,7 @@ def test_describe_repositories_1():
response = client.describe_repositories(registryId="012345678910")
len(response["repositories"]).should.equal(2)
respository_arns = [
repository_arns = [
"arn:aws:ecr:us-east-1:012345678910:repository/test_repository1",
"arn:aws:ecr:us-east-1:012345678910:repository/test_repository0",
]
@ -117,9 +117,9 @@ def test_describe_repositories_1():
response["repositories"][0]["repositoryArn"],
response["repositories"][1]["repositoryArn"],
]
).should.equal(set(respository_arns))
).should.equal(set(repository_arns))
respository_uris = [
repository_uris = [
"012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1",
"012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0",
]
@ -128,7 +128,7 @@ def test_describe_repositories_1():
response["repositories"][0]["repositoryUri"],
response["repositories"][1]["repositoryUri"],
]
).should.equal(set(respository_uris))
).should.equal(set(repository_uris))
@mock_ecr
@ -147,11 +147,11 @@ def test_describe_repositories_3():
_ = client.create_repository(repositoryName="test_repository0")
response = client.describe_repositories(repositoryNames=["test_repository1"])
len(response["repositories"]).should.equal(1)
respository_arn = "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1"
response["repositories"][0]["repositoryArn"].should.equal(respository_arn)
repository_arn = "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1"
response["repositories"][0]["repositoryArn"].should.equal(repository_arn)
respository_uri = "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1"
response["repositories"][0]["repositoryUri"].should.equal(respository_uri)
repository_uri = "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1"
response["repositories"][0]["repositoryUri"].should.equal(repository_uri)
@mock_ecr

View File

@ -94,6 +94,7 @@ def test_register_task_definition():
"logConfiguration": {"logDriver": "json-file"},
}
],
networkMode="bridge",
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
@ -124,6 +125,7 @@ def test_register_task_definition():
response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"][
"logDriver"
].should.equal("json-file")
response["taskDefinition"]["networkMode"].should.equal("bridge")
@mock_ecs
@ -724,7 +726,7 @@ def test_delete_service():
@mock_ecs
def test_update_non_existant_service():
def test_update_non_existent_service():
client = boto3.client("ecs", region_name="us-east-1")
try:
client.update_service(

View File

@ -1391,7 +1391,7 @@ def test_set_security_groups():
len(resp["LoadBalancers"][0]["SecurityGroups"]).should.equal(2)
with assert_raises(ClientError):
client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existant"])
client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existent"])
@mock_elbv2

View File

@ -132,7 +132,7 @@ def test_get_table_versions():
helpers.update_table(client, database_name, table_name, table_input)
version_inputs["2"] = table_input
# Updateing with an indentical input should still create a new version
    # Updating with an identical input should still create a new version
helpers.update_table(client, database_name, table_name, table_input)
version_inputs["3"] = table_input

View File

@ -223,7 +223,7 @@ def test_create_stream_without_redshift():
@mock_kinesis
def test_deescribe_non_existant_stream():
def test_describe_non_existent_stream():
client = boto3.client("firehose", region_name="us-east-1")
client.describe_delivery_stream.when.called_with(

View File

@ -32,7 +32,7 @@ def test_create_cluster():
@mock_kinesis_deprecated
def test_describe_non_existant_stream():
def test_describe_non_existent_stream():
conn = boto.kinesis.connect_to_region("us-east-1")
conn.describe_stream.when.called_with("not-a-stream").should.throw(
ResourceNotFoundException

View File

@ -68,7 +68,7 @@ def test_get_databases_paginated():
@mock_rds_deprecated
def test_describe_non_existant_database():
def test_describe_non_existent_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError)
@ -86,7 +86,7 @@ def test_delete_database():
@mock_rds_deprecated
def test_delete_non_existant_database():
def test_delete_non_existent_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError)
@ -119,7 +119,7 @@ def test_get_security_groups():
@mock_rds_deprecated
def test_get_non_existant_security_group():
def test_get_non_existent_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(
BotoServerError
@ -138,7 +138,7 @@ def test_delete_database_security_group():
@mock_rds_deprecated
def test_delete_non_existant_security_group():
def test_delete_non_existent_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(
BotoServerError

View File

@ -312,7 +312,7 @@ def test_get_databases_paginated():
@mock_rds2
def test_describe_non_existant_database():
def test_describe_non_existent_database():
conn = boto3.client("rds", region_name="us-west-2")
conn.describe_db_instances.when.called_with(
DBInstanceIdentifier="not-a-db"
@ -378,7 +378,7 @@ def test_rename_db_instance():
@mock_rds2
def test_modify_non_existant_database():
def test_modify_non_existent_database():
conn = boto3.client("rds", region_name="us-west-2")
conn.modify_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db", AllocatedStorage=20, ApplyImmediately=True
@ -403,7 +403,7 @@ def test_reboot_db_instance():
@mock_rds2
def test_reboot_non_existant_database():
def test_reboot_non_existent_database():
conn = boto3.client("rds", region_name="us-west-2")
conn.reboot_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db"
@ -444,7 +444,7 @@ def test_delete_database():
@mock_rds2
def test_delete_non_existant_database():
def test_delete_non_existent_database():
conn = boto3.client("rds2", region_name="us-west-2")
conn.delete_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db"
@ -663,7 +663,7 @@ def test_describe_option_group():
@mock_rds2
def test_describe_non_existant_option_group():
def test_describe_non_existent_option_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.describe_option_groups.when.called_with(
OptionGroupName="not-a-option-group"
@ -688,10 +688,10 @@ def test_delete_option_group():
@mock_rds2
def test_delete_non_existant_option_group():
def test_delete_non_existent_option_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.delete_option_group.when.called_with(
OptionGroupName="non-existant"
OptionGroupName="non-existent"
).should.throw(ClientError)
@ -754,10 +754,10 @@ def test_modify_option_group_no_options():
@mock_rds2
def test_modify_non_existant_option_group():
def test_modify_non_existent_option_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.modify_option_group.when.called_with(
OptionGroupName="non-existant",
OptionGroupName="non-existent",
OptionsToInclude=[
(
"OptionName",
@ -771,7 +771,7 @@ def test_modify_non_existant_option_group():
@mock_rds2
def test_delete_non_existant_database():
def test_delete_non_existent_database():
conn = boto3.client("rds", region_name="us-west-2")
conn.delete_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db"
@ -1053,7 +1053,7 @@ def test_get_security_groups():
@mock_rds2
def test_get_non_existant_security_group():
def test_get_non_existent_security_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.describe_db_security_groups.when.called_with(
DBSecurityGroupName="not-a-sg"
@ -1076,7 +1076,7 @@ def test_delete_database_security_group():
@mock_rds2
def test_delete_non_existant_security_group():
def test_delete_non_existent_security_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.delete_db_security_group.when.called_with(
DBSecurityGroupName="not-a-db"
@ -1615,7 +1615,7 @@ def test_describe_db_parameter_group():
@mock_rds2
def test_describe_non_existant_db_parameter_group():
def test_describe_non_existent_db_parameter_group():
conn = boto3.client("rds", region_name="us-west-2")
db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
len(db_parameter_groups["DBParameterGroups"]).should.equal(0)
@ -1669,10 +1669,10 @@ def test_modify_db_parameter_group():
@mock_rds2
def test_delete_non_existant_db_parameter_group():
def test_delete_non_existent_db_parameter_group():
conn = boto3.client("rds", region_name="us-west-2")
conn.delete_db_parameter_group.when.called_with(
DBParameterGroupName="non-existant"
DBParameterGroupName="non-existent"
).should.throw(ClientError)

View File

@ -862,6 +862,8 @@ def test_list_resource_record_sets_name_type_filters():
StartRecordName=all_records[start_with][1],
)
response["IsTruncated"].should.equal(False)
returned_records = [
(record["Type"], record["Name"]) for record in response["ResourceRecordSets"]
]

View File

@ -566,7 +566,7 @@ def test_bucket_deletion():
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
# Delete non-existant bucket
# Delete non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)

View File

@ -174,7 +174,7 @@ def test_bucket_deletion():
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
# Delete non-existant bucket
# Delete non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)

View File

@ -88,8 +88,8 @@ def test_list_platform_applications():
conn.create_platform_application(name="application1", platform="APNS")
conn.create_platform_application(name="application2", platform="APNS")
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["ListPlatformApplicationsResponse"][
applications_response = conn.list_platform_applications()
applications = applications_response["ListPlatformApplicationsResponse"][
"ListPlatformApplicationsResult"
]["PlatformApplications"]
applications.should.have.length_of(2)
@ -101,8 +101,8 @@ def test_delete_platform_application():
conn.create_platform_application(name="application1", platform="APNS")
conn.create_platform_application(name="application2", platform="APNS")
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["ListPlatformApplicationsResponse"][
applications_response = conn.list_platform_applications()
applications = applications_response["ListPlatformApplicationsResponse"][
"ListPlatformApplicationsResult"
]["PlatformApplications"]
applications.should.have.length_of(2)
@ -110,8 +110,8 @@ def test_delete_platform_application():
application_arn = applications[0]["PlatformApplicationArn"]
conn.delete_platform_application(application_arn)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["ListPlatformApplicationsResponse"][
applications_response = conn.list_platform_applications()
applications = applications_response["ListPlatformApplicationsResponse"][
"ListPlatformApplicationsResult"
]["PlatformApplications"]
applications.should.have.length_of(1)

View File

@ -88,8 +88,8 @@ def test_list_platform_applications():
Name="application2", Platform="APNS", Attributes={}
)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["PlatformApplications"]
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(2)
@ -103,15 +103,15 @@ def test_delete_platform_application():
Name="application2", Platform="APNS", Attributes={}
)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["PlatformApplications"]
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(2)
application_arn = applications[0]["PlatformApplicationArn"]
conn.delete_platform_application(PlatformApplicationArn=application_arn)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse["PlatformApplications"]
applications_response = conn.list_platform_applications()
applications = applications_response["PlatformApplications"]
applications.should.have.length_of(1)

View File

@ -806,7 +806,7 @@ def test_filtering_string_array_with_string_no_array_no_match():
topic.publish(
Message="no_match",
MessageAttributes={
"price": {"DataType": "String.Array", "StringValue": "one hundread"}
"price": {"DataType": "String.Array", "StringValue": "one hundred"}
},
)

View File

@ -331,7 +331,20 @@ def test_delete_queue():
@mock_sqs
def test_get_queue_attributes():
client = boto3.client("sqs", region_name="us-east-1")
response = client.create_queue(QueueName="test-queue")
dlq_resp = client.create_queue(QueueName="test-dlr-queue")
dlq_arn1 = client.get_queue_attributes(QueueUrl=dlq_resp["QueueUrl"])["Attributes"][
"QueueArn"
]
response = client.create_queue(
QueueName="test-queue",
Attributes={
"RedrivePolicy": json.dumps(
{"deadLetterTargetArn": dlq_arn1, "maxReceiveCount": 2}
),
},
)
queue_url = response["QueueUrl"]
response = client.get_queue_attributes(QueueUrl=queue_url)
@ -356,6 +369,7 @@ def test_get_queue_attributes():
"ApproximateNumberOfMessages",
"MaximumMessageSize",
"QueueArn",
"RedrivePolicy",
"VisibilityTimeout",
],
)
@ -366,6 +380,9 @@ def test_get_queue_attributes():
"MaximumMessageSize": "65536",
"QueueArn": "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID),
"VisibilityTimeout": "30",
"RedrivePolicy": json.dumps(
{"deadLetterTargetArn": dlq_arn1, "maxReceiveCount": 2}
),
}
)
@ -1180,7 +1197,7 @@ def test_permissions():
client.remove_permission(QueueUrl=queue_url, Label="account2")
with assert_raises(ClientError):
client.remove_permission(QueueUrl=queue_url, Label="non_existant")
client.remove_permission(QueueUrl=queue_url, Label="non_existent")
@mock_sqs

View File

@ -5,7 +5,7 @@ from moto.swf.models import ActivityType, Domain, WorkflowType, WorkflowExecutio
# Some useful constants
# Here are some activity timeouts we use in moto/swf tests ; they're extracted
# from semi-real world example, the goal is mostly to have predictible and
# from semi-real world example, the goal is mostly to have predictable and
# intuitive behaviour in moto/swf own tests...
ACTIVITY_TASK_TIMEOUTS = {
"heartbeatTimeout": "300", # 5 mins