Merge pull request #46 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2020-05-24 10:28:32 +01:00 committed by GitHub
commit 5bd3588f74
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 1694 additions and 158 deletions

View File

@ -641,7 +641,7 @@
## athena
<details>
<summary>10% implemented</summary>
<summary>26% implemented</summary>
- [ ] batch_get_named_query
- [ ] batch_get_query_execution
@ -652,13 +652,13 @@
- [ ] get_named_query
- [ ] get_query_execution
- [ ] get_query_results
- [ ] get_work_group
- [X] get_work_group
- [ ] list_named_queries
- [ ] list_query_executions
- [ ] list_tags_for_resource
- [X] list_work_groups
- [ ] start_query_execution
- [ ] stop_query_execution
- [X] start_query_execution
- [X] stop_query_execution
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_work_group
@ -5287,26 +5287,26 @@
## managedblockchain
<details>
<summary>16% implemented</summary>
<summary>77% implemented</summary>
- [ ] create_member
- [X] create_member
- [X] create_network
- [ ] create_node
- [ ] create_proposal
- [ ] delete_member
- [X] create_proposal
- [X] delete_member
- [ ] delete_node
- [ ] get_member
- [X] get_member
- [X] get_network
- [ ] get_node
- [ ] get_proposal
- [ ] list_invitations
- [ ] list_members
- [X] get_proposal
- [X] list_invitations
- [X] list_members
- [X] list_networks
- [ ] list_nodes
- [ ] list_proposal_votes
- [ ] list_proposals
- [ ] reject_invitation
- [ ] vote_on_proposal
- [X] list_proposal_votes
- [X] list_proposals
- [X] reject_invitation
- [X] vote_on_proposal
</details>
## marketplace-catalog
@ -7392,7 +7392,7 @@
## ses
<details>
<summary>18% implemented</summary>
<summary>21% implemented</summary>
- [ ] clone_receipt_rule_set
- [X] create_configuration_set
@ -7427,14 +7427,14 @@
- [ ] get_identity_verification_attributes
- [X] get_send_quota
- [X] get_send_statistics
- [ ] get_template
- [X] get_template
- [ ] list_configuration_sets
- [ ] list_custom_verification_email_templates
- [X] list_identities
- [ ] list_identity_policies
- [ ] list_receipt_filters
- [ ] list_receipt_rule_sets
- [ ] list_templates
- [X] list_templates
- [X] list_verified_email_addresses
- [ ] put_configuration_set_delivery_options
- [ ] put_identity_policy

View File

@ -2,10 +2,9 @@ from __future__ import unicode_literals
import time
from boto3 import Session
from moto.core import BaseBackend, BaseModel, ACCOUNT_ID
from moto.core import BaseBackend, BaseModel
from moto.core import ACCOUNT_ID
from uuid import uuid4
class TaggableResourceMixin(object):
@ -50,6 +49,17 @@ class WorkGroup(TaggableResourceMixin, BaseModel):
self.configuration = configuration
class Execution(BaseModel):
    """A single Athena query execution tracked by the mock backend."""

    def __init__(self, query, context, config, workgroup):
        """Capture the request parameters and mark the execution QUEUED.

        :param query: the query string submitted by the caller
        :param context: the QueryExecutionContext dict from the request
        :param config: the ResultConfiguration dict from the request
        :param workgroup: name of the work group the query runs in (may be None)
        """
        self.id = str(uuid4())  # random, globally unique execution id
        self.query = query
        self.context = context
        self.config = config
        self.workgroup = workgroup
        self.start_time = time.time()  # submission timestamp (epoch seconds)
        self.status = "QUEUED"  # stays QUEUED until stop_query_execution cancels it
class AthenaBackend(BaseBackend):
region_name = None
@ -57,6 +67,7 @@ class AthenaBackend(BaseBackend):
if region_name is not None:
self.region_name = region_name
self.work_groups = {}
self.executions = {}
def create_work_group(self, name, configuration, description, tags):
if name in self.work_groups:
@ -76,6 +87,32 @@ class AthenaBackend(BaseBackend):
for wg in self.work_groups.values()
]
def get_work_group(self, name):
    """Return a describe-style dict for the named work group, or None.

    The CreationTime field is generated at call time; the mock does not
    persist a creation timestamp on the WorkGroup object.
    """
    if name not in self.work_groups:
        return None
    group = self.work_groups[name]
    description = {
        "Name": group.name,
        "State": group.state,
        "Configuration": group.configuration,
        "Description": group.description,
        "CreationTime": time.time(),
    }
    return description
def start_query_execution(self, query, context, config, workgroup):
    """Queue a new Execution and return its generated id."""
    new_execution = Execution(
        query=query, context=context, config=config, workgroup=workgroup
    )
    self.executions[new_execution.id] = new_execution
    return new_execution.id
def get_execution(self, exec_id):
    """Return the Execution stored under ``exec_id`` (raises KeyError when unknown)."""
    return self.executions[exec_id]
def stop_query_execution(self, exec_id):
    """Mark the given execution as cancelled (no-op beyond the status flip)."""
    self.executions[exec_id].status = "CANCELLED"
athena_backends = {}
for region in Session().get_available_regions("athena"):

View File

@ -18,15 +18,7 @@ class AthenaResponse(BaseResponse):
name, configuration, description, tags
)
if not work_group:
return (
json.dumps(
{
"__type": "InvalidRequestException",
"Message": "WorkGroup already exists",
}
),
dict(status=400),
)
return self.error("WorkGroup already exists", 400)
return json.dumps(
{
"CreateWorkGroupResponse": {
@ -39,3 +31,57 @@ class AthenaResponse(BaseResponse):
def list_work_groups(self):
    """Return the JSON body listing every known work group."""
    payload = {"WorkGroups": self.athena_backend.list_work_groups()}
    return json.dumps(payload)
def get_work_group(self):
    """Return the JSON description of the requested work group."""
    wg_name = self._get_param("WorkGroup")
    details = self.athena_backend.get_work_group(wg_name)
    return json.dumps({"WorkGroup": details})
def start_query_execution(self):
    """Validate the optional work group, then queue the query.

    Returns the JSON body with the new QueryExecutionId, or a 400 error
    tuple when the named work group does not exist.
    """
    query = self._get_param("QueryString")
    context = self._get_param("QueryExecutionContext")
    config = self._get_param("ResultConfiguration")
    workgroup = self._get_param("WorkGroup")
    # An omitted WorkGroup param is allowed; an unknown one is rejected.
    if workgroup and not self.athena_backend.get_work_group(workgroup):
        return self.error("WorkGroup does not exist", 400)
    id = self.athena_backend.start_query_execution(
        query=query, context=context, config=config, workgroup=workgroup
    )
    return json.dumps({"QueryExecutionId": id})
def get_query_execution(self):
    """Return the stored execution's details in GetQueryExecution shape.

    The mock never actually runs queries, so StatementType is fixed to
    "DDL" and all statistics are reported as zero.
    """
    exec_id = self._get_param("QueryExecutionId")
    execution = self.athena_backend.get_execution(exec_id)
    result = {
        "QueryExecution": {
            "QueryExecutionId": exec_id,
            "Query": execution.query,
            # Always reported as DDL by the mock, regardless of the query.
            "StatementType": "DDL",
            "ResultConfiguration": execution.config,
            "QueryExecutionContext": execution.context,
            "Status": {
                "State": execution.status,
                "SubmissionDateTime": execution.start_time,
            },
            # No real execution happens, so every statistic is zero.
            "Statistics": {
                "EngineExecutionTimeInMillis": 0,
                "DataScannedInBytes": 0,
                "TotalExecutionTimeInMillis": 0,
                "QueryQueueTimeInMillis": 0,
                "QueryPlanningTimeInMillis": 0,
                "ServiceProcessingTimeInMillis": 0,
            },
            "WorkGroup": execution.workgroup,
        }
    }
    return json.dumps(result)
def stop_query_execution(self):
    """Cancel the given execution and return an empty JSON body."""
    execution_id = self._get_param("QueryExecutionId")
    self.athena_backend.stop_query_execution(execution_id)
    return json.dumps({})
def error(self, msg, status):
    """Build an InvalidRequestException response tuple (body, status dict)."""
    body = json.dumps({"__type": "InvalidRequestException", "Message": msg})
    return body, dict(status=status)

View File

@ -98,20 +98,46 @@ MODEL_MAP = {
"AWS::Events::Rule": events_models.Rule,
}
UNDOCUMENTED_NAME_TYPE_MAP = {
"AWS::AutoScaling::AutoScalingGroup": "AutoScalingGroupName",
"AWS::AutoScaling::LaunchConfiguration": "LaunchConfigurationName",
"AWS::IAM::InstanceProfile": "InstanceProfileName",
}
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html
NAME_TYPE_MAP = {
"AWS::CloudWatch::Alarm": "Alarm",
"AWS::ApiGateway::ApiKey": "Name",
"AWS::ApiGateway::Model": "Name",
"AWS::CloudWatch::Alarm": "AlarmName",
"AWS::DynamoDB::Table": "TableName",
"AWS::ElastiCache::CacheCluster": "ClusterName",
"AWS::ElasticBeanstalk::Application": "ApplicationName",
"AWS::ElasticBeanstalk::Environment": "EnvironmentName",
"AWS::CodeDeploy::Application": "ApplicationName",
"AWS::CodeDeploy::DeploymentConfig": "DeploymentConfigName",
"AWS::CodeDeploy::DeploymentGroup": "DeploymentGroupName",
"AWS::Config::ConfigRule": "ConfigRuleName",
"AWS::Config::DeliveryChannel": "Name",
"AWS::Config::ConfigurationRecorder": "Name",
"AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName",
"AWS::ElasticLoadBalancingV2::LoadBalancer": "Name",
"AWS::ElasticLoadBalancingV2::TargetGroup": "Name",
"AWS::EC2::SecurityGroup": "GroupName",
"AWS::ElastiCache::CacheCluster": "ClusterName",
"AWS::ECR::Repository": "RepositoryName",
"AWS::ECS::Cluster": "ClusterName",
"AWS::Elasticsearch::Domain": "DomainName",
"AWS::Events::Rule": "Name",
"AWS::IAM::Group": "GroupName",
"AWS::IAM::ManagedPolicy": "ManagedPolicyName",
"AWS::IAM::Role": "RoleName",
"AWS::IAM::User": "UserName",
"AWS::Lambda::Function": "FunctionName",
"AWS::RDS::DBInstance": "DBInstanceIdentifier",
"AWS::S3::Bucket": "BucketName",
"AWS::SNS::Topic": "TopicName",
"AWS::SQS::Queue": "QueueName",
}
NAME_TYPE_MAP.update(UNDOCUMENTED_NAME_TYPE_MAP)
# Just ignore these models types for now
NULL_MODELS = [
@ -455,6 +481,7 @@ class ResourceMap(collections_abc.Mapping):
return self._parsed_resources[resource_logical_id]
else:
resource_json = self._resource_json_map.get(resource_logical_id)
if not resource_json:
raise KeyError(resource_logical_id)
new_resource = parse_and_create_resource(
@ -470,6 +497,34 @@ class ResourceMap(collections_abc.Mapping):
def __len__(self):
return len(self._resource_json_map)
def __get_resources_in_dependency_order(self):
    """Return logical resource ids ordered so that each resource appears
    after everything named in its ``DependsOn`` attribute.

    NOTE(review): a circular ``DependsOn`` chain would recurse without
    bound (RecursionError) — assumes the template is acyclic; confirm
    upstream validation.
    """
    # Work on a deep copy so the real resource map is left untouched;
    # entries are deleted from the copy as they are emitted.
    resource_map = copy.deepcopy(self._resource_json_map)
    resources_in_dependency_order = []

    def recursively_get_dependencies(resource):
        # Emit all of ``resource``'s unprocessed dependencies, then the
        # resource itself, removing each from the work set.
        resource_info = resource_map[resource]

        if "DependsOn" not in resource_info:
            resources_in_dependency_order.append(resource)
            del resource_map[resource]
            return

        dependencies = resource_info["DependsOn"]
        if isinstance(dependencies, str):  # Dependencies may be a string or list
            dependencies = [dependencies]

        for dependency in dependencies:
            # Only recurse into dependencies not already emitted.
            if dependency in resource_map:
                recursively_get_dependencies(dependency)

        resources_in_dependency_order.append(resource)
        del resource_map[resource]

    # Drain the work set, starting from an arbitrary remaining resource.
    while resource_map:
        recursively_get_dependencies(list(resource_map.keys())[0])

    return resources_in_dependency_order
@property
def resources(self):
return self._resource_json_map.keys()
@ -547,7 +602,7 @@ class ResourceMap(collections_abc.Mapping):
"aws:cloudformation:stack-id": self.get("AWS::StackId"),
}
)
for resource in self.resources:
for resource in self.__get_resources_in_dependency_order():
if isinstance(self[resource], ec2_models.TaggedEC2Resource):
self.tags["aws:cloudformation:logical-id"] = resource
ec2_models.ec2_backends[self._region_name].create_tags(

View File

@ -16,7 +16,7 @@ from moto.core.exceptions import DryRunClientError
from jinja2 import Environment, DictLoader, TemplateNotFound
import six
from six.moves.urllib.parse import parse_qs, urlparse
from six.moves.urllib.parse import parse_qs, parse_qsl, urlparse
import xmltodict
from werkzeug.exceptions import HTTPException
@ -30,7 +30,7 @@ log = logging.getLogger(__name__)
def _decode_dict(d):
decoded = {}
decoded = OrderedDict()
for key, value in d.items():
if isinstance(key, six.binary_type):
newkey = key.decode("utf-8")
@ -199,7 +199,7 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return cls()._dispatch(*args, **kwargs)
def setup_class(self, request, full_url, headers):
querystring = {}
querystring = OrderedDict()
if hasattr(request, "body"):
# Boto
self.body = request.body
@ -211,7 +211,7 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
# definition for back-compatibility
self.body = request.data
querystring = {}
querystring = OrderedDict()
for key, value in request.form.items():
querystring[key] = [value]
@ -240,7 +240,14 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
querystring[key] = [value]
elif self.body:
try:
querystring.update(parse_qs(raw_body, keep_blank_values=True))
querystring.update(
OrderedDict(
(key, [value])
for key, value in parse_qsl(
raw_body, keep_blank_values=True
)
)
)
except UnicodeEncodeError:
pass # ignore encoding errors, as the body may not contain a legitimate querystring
if not querystring:

View File

@ -31,7 +31,18 @@ class ResourceNotFoundException(ManagedBlockchainClientError):
self.code = 404
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException",
"An error occurred (BadRequestException) when calling the {0} operation: {1}".format(
"An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format(
pretty_called_method, operation_error
),
)
class ResourceAlreadyExistsException(ManagedBlockchainClientError):
    """HTTP 409 — raised when creating a resource that already exists
    (e.g. a member voting twice on the same proposal)."""

    def __init__(self, pretty_called_method, operation_error):
        self.code = 409
        super(ResourceAlreadyExistsException, self).__init__(
            "ResourceAlreadyExistsException",
            "An error occurred (ResourceAlreadyExistsException) when calling the {0} operation: {1}".format(
                pretty_called_method, operation_error
            ),
        )

View File

@ -12,6 +12,7 @@ from .exceptions import (
ResourceNotFoundException,
InvalidRequestException,
ResourceLimitExceededException,
ResourceAlreadyExistsException,
)
from .utils import (
@ -22,6 +23,9 @@ from .utils import (
member_name_exist_in_network,
number_of_members_in_network,
admin_password_ok,
get_node_id,
number_of_nodes_in_member,
nodes_in_member,
)
FRAMEWORKS = [
@ -212,6 +216,10 @@ class ManagedBlockchainProposal(BaseModel):
return self.actions["Removals"]
return default_return
def check_to_expire_proposal(self):
    """Flip the proposal's status to EXPIRED once its expiration date has passed."""
    # NOTE: 'expirtationdate' (sic) is the attribute name used elsewhere.
    has_expired = datetime.datetime.utcnow() > self.expirtationdate
    if has_expired:
        self.status = "EXPIRED"
def to_dict(self):
# Format for list_proposals
d = {
@ -244,10 +252,6 @@ class ManagedBlockchainProposal(BaseModel):
return d
def set_vote(self, votermemberid, votermembername, vote):
if datetime.datetime.utcnow() > self.expirtationdate:
self.status = "EXPIRED"
return False
if vote.upper() == "YES":
self.yes_vote_count += 1
else:
@ -273,7 +277,14 @@ class ManagedBlockchainProposal(BaseModel):
elif perct_no > self.network_threshold:
self.status = "REJECTED"
return True
# It is a tie - reject
if (
self.status == "IN_PROGRESS"
and self.network_threshold_comp == "GREATER_THAN"
and self.outstanding_vote_count == 0
and perct_yes == perct_no
):
self.status = "REJECTED"
class ManagedBlockchainInvitation(BaseModel):
@ -413,12 +424,92 @@ class ManagedBlockchainMember(BaseModel):
] = logpublishingconfiguration
class ManagedBlockchainNode(BaseModel):
    """A peer node belonging to a Managed Blockchain member (mock model)."""

    def __init__(
        self,
        id,
        networkid,
        memberid,
        availabilityzone,
        instancetype,
        logpublishingconfiguration,
        region,
    ):
        """Store the node's attributes; new nodes start in status AVAILABLE."""
        self.creationdate = datetime.datetime.utcnow()
        self.id = id
        self.instancetype = instancetype
        self.networkid = networkid
        self.memberid = memberid
        self.logpublishingconfiguration = logpublishingconfiguration
        self.region = region
        self.status = "AVAILABLE"
        self.availabilityzone = availabilityzone

    @property
    def member_id(self):
        """Id of the member that owns this node."""
        return self.memberid

    @property
    def node_status(self):
        """Current lifecycle status ("AVAILABLE" or "DELETED")."""
        return self.status

    def to_dict(self):
        # Format for list_nodes
        d = {
            "Id": self.id,
            "Status": self.status,
            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
            "AvailabilityZone": self.availabilityzone,
            "InstanceType": self.instancetype,
        }
        return d

    def get_format(self):
        # Format for get_node
        # Fabric endpoints are synthesized from the node/network/member ids.
        frameworkattributes = {
            "Fabric": {
                "PeerEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30003".format(
                    self.id.lower(),
                    self.networkid.lower(),
                    self.memberid.lower(),
                    self.region,
                ),
                "PeerEventEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30004".format(
                    self.id.lower(),
                    self.networkid.lower(),
                    self.memberid.lower(),
                    self.region,
                ),
            }
        }

        d = {
            "NetworkId": self.networkid,
            "MemberId": self.memberid,
            "Id": self.id,
            "InstanceType": self.instancetype,
            "AvailabilityZone": self.availabilityzone,
            "FrameworkAttributes": frameworkattributes,
            "LogPublishingConfiguration": self.logpublishingconfiguration,
            "Status": self.status,
            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
        }
        return d

    def delete(self):
        # Soft delete: the node stays stored but reads as DELETED.
        self.status = "DELETED"

    def update(self, logpublishingconfiguration):
        """Replace the node's log publishing configuration."""
        self.logpublishingconfiguration = logpublishingconfiguration
class ManagedBlockchainBackend(BaseBackend):
def __init__(self, region_name):
    """Create empty stores for every Managed Blockchain resource type."""
    self.networks = {}
    self.members = {}
    self.proposals = {}
    self.invitations = {}
    self.nodes = {}
    self.region_name = region_name
def reset(self):
@ -453,10 +544,10 @@ class ManagedBlockchainBackend(BaseBackend):
if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS:
raise BadRequestException("CreateNetwork", "Invalid request body")
## Generate network ID
# Generate network ID
network_id = get_network_id()
## Generate memberid ID and initial member
# Generate memberid ID and initial member
member_id = get_member_id()
self.members[member_id] = ManagedBlockchainMember(
id=member_id,
@ -524,7 +615,7 @@ class ManagedBlockchainBackend(BaseBackend):
"Member ID format specified in proposal is not valid.",
)
## Generate proposal ID
# Generate proposal ID
proposal_id = get_proposal_id()
self.proposals[proposal_id] = ManagedBlockchainProposal(
@ -558,6 +649,8 @@ class ManagedBlockchainBackend(BaseBackend):
proposalsfornetwork = []
for proposal_id in self.proposals:
if self.proposals.get(proposal_id).network_id == networkid:
# See if any are expired
self.proposals.get(proposal_id).check_to_expire_proposal()
proposalsfornetwork.append(self.proposals[proposal_id])
return proposalsfornetwork
@ -572,6 +665,9 @@ class ManagedBlockchainBackend(BaseBackend):
raise ResourceNotFoundException(
"GetProposal", "Proposal {0} not found.".format(proposalid)
)
# See if it needs to be set to expired
self.proposals.get(proposalid).check_to_expire_proposal()
return self.proposals.get(proposalid)
def vote_on_proposal(self, networkid, proposalid, votermemberid, vote):
@ -594,43 +690,65 @@ class ManagedBlockchainBackend(BaseBackend):
if vote.upper() not in VOTEVALUES:
raise BadRequestException("VoteOnProposal", "Invalid request body")
# See if it needs to be set to expired
self.proposals.get(proposalid).check_to_expire_proposal()
# Exception if EXPIRED
if self.proposals.get(proposalid).proposal_status == "EXPIRED":
raise InvalidRequestException(
"VoteOnProposal",
"Proposal {0} is expired and you cannot vote on it.".format(proposalid),
)
# Check if IN_PROGRESS
if self.proposals.get(proposalid).proposal_status != "IN_PROGRESS":
raise InvalidRequestException(
"VoteOnProposal",
"Proposal {0} has status {1} and you cannot vote on it.".format(
proposalid, self.proposals.get(proposalid).proposal_status
),
)
# Check to see if this member already voted
# TODO Verify exception
if votermemberid in self.proposals.get(proposalid).proposal_votes:
raise BadRequestException("VoteOnProposal", "Invalid request body")
raise ResourceAlreadyExistsException(
"VoteOnProposal",
"Member {0} has already voted on proposal {1}.".format(
votermemberid, proposalid
),
)
# Will return false if vote was not cast (e.g., status wrong)
if self.proposals.get(proposalid).set_vote(
# Cast vote
self.proposals.get(proposalid).set_vote(
votermemberid, self.members.get(votermemberid).name, vote.upper()
):
if self.proposals.get(proposalid).proposal_status == "APPROVED":
## Generate invitations
for propinvitation in self.proposals.get(proposalid).proposal_actions(
"Invitations"
):
invitation_id = get_invitation_id()
self.invitations[invitation_id] = ManagedBlockchainInvitation(
id=invitation_id,
networkid=networkid,
networkname=self.networks.get(networkid).network_name,
networkframework=self.networks.get(networkid).network_framework,
networkframeworkversion=self.networks.get(
networkid
).network_framework_version,
networkcreationdate=self.networks.get(
networkid
).network_creationdate,
region=self.region_name,
networkdescription=self.networks.get(
networkid
).network_description,
)
)
## Delete members
for propmember in self.proposals.get(proposalid).proposal_actions(
"Removals"
):
self.delete_member(networkid, propmember["MemberId"])
if self.proposals.get(proposalid).proposal_status == "APPROVED":
# Generate invitations
for propinvitation in self.proposals.get(proposalid).proposal_actions(
"Invitations"
):
invitation_id = get_invitation_id()
self.invitations[invitation_id] = ManagedBlockchainInvitation(
id=invitation_id,
networkid=networkid,
networkname=self.networks.get(networkid).network_name,
networkframework=self.networks.get(networkid).network_framework,
networkframeworkversion=self.networks.get(
networkid
).network_framework_version,
networkcreationdate=self.networks.get(
networkid
).network_creationdate,
region=self.region_name,
networkdescription=self.networks.get(networkid).network_description,
)
# Delete members
for propmember in self.proposals.get(proposalid).proposal_actions(
"Removals"
):
self.delete_member(networkid, propmember["MemberId"])
def list_proposal_votes(self, networkid, proposalid):
# Check if network exists
@ -754,7 +872,7 @@ class ManagedBlockchainBackend(BaseBackend):
"GetMember", "Member {0} not found.".format(memberid)
)
## Cannot get a member than has been delted (it does show up in the list)
# Cannot get a member than has been deleted (it does show up in the list)
if self.members.get(memberid).member_status == "DELETED":
raise ResourceNotFoundException(
"GetMember", "Member {0} not found.".format(memberid)
@ -791,6 +909,10 @@ class ManagedBlockchainBackend(BaseBackend):
# Remove network
del self.networks[networkid]
# Remove any nodes associated
for nodeid in nodes_in_member(self.nodes, memberid):
del self.nodes[nodeid]
def update_member(self, networkid, memberid, logpublishingconfiguration):
# Check if network exists
if networkid not in self.networks:
@ -805,6 +927,173 @@ class ManagedBlockchainBackend(BaseBackend):
self.members.get(memberid).update(logpublishingconfiguration)
def create_node(
    self,
    networkid,
    memberid,
    availabilityzone,
    instancetype,
    logpublishingconfiguration,
):
    """Create a node in the member after validating edition limits,
    instance type, and availability zone.

    Returns ``{"NodeId": <new id>}``. Raises ResourceNotFoundException,
    ResourceLimitExceededException, or InvalidRequestException on
    invalid input.
    """
    # Check if network exists
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "CreateNode", "Network {0} not found.".format(networkid)
        )

    if memberid not in self.members:
        raise ResourceNotFoundException(
            "CreateNode", "Member {0} not found.".format(memberid)
        )

    # Enforce the per-member node limit for the network's edition.
    networkedition = self.networks.get(networkid).network_edition
    if (
        number_of_nodes_in_member(self.nodes, memberid)
        >= EDITIONS[networkedition]["MaxNodesPerMember"]
    ):
        raise ResourceLimitExceededException(
            "CreateNode",
            "Maximum number of nodes exceeded in member {0}. The maximum number of nodes you can have in a member in a {1} Edition network is {2}".format(
                memberid,
                networkedition,
                EDITIONS[networkedition]["MaxNodesPerMember"],
            ),
        )

    # See if the instance family is correct
    # NOTE(review): STANDARD's allowed families are checked for every
    # edition — presumably STANDARD is the superset; confirm.
    correctinstancefamily = False
    for chkinsttypepre in EDITIONS["STANDARD"]["AllowedNodeInstanceTypes"]:
        chkinsttypepreregex = chkinsttypepre + ".*"
        if re.match(chkinsttypepreregex, instancetype, re.IGNORECASE):
            correctinstancefamily = True
            break

    if correctinstancefamily is False:
        raise InvalidRequestException(
            "CreateNode",
            "Requested instance {0} isn't supported.".format(instancetype),
        )

    # Check for specific types for starter
    if networkedition == "STARTER":
        if instancetype not in EDITIONS["STARTER"]["AllowedNodeInstanceTypes"]:
            raise InvalidRequestException(
                "CreateNode",
                "Instance type {0} is not supported with STARTER Edition networks.".format(
                    instancetype
                ),
            )

    # Simple availability zone check: must start with this backend's
    # region name followed by a zone letter (e.g. "us-east-1a").
    chkregionpreregex = self.region_name + "[a-z]"
    if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None:
        raise InvalidRequestException(
            "CreateNode", "Availability Zone is not valid",
        )

    node_id = get_node_id()
    self.nodes[node_id] = ManagedBlockchainNode(
        id=node_id,
        networkid=networkid,
        memberid=memberid,
        availabilityzone=availabilityzone,
        instancetype=instancetype,
        logpublishingconfiguration=logpublishingconfiguration,
        region=self.region_name,
    )

    # Return the node ID
    d = {"NodeId": node_id}
    return d
def list_nodes(self, networkid, memberid, status=None):
    """List a member's nodes, optionally filtered by status.

    Raises ResourceNotFoundException for unknown networks or members and
    for members that have been soft-deleted.
    """
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "ListNodes", "Network {0} not found.".format(networkid)
        )

    if memberid not in self.members:
        raise ResourceNotFoundException(
            "ListNodes", "Member {0} not found.".format(memberid)
        )

    # A deleted member cannot list its nodes.
    if self.members.get(memberid).member_status == "DELETED":
        raise ResourceNotFoundException(
            "ListNodes", "Member {0} not found.".format(memberid)
        )

    return [
        node
        for node in self.nodes.values()
        if node.member_id == memberid
        and (status is None or node.node_status == status)
    ]
def get_node(self, networkid, memberid, nodeid):
    """Return a node, raising ResourceNotFoundException when the network,
    member, or node is unknown — or when the node was soft-deleted.
    """
    # Check if network exists
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "GetNode", "Network {0} not found.".format(networkid)
        )

    if memberid not in self.members:
        raise ResourceNotFoundException(
            "GetNode", "Member {0} not found.".format(memberid)
        )

    if nodeid not in self.nodes:
        raise ResourceNotFoundException(
            "GetNode", "Node {0} not found.".format(nodeid)
        )

    # Cannot get a node that has been deleted (it does show up in the list)
    if self.nodes.get(nodeid).node_status == "DELETED":
        raise ResourceNotFoundException(
            "GetNode", "Node {0} not found.".format(nodeid)
        )

    return self.nodes.get(nodeid)
def delete_node(self, networkid, memberid, nodeid):
    """Soft-delete a node after validating network, member, and node existence."""
    # Check if network exists
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "DeleteNode", "Network {0} not found.".format(networkid)
        )

    if memberid not in self.members:
        raise ResourceNotFoundException(
            "DeleteNode", "Member {0} not found.".format(memberid)
        )

    if nodeid not in self.nodes:
        raise ResourceNotFoundException(
            "DeleteNode", "Node {0} not found.".format(nodeid)
        )

    # Node.delete() only flips its status to DELETED; it stays stored.
    self.nodes.get(nodeid).delete()
def update_node(self, networkid, memberid, nodeid, logpublishingconfiguration):
    """Update a node's log publishing configuration after validation."""
    # Check if network exists
    if networkid not in self.networks:
        raise ResourceNotFoundException(
            "UpdateNode", "Network {0} not found.".format(networkid)
        )

    if memberid not in self.members:
        raise ResourceNotFoundException(
            "UpdateNode", "Member {0} not found.".format(memberid)
        )

    if nodeid not in self.nodes:
        raise ResourceNotFoundException(
            "UpdateNode", "Node {0} not found.".format(nodeid)
        )

    self.nodes.get(nodeid).update(logpublishingconfiguration)
managedblockchain_backends = {}
for region in Session().get_available_regions("managedblockchain"):

View File

@ -11,6 +11,7 @@ from .utils import (
proposalid_from_managedblockchain_url,
invitationid_from_managedblockchain_url,
memberid_from_managedblockchain_url,
nodeid_from_managedblockchain_url,
)
@ -324,3 +325,103 @@ class ManagedBlockchainResponse(BaseResponse):
self.backend.delete_member(network_id, member_id)
headers["content-type"] = "application/json"
return 200, headers, ""
@classmethod
def node_response(clazz, request, full_url, headers):
    """Dispatch /nodes collection requests to a region-bound response instance."""
    # NOTE(review): 'region_from_managedblckchain_url' matches the
    # (misspelled) helper name used throughout this module — do not
    # rename here alone.
    region_name = region_from_managedblckchain_url(full_url)
    response_instance = ManagedBlockchainResponse(
        managedblockchain_backends[region_name]
    )
    return response_instance._node_response(request, full_url, headers)
def _node_response(self, request, full_url, headers):
    """Route GET (list nodes) and POST (create node) for the /nodes URL."""
    method = request.method
    # Boto-style requests expose .body; werkzeug requests expose .data.
    if hasattr(request, "body"):
        body = request.body
    else:
        body = request.data
    parsed_url = urlparse(full_url)
    querystring = parse_qs(parsed_url.query, keep_blank_values=True)
    network_id = networkid_from_managedblockchain_url(full_url)
    member_id = memberid_from_managedblockchain_url(full_url)
    if method == "GET":
        status = None
        # Optional ?status= filter for list_nodes.
        if "status" in querystring:
            status = querystring["status"][0]
        return self._all_nodes_response(network_id, member_id, status, headers)
    elif method == "POST":
        json_body = json.loads(body.decode("utf-8"))
        return self._node_response_post(
            network_id, member_id, json_body, querystring, headers
        )
def _all_nodes_response(self, network_id, member_id, status, headers):
nodes = self.backend.list_nodes(network_id, member_id, status)
response = json.dumps({"Nodes": [node.to_dict() for node in nodes]})
headers["content-type"] = "application/json"
return 200, headers, response
def _node_response_post(
    self, network_id, member_id, json_body, querystring, headers
):
    """Create a node from the request body and return {"NodeId": ...}."""
    instancetype = json_body["NodeConfiguration"]["InstanceType"]
    availabilityzone = json_body["NodeConfiguration"]["AvailabilityZone"]
    logpublishingconfiguration = json_body["NodeConfiguration"][
        "LogPublishingConfiguration"
    ]

    response = self.backend.create_node(
        network_id,
        member_id,
        availabilityzone,
        instancetype,
        logpublishingconfiguration,
    )
    return 200, headers, json.dumps(response)
@classmethod
def nodeid_response(clazz, request, full_url, headers):
    """Dispatch single-node (/nodes/&lt;id&gt;) requests to a region-bound instance."""
    # NOTE(review): 'region_from_managedblckchain_url' matches the
    # (misspelled) helper name used throughout this module.
    region_name = region_from_managedblckchain_url(full_url)
    response_instance = ManagedBlockchainResponse(
        managedblockchain_backends[region_name]
    )
    return response_instance._nodeid_response(request, full_url, headers)
def _nodeid_response(self, request, full_url, headers):
    """Route GET / PATCH / DELETE for a single-node URL."""
    method = request.method
    # Boto-style requests expose .body; werkzeug requests expose .data.
    if hasattr(request, "body"):
        body = request.body
    else:
        body = request.data
    network_id = networkid_from_managedblockchain_url(full_url)
    member_id = memberid_from_managedblockchain_url(full_url)
    node_id = nodeid_from_managedblockchain_url(full_url)
    if method == "GET":
        return self._nodeid_response_get(network_id, member_id, node_id, headers)
    elif method == "PATCH":
        json_body = json.loads(body.decode("utf-8"))
        return self._nodeid_response_patch(
            network_id, member_id, node_id, json_body, headers
        )
    elif method == "DELETE":
        return self._nodeid_response_delete(network_id, member_id, node_id, headers)
def _nodeid_response_get(self, network_id, member_id, node_id, headers):
node = self.backend.get_node(network_id, member_id, node_id)
response = json.dumps({"Node": node.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
def _nodeid_response_patch(
    self, network_id, member_id, node_id, json_body, headers
):
    """Update a node's log publishing configuration."""
    # The PATCH body *is* the new log publishing configuration.
    logpublishingconfiguration = json_body
    self.backend.update_node(
        network_id, member_id, node_id, logpublishingconfiguration,
    )
    return 200, headers, ""
def _nodeid_response_delete(self, network_id, member_id, node_id, headers):
    """Soft-delete the node and return an empty JSON body."""
    self.backend.delete_node(network_id, member_id, node_id)
    headers["content-type"] = "application/json"
    return 200, headers, ""

View File

@ -13,4 +13,7 @@ url_paths = {
"{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response,
"{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response,
"{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response,
"{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes$": ManagedBlockchainResponse.node_response,
"{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes?(?P<querys>[^/.]+)$": ManagedBlockchainResponse.node_response,
"{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes/(?P<nodeid>[^/.]+)$": ManagedBlockchainResponse.nodeid_response,
}

View File

@ -104,3 +104,32 @@ def admin_password_ok(password):
return False
else:
return True
def nodeid_from_managedblockchain_url(full_url):
    """Extract the node id (``nd-`` plus 26 alphanumerics) from a URL.

    Returns None when the URL contains no node id segment.
    """
    # Raw string: the original "\/" was an invalid escape sequence
    # (DeprecationWarning); "/" needs no escaping in a regex anyway.
    id_search = re.search(r"/nd-[A-Z0-9]{26}", full_url, re.IGNORECASE)
    return_id = None
    if id_search:
        return_id = id_search.group(0).replace("/", "")
    return return_id
def get_node_id():
    """Generate a random node id: ``nd-`` followed by 26 uppercase alphanumerics."""
    alphabet = string.ascii_uppercase + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(26))
    return "nd-" + suffix
def number_of_nodes_in_member(nodes, memberid, node_status=None):
    """Count the nodes owned by ``memberid``, optionally restricted to a status."""
    count = 0
    for node in nodes.values():
        if node.member_id != memberid:
            continue
        if node_status is not None and node.node_status != node_status:
            continue
        count += 1
    return count
def nodes_in_member(nodes, memberid):
    """Return the ids of all nodes belonging to ``memberid``."""
    matching = []
    for node_id, node in nodes.items():
        if node.member_id == memberid:
            matching.append(node_id)
    return matching

View File

@ -286,8 +286,7 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
}
# TODO add these to the keys and values functions / combine functions
# ELB
# ELB, resource type elasticloadbalancing:loadbalancer
def get_elbv2_tags(arn):
result = []
for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items():
@ -296,8 +295,8 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
if (
not resource_type_filters
or "elasticloadbalancer" in resource_type_filters
or "elasticloadbalancer:loadbalancer" in resource_type_filters
or "elasticloadbalancing" in resource_type_filters
or "elasticloadbalancing:loadbalancer" in resource_type_filters
):
for elb in self.elbv2_backend.load_balancers.values():
tags = get_elbv2_tags(elb.arn)
@ -306,6 +305,27 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
yield {"ResourceARN": "{0}".format(elb.arn), "Tags": tags}
# ELB Target Group, resource type elasticloadbalancing:targetgroup
def get_target_group_tags(arn):
    # Build the TagList for the target group identified by ``arn``.
    # Fix: use the ``arn`` parameter instead of reading the loop variable
    # ``target_group.arn`` from the enclosing scope — the parameter was
    # previously ignored, which only worked by coincidence because every
    # call site passed ``target_group.arn``.
    result = []
    for key, value in self.elbv2_backend.target_groups[arn].tags.items():
        result.append({"Key": key, "Value": value})
    return result
if (
not resource_type_filters
or "elasticloadbalancing" in resource_type_filters
or "elasticloadbalancing:targetgroup" in resource_type_filters
):
for target_group in self.elbv2_backend.target_groups.values():
tags = get_target_group_tags(target_group.arn)
if not tag_filter(tags): # Skip if no tags, or invalid filter
continue
yield {"ResourceARN": "{0}".format(target_group.arn), "Tags": tags}
# EMR Cluster
# Glacier Vault

View File

@ -1,8 +1,7 @@
from __future__ import unicode_literals
import datetime
from botocore.exceptions import ClientError
from nose.tools import assert_raises
import boto3
import sure # noqa
@ -57,3 +56,127 @@ def test_create_work_group():
work_group["Name"].should.equal("athena_workgroup")
work_group["Description"].should.equal("Test work group")
work_group["State"].should.equal("ENABLED")
@mock_athena
def test_create_and_get_workgroup():
    """A created work group can be read back with the same static fields."""
    client = boto3.client("athena", region_name="us-east-1")
    create_basic_workgroup(client=client, name="athena_workgroup")

    work_group = client.get_work_group(WorkGroup="athena_workgroup")["WorkGroup"]
    # CreationTime is backend-generated; we only check the static fields.
    work_group.pop("CreationTime")
    expected = {
        "Name": "athena_workgroup",
        "State": "ENABLED",
        "Configuration": {
            "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/"}
        },
        "Description": "Test work group",
    }
    work_group.should.equal(expected)
@mock_athena
def test_start_query_execution():
    """Every start_query_execution call returns a distinct execution id."""
    client = boto3.client("athena", region_name="us-east-1")
    create_basic_workgroup(client=client, name="athena_workgroup")

    first = client.start_query_execution(
        QueryString="query1",
        QueryExecutionContext={"Database": "string"},
        ResultConfiguration={"OutputLocation": "string"},
        WorkGroup="athena_workgroup",
    )
    assert "QueryExecutionId" in first

    # A second query, started without an explicit work group, gets its own id.
    second = client.start_query_execution(
        QueryString="query2",
        QueryExecutionContext={"Database": "string"},
        ResultConfiguration={"OutputLocation": "string"},
    )
    assert "QueryExecutionId" in second
    first["QueryExecutionId"].shouldnt.equal(second["QueryExecutionId"])
@mock_athena
def test_start_query_validate_workgroup():
    """Starting a query against an unknown work group is rejected."""
    client = boto3.client("athena", region_name="us-east-1")

    with assert_raises(ClientError) as err:
        client.start_query_execution(
            QueryString="query1",
            QueryExecutionContext={"Database": "string"},
            ResultConfiguration={"OutputLocation": "string"},
            WorkGroup="unknown_workgroup",
        )

    error = err.exception.response["Error"]
    error["Code"].should.equal("InvalidRequestException")
    error["Message"].should.equal("WorkGroup does not exist")
@mock_athena
def test_get_query_execution():
    """get_query_execution reflects the parameters the query was started with."""
    client = boto3.client("athena", region_name="us-east-1")
    query = "SELECT stuff"
    location = "s3://bucket-name/prefix/"
    database = "database"

    execution_id = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={"Database": database},
        ResultConfiguration={"OutputLocation": location},
    )["QueryExecutionId"]

    details = client.get_query_execution(QueryExecutionId=execution_id)[
        "QueryExecution"
    ]

    details["QueryExecutionId"].should.equal(execution_id)
    details["Query"].should.equal(query)
    details["StatementType"].should.equal("DDL")
    details["ResultConfiguration"]["OutputLocation"].should.equal(location)
    details["QueryExecutionContext"]["Database"].should.equal(database)
    details["Status"]["State"].should.equal("QUEUED")
    # A freshly queued query reports all statistics as zero.
    details["Statistics"].should.equal(
        {
            "EngineExecutionTimeInMillis": 0,
            "DataScannedInBytes": 0,
            "TotalExecutionTimeInMillis": 0,
            "QueryQueueTimeInMillis": 0,
            "QueryPlanningTimeInMillis": 0,
            "ServiceProcessingTimeInMillis": 0,
        }
    )
    # No work group was supplied, so none is reported back.
    assert "WorkGroup" not in details
@mock_athena
def test_stop_query_execution():
    """Stopping a started query moves it to the CANCELLED state."""
    client = boto3.client("athena", region_name="us-east-1")

    execution_id = client.start_query_execution(
        QueryString="SELECT stuff",
        QueryExecutionContext={"Database": "database"},
        ResultConfiguration={"OutputLocation": "s3://bucket-name/prefix/"},
    )["QueryExecutionId"]

    client.stop_query_execution(QueryExecutionId=execution_id)

    details = client.get_query_execution(QueryExecutionId=execution_id)[
        "QueryExecution"
    ]
    details["QueryExecutionId"].should.equal(execution_id)
    details["Status"]["State"].should.equal("CANCELLED")
def create_basic_workgroup(client, name):
    """Create an Athena work group *name* with a fixed description and
    a canned S3 result location, using the given *client*."""
    configuration = {
        "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/"}
    }
    client.create_work_group(
        Name=name,
        Description="Test work group",
        Configuration=configuration,
    )

View File

@ -0,0 +1,143 @@
import boto3
from moto import mock_cloudformation, mock_ecs, mock_autoscaling, mock_s3
import json
# CloudFormation template whose AutoScalingGroup declares a *list*-form
# DependsOn on both the ECS cluster and the launch configuration, so both
# must be created before the group.
depends_on_template_list = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "ECSCluster": {
            "Type": "AWS::ECS::Cluster",
            "Properties": {"ClusterName": "test-cluster"},
        },
        "AutoScalingGroup": {
            "Type": "AWS::AutoScaling::AutoScalingGroup",
            "Properties": {
                "AutoScalingGroupName": "test-scaling-group",
                "DesiredCapacity": 1,
                "MinSize": 1,
                "MaxSize": 50,
                "LaunchConfigurationName": "test-launch-config",
                "AvailabilityZones": ["us-east-1a"],
            },
            "DependsOn": ["ECSCluster", "LaunchConfig"],
        },
        "LaunchConfig": {
            "Type": "AWS::AutoScaling::LaunchConfiguration",
            "Properties": {"LaunchConfigurationName": "test-launch-config",},
        },
    },
}
# Same scenario as depends_on_template_list but with a *string*-form
# DependsOn (a single dependency rather than a list).
depends_on_template_string = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "AutoScalingGroup": {
            "Type": "AWS::AutoScaling::AutoScalingGroup",
            "Properties": {
                "AutoScalingGroupName": "test-scaling-group",
                "DesiredCapacity": 1,
                "MinSize": 1,
                "MaxSize": 50,
                "LaunchConfigurationName": "test-launch-config",
                "AvailabilityZones": ["us-east-1a"],
            },
            "DependsOn": "LaunchConfig",
        },
        "LaunchConfig": {
            "Type": "AWS::AutoScaling::LaunchConfiguration",
            "Properties": {"LaunchConfigurationName": "test-launch-config",},
        },
    },
}
def make_chained_depends_on_template():
    """Build a template with buckets Bucket1..Bucket9 where each bucket
    from Bucket2 onward depends on the previous one, forming a linear
    DependsOn chain.

    :return: the template serialized as a JSON string.
    """
    # NOTE: the original seeded a "Bucket1" resource and then immediately
    # overwrote it in a range(1, 10) loop with a node whose DependsOn
    # referenced a nonexistent "Bucket0".  Seed Bucket1 without a
    # dependency and chain from Bucket2 instead; the resulting bucket
    # names (test-bucket-1..9-us-east-1) are unchanged.
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "Bucket1": {
                "Type": "AWS::S3::Bucket",
                "Properties": {"BucketName": "test-bucket-1-us-east-1"},
            },
        },
    }
    for i in range(2, 10):
        template["Resources"]["Bucket" + str(i)] = {
            "Type": "AWS::S3::Bucket",
            "Properties": {"BucketName": "test-bucket-" + str(i) + "-us-east-1"},
            "DependsOn": ["Bucket" + str(i - 1)],
        }
    return json.dumps(template)
# Pre-serialized template bodies handed to create_stack in the tests below.
depends_on_template_list_json = json.dumps(depends_on_template_list)
depends_on_template_string_json = json.dumps(depends_on_template_string)
@mock_cloudformation
@mock_autoscaling
@mock_ecs
def test_create_stack_with_depends_on():
    """A list-valued DependsOn still results in every resource being created."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="depends_on_test", TemplateBody=depends_on_template_list_json
    )

    autoscaling = boto3.client("autoscaling", region_name="us-east-1")
    group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][0]
    assert group["AutoScalingGroupName"] == "test-scaling-group"
    assert group["DesiredCapacity"] == 1
    assert group["MinSize"] == 1
    assert group["MaxSize"] == 50
    assert group["AvailabilityZones"] == ["us-east-1a"]

    launch_config = autoscaling.describe_launch_configurations()[
        "LaunchConfigurations"
    ][0]
    assert launch_config["LaunchConfigurationName"] == "test-launch-config"

    ecs = boto3.client("ecs", region_name="us-east-1")
    cluster_arn = ecs.list_clusters()["clusterArns"][0]
    assert cluster_arn == "arn:aws:ecs:us-east-1:012345678910:cluster/test-cluster"
@mock_cloudformation
@mock_autoscaling
def test_create_stack_with_depends_on_string():
    """A string-valued (single) DependsOn is accepted and resolved."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="depends_on_string_test", TemplateBody=depends_on_template_string_json
    )

    autoscaling = boto3.client("autoscaling", region_name="us-east-1")
    group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][0]
    assert group["AutoScalingGroupName"] == "test-scaling-group"
    assert group["DesiredCapacity"] == 1
    assert group["MinSize"] == 1
    assert group["MaxSize"] == 50
    assert group["AvailabilityZones"] == ["us-east-1a"]

    launch_config = autoscaling.describe_launch_configurations()[
        "LaunchConfigurations"
    ][0]
    assert launch_config["LaunchConfigurationName"] == "test-launch-config"
@mock_cloudformation
@mock_s3
def test_create_chained_depends_on_stack():
    """A linear DependsOn chain of buckets creates every bucket."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="linked_depends_on_test",
        TemplateBody=make_chained_depends_on_template(),
    )

    s3 = boto3.client("s3", region_name="us-east-1")
    bucket_names = sorted(b["Name"] for b in s3.list_buckets()["Buckets"])
    expected = ["test-bucket-" + str(i) + "-us-east-1" for i in range(1, 10)]
    assert bucket_names == expected

View File

@ -49,7 +49,7 @@ from moto import (
from moto.core import ACCOUNT_ID
from moto.dynamodb2.models import Table
from .fixtures import (
from tests.test_cloudformation.fixtures import (
ec2_classic_eip,
fn_join,
rds_mysql_with_db_parameter_group,
@ -940,12 +940,10 @@ def test_iam_roles():
role_name_to_id = {}
for role_result in role_results:
role = iam_conn.get_role(role_result.role_name)
if "my-role" not in role.role_name:
# Role name is not specified, so randomly generated - can't check exact name
if "with-path" in role.role_name:
role_name_to_id["with-path"] = role.role_id
role.path.should.equal("my-path")
len(role.role_name).should.equal(
5
) # Role name is not specified, so randomly generated - can't check exact name
else:
role_name_to_id["no-path"] = role.role_id
role.role_name.should.equal("my-role-no-path-name")

View File

@ -62,10 +62,9 @@ def test_boto3_json_invalid_missing_resource():
cf_conn.validate_template(TemplateBody=dummy_bad_template_json)
assert False
except botocore.exceptions.ClientError as e:
assert (
str(e)
== "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack"
" with id Missing top level item Resources to file module does not exist"
str(e).should.contain(
"An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack"
" with id Missing top level"
)
assert True
@ -103,9 +102,8 @@ def test_boto3_yaml_invalid_missing_resource():
cf_conn.validate_template(TemplateBody=yaml_bad_template)
assert False
except botocore.exceptions.ClientError as e:
assert (
str(e)
== "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack"
" with id Missing top level item Resources to file module does not exist"
str(e).should.contain(
"An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack"
" with id Missing top level"
)
assert True

View File

@ -28,6 +28,17 @@ multiple_policy_actions = {
"Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}]
}
# Baseline NodeConfiguration used by the node tests: the bc.t3.small
# instance type, a fixed availability zone, and CloudWatch publishing
# disabled for both chaincode and peer logs.
default_nodeconfiguration = {
    "InstanceType": "bc.t3.small",
    "AvailabilityZone": "us-east-1a",
    "LogPublishingConfiguration": {
        "Fabric": {
            "ChaincodeLogs": {"Cloudwatch": {"Enabled": False}},
            "PeerLogs": {"Cloudwatch": {"Enabled": False}},
        }
    },
}
def member_id_exist_in_list(members, memberid):
memberidxists = False
@ -65,3 +76,12 @@ def select_invitation_id_for_network(invitations, networkid, status=None):
if status is None or invitation["Status"] == status:
invitationsfornetwork.append(invitation["InvitationId"])
return invitationsfornetwork
def node_id_exist_in_list(nodes, nodeid):
    """Return True if any node dict in *nodes* has an "Id" equal to *nodeid*.

    :param nodes: iterable of node dicts, each with an "Id" key.
    :param nodeid: node id to look for.
    """
    # any() replaces the manual found-flag loop (and its typo'd variable).
    return any(node["Id"] == nodeid for node in nodes)

View File

@ -3,7 +3,6 @@ from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers

View File

@ -3,7 +3,6 @@ from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@ -204,7 +203,7 @@ def test_create_another_member_withopts():
@mock_managedblockchain
def test_create_and_delete_member():
def test_invite_and_remove_member():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
@ -362,17 +361,14 @@ def test_create_too_many_members():
response["Invitations"], network_id, "PENDING"
)[0]
# Try to create member with already used invitation
# Try to create one too many members
response = conn.create_member.when.called_with(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember6", "admin", "Admin12345", False, "Test Member 6"
),
).should.throw(
Exception,
"5 is the maximum number of members allowed in a STARTER Edition network",
)
).should.throw(Exception, "is the maximum number of members allowed in a",)
@mock_managedblockchain

View File

@ -3,7 +3,6 @@ from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers

View File

@ -0,0 +1,477 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_node():
    """Exercise the full node lifecycle: create, list, get, update, delete."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")
    # Create network
    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Create a node
    response = conn.create_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeConfiguration=helpers.default_nodeconfiguration,
    )
    node_id = response["NodeId"]

    # The node shows up in the unfiltered list
    nodes = conn.list_nodes(NetworkId=network_id, MemberId=member_id)["Nodes"]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)

    # Get node details
    response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id)
    response["Node"]["AvailabilityZone"].should.equal("us-east-1a")

    # Flip the chaincode CloudWatch flag and update the node
    logconfignewenabled = not helpers.default_nodeconfiguration[
        "LogPublishingConfiguration"
    ]["Fabric"]["ChaincodeLogs"]["Cloudwatch"]["Enabled"]
    logconfignew = {
        "Fabric": {"ChaincodeLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}}
    }
    conn.update_node(
        NetworkId=network_id,
        MemberId=member_id,
        NodeId=node_id,
        LogPublishingConfiguration=logconfignew,
    )

    # Delete node
    conn.delete_node(
        NetworkId=network_id, MemberId=member_id, NodeId=node_id,
    )

    # The deleted node is still listed, both unfiltered and as DELETED...
    nodes = conn.list_nodes(NetworkId=network_id, MemberId=member_id)["Nodes"]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)
    nodes = conn.list_nodes(NetworkId=network_id, MemberId=member_id, Status="DELETED")[
        "Nodes"
    ]
    nodes.should.have.length_of(1)
    helpers.node_id_exist_in_list(nodes, node_id).should.equal(True)

    # ...but can no longer be fetched directly.  (The original bound the
    # useless result of should.throw to `response`; dropped.)
    conn.get_node.when.called_with(
        NetworkId=network_id, MemberId=member_id, NodeId=node_id,
    ).should.throw(Exception, "Node {0} not found".format(node_id))
@mock_managedblockchain
def test_create_node_standard_edition():
    """On a STANDARD edition network a bc.t3.large node is accepted, and
    deleting a member removes its nodes."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    frameworkconfiguration = {"Fabric": {"Edition": "STANDARD"}}

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # This instance type is only allowed with standard edition
    nodeconfig = dict(helpers.default_nodeconfiguration)
    nodeconfig["InstanceType"] = "bc.t3.large"
    response = conn.create_node(
        NetworkId=network_id, MemberId=member_id, NodeConfiguration=nodeconfig,
    )
    node_id = response["NodeId"]

    # Get node details
    response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id)
    response["Node"]["InstanceType"].should.equal("bc.t3.large")

    # Invite a second member so the network survives deleting the first.
    response = conn.create_proposal(
        NetworkId=network_id,
        MemberId=member_id,
        Actions=helpers.default_policy_actions,
    )
    proposal_id = response["ProposalId"]
    conn.vote_on_proposal(
        NetworkId=network_id,
        ProposalId=proposal_id,
        VoterMemberId=member_id,
        Vote="YES",
    )
    invitation_id = conn.list_invitations()["Invitations"][0]["InvitationId"]
    conn.create_member(
        InvitationId=invitation_id,
        NetworkId=network_id,
        MemberConfiguration=helpers.create_member_configuration(
            "testmember2", "admin", "Admin12345", False, "Test Member 2"
        ),
    )

    # Remove member 1 - should remove its nodes; listing them now fails.
    conn.delete_member(NetworkId=network_id, MemberId=member_id)
    conn.list_nodes.when.called_with(
        NetworkId=network_id, MemberId=member_id,
    ).should.throw(Exception, "Member {0} not found".format(member_id))
@mock_managedblockchain
def test_create_too_many_nodes():
    """Creating more nodes than the member's maximum is rejected."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")
    # Create network
    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Two nodes are allowed (loop replaces the copy-pasted create calls)
    for _ in range(2):
        conn.create_node(
            NetworkId=network_id,
            MemberId=member_id,
            NodeConfiguration=helpers.default_nodeconfiguration,
        )

    nodes = conn.list_nodes(NetworkId=network_id, MemberId=member_id)["Nodes"]
    nodes.should.have.length_of(2)

    # One more exceeds the per-member maximum
    conn.create_node.when.called_with(
        NetworkId=network_id,
        MemberId=member_id,
        NodeConfiguration=helpers.default_nodeconfiguration,
    ).should.throw(
        Exception, "Maximum number of nodes exceeded in member {0}".format(member_id),
    )
@mock_managedblockchain
def test_create_node_badnetwork():
    """create_node on a nonexistent network fails with 'Network ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    # Dropped the useless binding of should.throw's result to `response`.
    conn.create_node.when.called_with(
        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeConfiguration=helpers.default_nodeconfiguration,
    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_node_badmember():
    """create_node with an unknown member id fails with 'Member ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    conn.create_node.when.called_with(
        NetworkId=network_id,
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeConfiguration=helpers.default_nodeconfiguration,
    ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_node_badnodeconfig():
    """Invalid instance types and availability zones are rejected."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    # Unknown instance type
    nodeconfig = dict(helpers.default_nodeconfiguration)
    nodeconfig["InstanceType"] = "foo"
    conn.create_node.when.called_with(
        NetworkId=network_id, MemberId=member_id, NodeConfiguration=nodeconfig,
    ).should.throw(Exception, "Requested instance foo isn't supported.")

    # Instance type not allowed for this network's edition
    nodeconfig = dict(helpers.default_nodeconfiguration)
    nodeconfig["InstanceType"] = "bc.t3.large"
    conn.create_node.when.called_with(
        NetworkId=network_id, MemberId=member_id, NodeConfiguration=nodeconfig,
    ).should.throw(
        Exception,
        "Instance type bc.t3.large is not supported with STARTER Edition networks",
    )

    # Malformed availability zone
    nodeconfig = dict(helpers.default_nodeconfiguration)
    nodeconfig["AvailabilityZone"] = "us-east-11"
    conn.create_node.when.called_with(
        NetworkId=network_id, MemberId=member_id, NodeConfiguration=nodeconfig,
    ).should.throw(Exception, "Availability Zone is not valid")
@mock_managedblockchain
def test_list_nodes_badnetwork():
    """list_nodes on a nonexistent network fails with 'Network ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    conn.list_nodes.when.called_with(
        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_list_nodes_badmember():
    """list_nodes with an unknown member id fails with 'Member ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    conn.list_nodes.when.called_with(
        NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badnetwork():
    """get_node on a nonexistent network fails with 'Network ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    conn.get_node.when.called_with(
        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badmember():
    """get_node with an unknown member id fails with 'Member ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    conn.get_node.when.called_with(
        NetworkId=network_id,
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_node_badnode():
    """get_node with an unknown node id fails with 'Node ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    conn.get_node.when.called_with(
        NetworkId=network_id,
        MemberId=member_id,
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badnetwork():
    """delete_node on a nonexistent network fails with 'Network ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    conn.delete_node.when.called_with(
        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badmember():
    """delete_node with an unknown member id fails with 'Member ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    conn.delete_node.when.called_with(
        NetworkId=network_id,
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_node_badnode():
    """delete_node with an unknown node id fails with 'Node ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    conn.delete_node.when.called_with(
        NetworkId=network_id,
        MemberId=member_id,
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
    ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badnetwork():
    """update_node on a nonexistent network fails with 'Network ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    conn.update_node.when.called_with(
        NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        LogPublishingConfiguration=helpers.default_nodeconfiguration[
            "LogPublishingConfiguration"
        ],
    ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badmember():
    """update_node with an unknown member id fails with 'Member ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]

    conn.update_node.when.called_with(
        NetworkId=network_id,
        MemberId="m-ABCDEFGHIJKLMNOP0123456789",
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        LogPublishingConfiguration=helpers.default_nodeconfiguration[
            "LogPublishingConfiguration"
        ],
    ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_node_badnode():
    """update_node with an unknown node id fails with 'Node ... not found'."""
    conn = boto3.client("managedblockchain", region_name="us-east-1")

    response = conn.create_network(
        Name="testnetwork1",
        Description="Test Network 1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration=helpers.default_frameworkconfiguration,
        VotingPolicy=helpers.default_votingpolicy,
        MemberConfiguration=helpers.default_memberconfiguration,
    )
    network_id = response["NetworkId"]
    member_id = response["MemberId"]

    conn.update_node.when.called_with(
        NetworkId=network_id,
        MemberId=member_id,
        NodeId="nd-ABCDEFGHIJKLMNOP0123456789",
        LogPublishingConfiguration=helpers.default_nodeconfiguration[
            "LogPublishingConfiguration"
        ],
    ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found")

View File

@ -3,7 +3,6 @@ from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers

View File

@ -7,7 +7,6 @@ import sure # noqa
from freezegun import freeze_time
from nose import SkipTest
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain, settings
from . import helpers
@ -186,6 +185,18 @@ def test_vote_on_proposal_yes_greater_than():
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote no with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id2,
Vote="NO",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("REJECTED")
@mock_managedblockchain
def test_vote_on_proposal_no_greater_than():
@ -310,6 +321,47 @@ def test_vote_on_proposal_expiredproposal():
with freeze_time("2015-02-01 12:00:00"):
# Vote yes - should set status to expired
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
).should.throw(
Exception,
"Proposal {0} is expired and you cannot vote on it.".format(proposal_id),
)
# Get proposal details - should be EXPIRED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("EXPIRED")
@mock_managedblockchain
def test_vote_on_proposal_status_check():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create 2 more members
for counter in range(2, 4):
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
@ -317,9 +369,88 @@ def test_vote_on_proposal_expiredproposal():
Vote="YES",
)
# Get proposal details - should be EXPIRED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("EXPIRED")
memberidlist = [None, None, None]
memberidlist[0] = member_id
for counter in range(2, 4):
# Get the invitation
response = conn.list_invitations()
invitation_id = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)[0]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember" + str(counter),
"admin",
"Admin12345",
False,
"Test Member " + str(counter),
),
)
member_id = response["MemberId"]
memberidlist[counter - 1] = member_id
# Should be no more pending invitations
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(0)
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[0],
Vote="YES",
)
# Vote yes with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[1],
Vote="YES",
)
# Get proposal details - now approved (2 yes, 1 outstanding)
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("APPROVED")
# Should be one pending invitation
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(1)
# Vote with member 3 - should throw an exception and not create a new invitation
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[2],
Vote="YES",
).should.throw(Exception, "and you cannot vote on it")
# Should still be one pending invitation
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(1)
@mock_managedblockchain
@ -425,13 +556,21 @@ def test_vote_on_proposal_badvote():
def test_vote_on_proposal_alreadyvoted():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
@ -465,7 +604,6 @@ def test_vote_on_proposal_alreadyvoted():
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
@ -495,7 +633,10 @@ def test_vote_on_proposal_alreadyvoted():
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
).should.throw(Exception, "Invalid request body")
).should.throw(
Exception,
"Member {0} has already voted on proposal {1}.".format(member_id, proposal_id),
)
@mock_managedblockchain

View File

@ -9,44 +9,6 @@ from moto import mock_resourcegroupstaggingapi
from moto import mock_s3
@mock_s3
@mock_resourcegroupstaggingapi
def test_get_resources_s3():
    """Exercise get_resources pagination across four tagged S3 buckets."""
    s3 = boto3.client("s3", region_name="eu-central-1")

    # Tag keys key1..key4 that we expect to drain across both pages.
    expected_keys = set()
    for n in range(1, 5):
        suffix = str(n)
        s3.create_bucket(
            Bucket="test_bucket" + suffix,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        s3.put_bucket_tagging(
            Bucket="test_bucket" + suffix,
            Tagging={"TagSet": [{"Key": "key" + suffix, "Value": "value" + suffix}]},
        )
        expected_keys.add("key" + suffix)

    tagging = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")

    # First page consumes two of the four tagged resources.
    page = tagging.get_resources(ResourcesPerPage=2)
    for mapping in page["ResourceTagMappingList"]:
        expected_keys.remove(mapping["Tags"][0]["Key"])
    expected_keys.should.have.length_of(2)

    # Following the pagination token drains the remaining two.
    page = tagging.get_resources(
        ResourcesPerPage=2, PaginationToken=page["PaginationToken"]
    )
    for mapping in page["ResourceTagMappingList"]:
        expected_keys.remove(mapping["Tags"][0]["Key"])
    expected_keys.should.have.length_of(0)
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_resources_ec2():
@ -233,12 +195,14 @@ def test_get_many_resources():
rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1")
resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancer:loadbalancer"])
resp = rtapi.get_resources(
ResourceTypeFilters=["elasticloadbalancing:loadbalancer"]
)
resp["ResourceTagMappingList"].should.have.length_of(2)
resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("loadbalancer/")
resp = rtapi.get_resources(
ResourceTypeFilters=["elasticloadbalancer:loadbalancer"],
ResourceTypeFilters=["elasticloadbalancing:loadbalancer"],
TagFilters=[{"Key": "key_name"}],
)
@ -247,4 +211,85 @@ def test_get_many_resources():
{"Key": "key_name", "Value": "a_value"}
)
# TODO test pagenation
# TODO test pagination
@mock_ec2
@mock_elbv2
@mock_resourcegroupstaggingapi
def test_get_resources_target_group():
    """get_resources should list ELBv2 target groups and honour TagFilters."""
    ec2 = boto3.resource("ec2", region_name="eu-central-1")
    elbv2 = boto3.client("elbv2", region_name="eu-central-1")
    vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")

    # Two target groups, tagged Test=1 and Test=2 respectively.
    for idx in ("1", "2"):
        group = elbv2.create_target_group(
            Name="test" + idx,
            Protocol="HTTP",
            Port=8080,
            VpcId=vpc.id,
            TargetType="instance",
        )["TargetGroups"][0]
        elbv2.add_tags(
            ResourceArns=[group["TargetGroupArn"]],
            Tags=[{"Key": "Test", "Value": idx}],
        )

    rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")

    # Unfiltered query returns both target groups.
    resp = rtapi.get_resources(
        ResourceTypeFilters=["elasticloadbalancing:targetgroup"]
    )
    resp["ResourceTagMappingList"].should.have.length_of(2)

    # Filtering on Test=1 narrows the result to a single group.
    resp = rtapi.get_resources(
        ResourceTypeFilters=["elasticloadbalancing:targetgroup"],
        TagFilters=[{"Key": "Test", "Values": ["1"]}],
    )
    resp["ResourceTagMappingList"].should.have.length_of(1)
    resp["ResourceTagMappingList"][0]["Tags"].should.contain(
        {"Key": "Test", "Value": "1"}
    )
@mock_s3
@mock_resourcegroupstaggingapi
def test_get_resources_s3():
    """Pagination test: four tagged buckets fetched two per page."""
    client = boto3.client("s3", region_name="eu-central-1")

    remaining = set()  # tag keys not yet returned by get_resources
    for num in (1, 2, 3, 4):
        i_str = str(num)
        client.create_bucket(
            Bucket="test_bucket" + i_str,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        client.put_bucket_tagging(
            Bucket="test_bucket" + i_str,
            Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]},
        )
        remaining.add("key" + i_str)

    rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1")

    # Page one: two of the four tag keys come back.
    first_page = rtapi.get_resources(ResourcesPerPage=2)
    for item in first_page["ResourceTagMappingList"]:
        remaining.remove(item["Tags"][0]["Key"])
    remaining.should.have.length_of(2)

    # Page two, via the token from page one, returns the rest.
    second_page = rtapi.get_resources(
        ResourcesPerPage=2, PaginationToken=first_page["PaginationToken"]
    )
    for item in second_page["ResourceTagMappingList"]:
        remaining.remove(item["Tags"][0]["Key"])
    remaining.should.have.length_of(0)