Service: Comprehend (Entity Recognizer) (#5530)

This commit is contained in:
Bert Blommers 2022-10-04 20:26:17 +00:00 committed by GitHub
parent 82ee649242
commit 694ae84bc0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 792 additions and 3 deletions

View File

@ -1110,6 +1110,83 @@
- [ ] verify_user_attribute
</details>
## comprehend
<details>
<summary>11% implemented</summary>
- [ ] batch_detect_dominant_language
- [ ] batch_detect_entities
- [ ] batch_detect_key_phrases
- [ ] batch_detect_sentiment
- [ ] batch_detect_syntax
- [ ] classify_document
- [ ] contains_pii_entities
- [ ] create_document_classifier
- [ ] create_endpoint
- [X] create_entity_recognizer
- [ ] delete_document_classifier
- [ ] delete_endpoint
- [X] delete_entity_recognizer
- [ ] delete_resource_policy
- [ ] describe_document_classification_job
- [ ] describe_document_classifier
- [ ] describe_dominant_language_detection_job
- [ ] describe_endpoint
- [ ] describe_entities_detection_job
- [X] describe_entity_recognizer
- [ ] describe_events_detection_job
- [ ] describe_key_phrases_detection_job
- [ ] describe_pii_entities_detection_job
- [ ] describe_resource_policy
- [ ] describe_sentiment_detection_job
- [ ] describe_targeted_sentiment_detection_job
- [ ] describe_topics_detection_job
- [ ] detect_dominant_language
- [ ] detect_entities
- [ ] detect_key_phrases
- [ ] detect_pii_entities
- [ ] detect_sentiment
- [ ] detect_syntax
- [ ] import_model
- [ ] list_document_classification_jobs
- [ ] list_document_classifier_summaries
- [ ] list_document_classifiers
- [ ] list_dominant_language_detection_jobs
- [ ] list_endpoints
- [ ] list_entities_detection_jobs
- [ ] list_entity_recognizer_summaries
- [X] list_entity_recognizers
- [ ] list_events_detection_jobs
- [ ] list_key_phrases_detection_jobs
- [ ] list_pii_entities_detection_jobs
- [ ] list_sentiment_detection_jobs
- [X] list_tags_for_resource
- [ ] list_targeted_sentiment_detection_jobs
- [ ] list_topics_detection_jobs
- [ ] put_resource_policy
- [ ] start_document_classification_job
- [ ] start_dominant_language_detection_job
- [ ] start_entities_detection_job
- [ ] start_events_detection_job
- [ ] start_key_phrases_detection_job
- [ ] start_pii_entities_detection_job
- [ ] start_sentiment_detection_job
- [ ] start_targeted_sentiment_detection_job
- [ ] start_topics_detection_job
- [ ] stop_dominant_language_detection_job
- [ ] stop_entities_detection_job
- [ ] stop_events_detection_job
- [ ] stop_key_phrases_detection_job
- [ ] stop_pii_entities_detection_job
- [ ] stop_sentiment_detection_job
- [ ] stop_targeted_sentiment_detection_job
- [ ] stop_training_document_classifier
- [X] stop_training_entity_recognizer
- [X] tag_resource
- [X] untag_resource
- [ ] update_endpoint
</details>
## config
<details>
<summary>35% implemented</summary>
@ -2050,7 +2127,7 @@
- [X] modify_volume
- [ ] modify_volume_attribute
- [X] modify_vpc_attribute
- [ ] modify_vpc_endpoint
- [X] modify_vpc_endpoint
- [ ] modify_vpc_endpoint_connection_notification
- [X] modify_vpc_endpoint_service_configuration
- [ ] modify_vpc_endpoint_service_payer_responsibility
@ -6315,7 +6392,6 @@
- codestar-connections
- codestar-notifications
- cognito-sync
- comprehend
- comprehendmedical
- compute-optimizer
- connect

View File

@ -0,0 +1,110 @@
.. _implementedservice_comprehend:
.. |start-h3| raw:: html
<h3>
.. |end-h3| raw:: html
</h3>
==========
comprehend
==========
.. autoclass:: moto.comprehend.models.ComprehendBackend
|start-h3| Example usage |end-h3|
.. sourcecode:: python
@mock_comprehend
def test_comprehend_behaviour():
boto3.client("comprehend")
...
|start-h3| Implemented features for this service |end-h3|
- [ ] batch_detect_dominant_language
- [ ] batch_detect_entities
- [ ] batch_detect_key_phrases
- [ ] batch_detect_sentiment
- [ ] batch_detect_syntax
- [ ] classify_document
- [ ] contains_pii_entities
- [ ] create_document_classifier
- [ ] create_endpoint
- [X] create_entity_recognizer
The ClientRequestToken-parameter is not yet implemented
- [ ] delete_document_classifier
- [ ] delete_endpoint
- [X] delete_entity_recognizer
- [ ] delete_resource_policy
- [ ] describe_document_classification_job
- [ ] describe_document_classifier
- [ ] describe_dominant_language_detection_job
- [ ] describe_endpoint
- [ ] describe_entities_detection_job
- [X] describe_entity_recognizer
- [ ] describe_events_detection_job
- [ ] describe_key_phrases_detection_job
- [ ] describe_pii_entities_detection_job
- [ ] describe_resource_policy
- [ ] describe_sentiment_detection_job
- [ ] describe_targeted_sentiment_detection_job
- [ ] describe_topics_detection_job
- [ ] detect_dominant_language
- [ ] detect_entities
- [ ] detect_key_phrases
- [ ] detect_pii_entities
- [ ] detect_sentiment
- [ ] detect_syntax
- [ ] import_model
- [ ] list_document_classification_jobs
- [ ] list_document_classifier_summaries
- [ ] list_document_classifiers
- [ ] list_dominant_language_detection_jobs
- [ ] list_endpoints
- [ ] list_entities_detection_jobs
- [ ] list_entity_recognizer_summaries
- [X] list_entity_recognizers
Pagination is not yet implemented.
The following filters are not yet implemented: Status, SubmitTimeBefore, SubmitTimeAfter
- [ ] list_events_detection_jobs
- [ ] list_key_phrases_detection_jobs
- [ ] list_pii_entities_detection_jobs
- [ ] list_sentiment_detection_jobs
- [X] list_tags_for_resource
- [ ] list_targeted_sentiment_detection_jobs
- [ ] list_topics_detection_jobs
- [ ] put_resource_policy
- [ ] start_document_classification_job
- [ ] start_dominant_language_detection_job
- [ ] start_entities_detection_job
- [ ] start_events_detection_job
- [ ] start_key_phrases_detection_job
- [ ] start_pii_entities_detection_job
- [ ] start_sentiment_detection_job
- [ ] start_targeted_sentiment_detection_job
- [ ] start_topics_detection_job
- [ ] stop_dominant_language_detection_job
- [ ] stop_entities_detection_job
- [ ] stop_events_detection_job
- [ ] stop_key_phrases_detection_job
- [ ] stop_pii_entities_detection_job
- [ ] stop_sentiment_detection_job
- [ ] stop_targeted_sentiment_detection_job
- [ ] stop_training_document_classifier
- [X] stop_training_entity_recognizer
- [X] tag_resource
- [X] untag_resource
- [ ] update_endpoint

View File

@ -520,7 +520,7 @@ ec2
- [X] modify_volume
- [ ] modify_volume_attribute
- [X] modify_vpc_attribute
- [ ] modify_vpc_endpoint
- [X] modify_vpc_endpoint
- [ ] modify_vpc_endpoint_connection_notification
- [X] modify_vpc_endpoint_service_configuration

View File

@ -48,6 +48,7 @@ mock_cognitoidentity = lazy_load(
".cognitoidentity", "mock_cognitoidentity", boto3_name="cognito-identity"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp", boto3_name="cognito-idp")
mock_comprehend = lazy_load(".comprehend", "mock_comprehend")
mock_config = lazy_load(".config", "mock_config")
mock_databrew = lazy_load(".databrew", "mock_databrew")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")

View File

@ -27,6 +27,7 @@ backend_url_patterns = [
re.compile("https?://cognito-identity\\.(.+)\\.amazonaws.com"),
),
("cognito-idp", re.compile("https?://cognito-idp\\.(.+)\\.amazonaws.com")),
("comprehend", re.compile("https?://comprehend\\.(.+)\\.amazonaws\\.com")),
("config", re.compile("https?://config\\.(.+)\\.amazonaws\\.com")),
("databrew", re.compile("https?://databrew\\.(.+)\\.amazonaws.com")),
("datapipeline", re.compile("https?://datapipeline\\.(.+)\\.amazonaws\\.com")),

View File

@ -0,0 +1,5 @@
"""comprehend module initialization; sets value for base decorator."""
from .models import comprehend_backends
from ..core.models import base_decorator
mock_comprehend = base_decorator(comprehend_backends)

View File

@ -0,0 +1,10 @@
"""Exceptions raised by the comprehend service."""
from moto.core.exceptions import JsonRESTError
class ResourceNotFound(JsonRESTError):
    """Raised when a requested comprehend resource (e.g. an entity
    recognizer ARN) does not exist in this backend."""

    def __init__(self):
        # Error code and message match what the real service returns.
        message = "RESOURCE_NOT_FOUND: Could not find specified resource."
        super().__init__("ResourceNotFoundException", message)

131
moto/comprehend/models.py Normal file
View File

@ -0,0 +1,131 @@
"""ComprehendBackend class with methods for supported APIs."""
from moto.core import BaseBackend, BaseModel
from moto.core.utils import BackendDict
from moto.utilities.tagging_service import TaggingService
from .exceptions import ResourceNotFound
from typing import Dict
class EntityRecognizer(BaseModel):
    """In-memory representation of a single Comprehend entity recognizer."""

    def __init__(
        self,
        region_name,
        account_id,
        language_code,
        input_data_config,
        data_access_role_arn,
        version_name,
        recognizer_name,
        volume_kms_key_id,
        vpc_config,
        model_kms_key_id,
        model_policy,
    ):
        self.name = recognizer_name
        # The ARN doubles as the key under which the backend stores this
        # recognizer; an optional version suffix keeps versions distinct.
        arn = f"arn:aws:comprehend:{region_name}:{account_id}:entity-recognizer/{recognizer_name}"
        if version_name:
            arn = f"{arn}/version/{version_name}"
        self.arn = arn
        self.language_code = language_code
        self.input_data_config = input_data_config
        self.data_access_role_arn = data_access_role_arn
        self.version_name = version_name
        self.volume_kms_key_id = volume_kms_key_id
        self.vpc_config = vpc_config
        self.model_kms_key_id = model_kms_key_id
        self.model_policy = model_policy
        # Recognizers are treated as fully trained the moment they exist.
        self.status = "TRAINED"

    def to_dict(self):
        """Serialize in the EntityRecognizerProperties response shape."""
        return {
            "EntityRecognizerArn": self.arn,
            "LanguageCode": self.language_code,
            "Status": self.status,
            "InputDataConfig": self.input_data_config,
            "DataAccessRoleArn": self.data_access_role_arn,
            "VersionName": self.version_name,
            "VolumeKmsKeyId": self.volume_kms_key_id,
            "VpcConfig": self.vpc_config,
            "ModelKmsKeyId": self.model_kms_key_id,
            "ModelPolicy": self.model_policy,
        }
class ComprehendBackend(BaseBackend):
    """Implementation of Comprehend APIs."""

    def __init__(self, region_name, account_id):
        super().__init__(region_name, account_id)
        # Recognizers are stored by ARN, which is unique per
        # recognizer-name/version-name combination.
        self.recognizers: Dict[str, EntityRecognizer] = dict()
        self.tagger = TaggingService()

    def list_entity_recognizers(self, _filter):
        """
        Pagination is not yet implemented.
        The following filters are not yet implemented: Status, SubmitTimeBefore, SubmitTimeAfter
        """
        if "RecognizerName" in _filter:
            return [
                entity
                for entity in self.recognizers.values()
                if entity.name == _filter["RecognizerName"]
            ]
        # Return a list so both branches share the same return type;
        # previously the unfiltered branch leaked a live dict_values view.
        return list(self.recognizers.values())

    def create_entity_recognizer(
        self,
        recognizer_name,
        version_name,
        data_access_role_arn,
        tags,
        input_data_config,
        language_code,
        volume_kms_key_id,
        vpc_config,
        model_kms_key_id,
        model_policy,
    ):
        """
        The ClientRequestToken-parameter is not yet implemented
        """
        recognizer = EntityRecognizer(
            region_name=self.region_name,
            account_id=self.account_id,
            language_code=language_code,
            input_data_config=input_data_config,
            data_access_role_arn=data_access_role_arn,
            version_name=version_name,
            recognizer_name=recognizer_name,
            volume_kms_key_id=volume_kms_key_id,
            vpc_config=vpc_config,
            model_kms_key_id=model_kms_key_id,
            model_policy=model_policy,
        )
        self.recognizers[recognizer.arn] = recognizer
        self.tagger.tag_resource(recognizer.arn, tags)
        return recognizer.arn

    def describe_entity_recognizer(self, entity_recognizer_arn) -> EntityRecognizer:
        """Return the recognizer with the given ARN.

        Raises ResourceNotFound when the ARN is unknown.
        """
        if entity_recognizer_arn not in self.recognizers:
            raise ResourceNotFound()
        return self.recognizers[entity_recognizer_arn]

    def stop_training_entity_recognizer(self, entity_recognizer_arn):
        """Request training to stop; a no-op unless the status is TRAINING."""
        recognizer = self.describe_entity_recognizer(entity_recognizer_arn)
        if recognizer.status == "TRAINING":
            recognizer.status = "STOP_REQUESTED"

    def list_tags_for_resource(self, resource_arn):
        return self.tagger.list_tags_for_resource(resource_arn)["Tags"]

    def delete_entity_recognizer(self, entity_recognizer_arn):
        # Unknown ARNs are silently ignored (pop with a default).
        self.recognizers.pop(entity_recognizer_arn, None)

    def tag_resource(self, resource_arn, tags):
        self.tagger.tag_resource(resource_arn, tags)

    def untag_resource(self, resource_arn, tag_keys):
        self.tagger.untag_resource_using_names(resource_arn, tag_keys)


comprehend_backends = BackendDict(ComprehendBackend, "comprehend")

View File

@ -0,0 +1,97 @@
"""Handles incoming comprehend requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import comprehend_backends
class ComprehendResponse(BaseResponse):
    """Handler for Comprehend requests and responses."""

    def __init__(self):
        super().__init__(service_name="comprehend")

    @property
    def comprehend_backend(self):
        """Return backend instance specific for this region."""
        return comprehend_backends[self.current_account][self.region]

    def _json_body(self):
        # Comprehend is a JSON-protocol service: every action POSTs a JSON body.
        return json.loads(self.body)

    def list_entity_recognizers(self):
        request_filter = self._json_body().get("Filter", {})
        recognizers = self.comprehend_backend.list_entity_recognizers(
            _filter=request_filter
        )
        return json.dumps(
            {"EntityRecognizerPropertiesList": [r.to_dict() for r in recognizers]}
        )

    def create_entity_recognizer(self):
        body = self._json_body()
        arn = self.comprehend_backend.create_entity_recognizer(
            recognizer_name=body.get("RecognizerName"),
            version_name=body.get("VersionName"),
            data_access_role_arn=body.get("DataAccessRoleArn"),
            tags=body.get("Tags"),
            input_data_config=body.get("InputDataConfig"),
            language_code=body.get("LanguageCode"),
            volume_kms_key_id=body.get("VolumeKmsKeyId"),
            vpc_config=body.get("VpcConfig"),
            model_kms_key_id=body.get("ModelKmsKeyId"),
            model_policy=body.get("ModelPolicy"),
        )
        return json.dumps({"EntityRecognizerArn": arn})

    def describe_entity_recognizer(self):
        arn = self._json_body().get("EntityRecognizerArn")
        recognizer = self.comprehend_backend.describe_entity_recognizer(
            entity_recognizer_arn=arn
        )
        return json.dumps({"EntityRecognizerProperties": recognizer.to_dict()})

    def stop_training_entity_recognizer(self):
        arn = self._json_body().get("EntityRecognizerArn")
        self.comprehend_backend.stop_training_entity_recognizer(
            entity_recognizer_arn=arn
        )
        return "{}"

    def list_tags_for_resource(self):
        arn = self._json_body().get("ResourceArn")
        tags = self.comprehend_backend.list_tags_for_resource(resource_arn=arn)
        return json.dumps({"ResourceArn": arn, "Tags": tags})

    def delete_entity_recognizer(self):
        arn = self._json_body().get("EntityRecognizerArn")
        self.comprehend_backend.delete_entity_recognizer(entity_recognizer_arn=arn)
        return "{}"

    def tag_resource(self):
        body = self._json_body()
        self.comprehend_backend.tag_resource(body.get("ResourceArn"), body.get("Tags"))
        return "{}"

    def untag_resource(self):
        body = self._json_body()
        self.comprehend_backend.untag_resource(
            body.get("ResourceArn"), body.get("TagKeys")
        )
        return "{}"

11
moto/comprehend/urls.py Normal file
View File

@ -0,0 +1,11 @@
"""comprehend base URL and path."""
from .responses import ComprehendResponse
url_bases = [
r"https?://comprehend\.(.+)\.amazonaws\.com",
]
url_paths = {
"{0}/$": ComprehendResponse.dispatch,
}

View File

@ -76,6 +76,20 @@ class VPCEndPoint(TaggedEC2Resource, CloudFormationModel):
self.created_at = utc_date_and_time()
def modify(self, policy_doc, add_subnets, add_route_tables, remove_route_tables):
    """Apply only the attributes the caller actually supplied (falsy args are skipped)."""
    if policy_doc:
        self.policy_document = policy_doc
    if add_subnets:
        # += on a list mutates in place, equivalent to extend().
        self.subnet_ids += add_subnets
    if add_route_tables:
        self.route_table_ids += add_route_tables
    if remove_route_tables:
        remaining = [
            rt_id for rt_id in self.route_table_ids if rt_id not in remove_route_tables
        ]
        self.route_table_ids = remaining
def get_filter_value(self, filter_name):
if filter_name in ("vpc-endpoint-type", "vpc_endpoint_type"):
return self.endpoint_type
@ -593,6 +607,12 @@ class VPCBackend:
return vpc_end_point
def modify_vpc_endpoint(
    self, vpc_id, policy_doc, add_subnets, remove_route_tables, add_route_tables
):
    """Look up a VPC endpoint by id and apply the requested modifications.

    NOTE(review): despite its name, ``vpc_id`` carries the *endpoint* id
    (the responses layer passes the VpcEndpointId parameter here).
    """
    endpoint = self.describe_vpc_endpoints(vpc_end_point_ids=[vpc_id])[0]
    # VPCEndPoint.modify takes add_route_tables before remove_route_tables,
    # so the last two arguments are intentionally swapped relative to this
    # method's own signature.
    endpoint.modify(policy_doc, add_subnets, add_route_tables, remove_route_tables)
def delete_vpc_endpoints(self, vpce_ids=None):
for vpce_id in vpce_ids or []:
vpc_endpoint = self.vpc_end_points.get(vpce_id, None)

View File

@ -216,6 +216,22 @@ class VPCs(EC2BaseResponse):
template = self.response_template(CREATE_VPC_END_POINT)
return template.render(vpc_end_point=vpc_end_point)
def modify_vpc_endpoint(self):
    """Handle ModifyVpcEndpoint: forward query parameters to the backend."""
    endpoint_id = self._get_param("VpcEndpointId")
    self.ec2_backend.modify_vpc_endpoint(
        vpc_id=endpoint_id,
        policy_doc=self._get_param("PolicyDocument"),
        add_subnets=self._get_multi_param("AddSubnetId"),
        add_route_tables=self._get_multi_param("AddRouteTableId"),
        remove_route_tables=self._get_multi_param("RemoveRouteTableId"),
    )
    # The real API only returns <return>true</return>.
    return self.response_template(MODIFY_VPC_END_POINT).render()
def describe_vpc_endpoint_services(self):
vpc_end_point_services = self.ec2_backend.describe_vpc_endpoint_services(
dry_run=self._get_bool_param("DryRun"),
@ -609,6 +625,10 @@ CREATE_VPC_END_POINT = """ <CreateVpcEndpointResponse xmlns="http://monitoring.a
</vpcEndpoint>
</CreateVpcEndpointResponse>"""
MODIFY_VPC_END_POINT = """<ModifyVpcEndpointResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<return>true</return>
</ModifyVpcEndpointResponse>"""
DESCRIBE_VPC_ENDPOINT_SERVICES_RESPONSE = """<DescribeVpcEndpointServicesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>19a9ff46-7df6-49b8-9726-3df27527089d</requestId>
<serviceNameSet>

View File

@ -11,6 +11,7 @@ PATCH="etc/0001-Patch-Hardcode-endpoints-to-local-server.patch"
(git apply $pwd/etc/0003-Patch-IAM-wait-times.patch > /dev/null 2>&1 && echo "Patched IAM") || echo "Not patching IAM - Directory was probably already patched."
(git apply $pwd/etc/0005-Route53-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Route53") || echo "Not patching Route53 - Directory was probably already patched."
(git apply $pwd/etc/0006-CF-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched CF") || echo "Not patching CF - Directory was probably already patched."
(git apply $pwd/etc/0007-Comprehend-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Comprehend") || echo "Not patching Comprehend - Directory was probably already patched."
)
(

View File

@ -0,0 +1,60 @@
From ba338bc6eff2276671b8fd8e61c5a1aceab96112 Mon Sep 17 00:00:00 2001
From: Bert Blommers <info@bertblommers.nl>
Date: Tue, 4 Oct 2022 19:00:11 +0000
Subject: [PATCH] Patch: Reduce Comprehend timings
---
internal/service/comprehend/common_model.go | 2 +-
internal/service/comprehend/consts.go | 6 +++---
internal/service/comprehend/entity_recognizer.go | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/internal/service/comprehend/common_model.go b/internal/service/comprehend/common_model.go
index 7fa0c1022f..eb6b7c9265 100644
--- a/internal/service/comprehend/common_model.go
+++ b/internal/service/comprehend/common_model.go
@@ -57,7 +57,7 @@ func waitNetworkInterfaceCreated(ctx context.Context, conn *ec2.EC2, initialENII
Pending: []string{},
Target: []string{ec2.NetworkInterfaceStatusInUse},
Refresh: statusNetworkInterfaces(ctx, conn, initialENIIds, securityGroups, subnets),
- Delay: 4 * time.Minute,
+ Delay: 40 * time.Second,
MinTimeout: 10 * time.Second,
Timeout: timeout,
}
diff --git a/internal/service/comprehend/consts.go b/internal/service/comprehend/consts.go
index 8c926987a4..2420b78dcc 100644
--- a/internal/service/comprehend/consts.go
+++ b/internal/service/comprehend/consts.go
@@ -4,8 +4,8 @@ import (
"time"
)
-const iamPropagationTimeout = 2 * time.Minute
+const iamPropagationTimeout = 20 * time.Second
// Avoid service throttling
-const entityRegcognizerDelay = 1 * time.Minute
-const entityRegcognizerPollInterval = 1 * time.Minute
+const entityRegcognizerDelay = 10 * time.Second
+const entityRegcognizerPollInterval = 10 * time.Second
diff --git a/internal/service/comprehend/entity_recognizer.go b/internal/service/comprehend/entity_recognizer.go
index 119bf790db..3e953427af 100644
--- a/internal/service/comprehend/entity_recognizer.go
+++ b/internal/service/comprehend/entity_recognizer.go
@@ -42,9 +42,9 @@ func ResourceEntityRecognizer() *schema.Resource {
},
Timeouts: &schema.ResourceTimeout{
- Create: schema.DefaultTimeout(60 * time.Minute),
- Update: schema.DefaultTimeout(60 * time.Minute),
- Delete: schema.DefaultTimeout(30 * time.Minute),
+ Create: schema.DefaultTimeout(60 * time.Second),
+ Update: schema.DefaultTimeout(60 * time.Second),
+ Delete: schema.DefaultTimeout(30 * time.Second),
},
Schema: map[string]*schema.Schema{
--
2.25.1

View File

@ -75,6 +75,8 @@ cognitoidp:
- TestAccCognitoIDPUserPool_
- TestAccCognitoIDPUser_
- TestAccCognitoIDPUserPoolClients
comprehend:
- TestAccComprehendEntityRecognizer
dax:
- TestAccDAXCluster_basic
- TestAccDAXCluster_Encryption

View File

View File

@ -0,0 +1,200 @@
"""Unit tests for comprehend-supported APIs."""
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_comprehend
# See our Development Tips on writing tests for hints on how to write good tests:
# http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html
# Minimal InputDataConfig shared by every test below: CSV-formatted documents
# and an entity list in S3, with two entity types.
INPUT_DATA_CONFIG = {
    "DataFormat": "COMPREHEND_CSV",
    "Documents": {
        "InputFormat": "ONE_DOC_PER_LINE",
        "S3Uri": "s3://tf-acc-test-1726651689102157637/documents.txt",
    },
    "EntityList": {"S3Uri": "s3://tf-acc-test-1726651689102157637/entitylist.csv"},
    "EntityTypes": [{"Type": "ENGINEER"}, {"Type": "MANAGER"}],
}
@mock_comprehend
def test_list_entity_recognizers():
    """Listing supports the RecognizerName filter and returns all versions."""
    conn = boto3.client("comprehend", region_name="us-east-2")

    assert conn.list_entity_recognizers()["EntityRecognizerPropertiesList"] == []

    conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="myname",
        VersionName="version1",
    )

    listed = conn.list_entity_recognizers(Filter={"RecognizerName": "unknown"})
    assert listed["EntityRecognizerPropertiesList"] == []

    listed = conn.list_entity_recognizers(Filter={"RecognizerName": "myname"})
    assert len(listed["EntityRecognizerPropertiesList"]) == 1

    conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="myname",
        VersionName="version2",
    )

    # Both versions share the name, so the filter matches both.
    listed = conn.list_entity_recognizers(Filter={"RecognizerName": "myname"})
    assert len(listed["EntityRecognizerPropertiesList"]) == 2
@mock_comprehend
def test_create_entity_recognizer():
    """Creating a recognizer returns its ARN."""
    conn = boto3.client("comprehend", region_name="ap-southeast-1")
    resp = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
        VersionName="terraform-20221003201727469000000002",
    )
    assert "EntityRecognizerArn" in resp
@mock_comprehend
def test_create_entity_recognizer_without_version():
    """Without a VersionName the ARN carries no /version/ suffix."""
    conn = boto3.client("comprehend", region_name="ap-southeast-1")
    resp = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
    )
    expected = (
        "arn:aws:comprehend:ap-southeast-1:123456789012"
        ":entity-recognizer/tf-acc-test-1726651689102157637"
    )
    assert resp["EntityRecognizerArn"] == expected
@mock_comprehend
def test_create_entity_recognizer_with_tags():
    """Tags supplied at creation are visible via list_tags_for_resource."""
    conn = boto3.client("comprehend", region_name="ap-southeast-1")
    arn = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        Tags=[{"Key": "k1", "Value": "v1"}],
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
    )["EntityRecognizerArn"]

    tags_resp = conn.list_tags_for_resource(ResourceArn=arn)
    assert tags_resp["ResourceArn"] == arn
    assert tags_resp["Tags"] == [{"Key": "k1", "Value": "v1"}]
@mock_comprehend
def test_describe_entity_recognizer():
    """Describe returns every property the recognizer was created with."""
    conn = boto3.client("comprehend", region_name="eu-west-1")
    arn = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
        VersionName="terraform-20221003201727469000000002",
    )["EntityRecognizerArn"]

    props = conn.describe_entity_recognizer(EntityRecognizerArn=arn)[
        "EntityRecognizerProperties"
    ]
    assert props["EntityRecognizerArn"] == arn
    assert props["LanguageCode"] == "en"
    assert props["Status"] == "TRAINED"
    assert props["InputDataConfig"] == INPUT_DATA_CONFIG
    assert props["DataAccessRoleArn"] == "iam_role_with_20_chars"
    assert props["VersionName"] == "terraform-20221003201727469000000002"
@mock_comprehend
def test_describe_unknown_recognizer():
    """Describing a non-existent ARN raises ResourceNotFoundException."""
    conn = boto3.client("comprehend", region_name="eu-west-1")
    with pytest.raises(ClientError) as exc:
        conn.describe_entity_recognizer(EntityRecognizerArn="unknown")
    err = exc.value.response["Error"]
    assert err["Code"] == "ResourceNotFoundException"
    assert err["Message"] == "RESOURCE_NOT_FOUND: Could not find specified resource."
@mock_comprehend
def test_stop_training_entity_recognizer():
    """Stopping an already-TRAINED recognizer leaves its status unchanged."""
    conn = boto3.client("comprehend", region_name="eu-west-1")
    arn = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
        VersionName="terraform-20221003201727469000000002",
    )["EntityRecognizerArn"]

    conn.stop_training_entity_recognizer(EntityRecognizerArn=arn)

    props = conn.describe_entity_recognizer(EntityRecognizerArn=arn)[
        "EntityRecognizerProperties"
    ]
    assert props["Status"] == "TRAINED"
@mock_comprehend
def test_list_tags_for_resource():
    """Tags can be added with tag_resource and removed with untag_resource."""
    conn = boto3.client("comprehend", region_name="us-east-2")
    arn = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
        VersionName="terraform-20221003201727469000000002",
    )["EntityRecognizerArn"]

    resp = conn.list_tags_for_resource(ResourceArn=arn)
    assert resp["ResourceArn"] == arn
    assert resp["Tags"] == []

    conn.tag_resource(ResourceArn=arn, Tags=[{"Key": "k1", "Value": "v1"}])
    assert conn.list_tags_for_resource(ResourceArn=arn)["Tags"] == [
        {"Key": "k1", "Value": "v1"}
    ]

    conn.untag_resource(ResourceArn=arn, TagKeys=["k1"])
    assert conn.list_tags_for_resource(ResourceArn=arn)["Tags"] == []
@mock_comprehend
def test_delete_entity_recognizer():
    """A deleted recognizer can no longer be described."""
    conn = boto3.client("comprehend", region_name="ap-southeast-1")
    arn = conn.create_entity_recognizer(
        DataAccessRoleArn="iam_role_with_20_chars",
        InputDataConfig=INPUT_DATA_CONFIG,
        LanguageCode="en",
        RecognizerName="tf-acc-test-1726651689102157637",
        VersionName="terraform-20221003201727469000000002",
    )["EntityRecognizerArn"]

    conn.delete_entity_recognizer(EntityRecognizerArn=arn)

    with pytest.raises(ClientError) as exc:
        conn.describe_entity_recognizer(EntityRecognizerArn=arn)
    err = exc.value.response["Error"]
    assert err["Code"] == "ResourceNotFoundException"
    assert err["Message"] == "RESOURCE_NOT_FOUND: Could not find specified resource."

View File

@ -1096,6 +1096,50 @@ def retrieve_all_endpoints(ec2):
return all_endpoints
@mock_ec2
def test_modify_vpc_endpoint():
    """ModifyVpcEndpoint can add subnets, add/remove route tables, and set the policy."""
    ec2 = boto3.client("ec2", region_name="us-west-1")
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    subnet_id1 = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.1.0/24")["Subnet"][
        "SubnetId"
    ]
    subnet_id2 = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.2.0/24")["Subnet"][
        "SubnetId"
    ]
    rt_id = ec2.create_route_table(VpcId=vpc_id)["RouteTable"]["RouteTableId"]

    endpoint = ec2.create_vpc_endpoint(
        VpcId=vpc_id,
        ServiceName="com.tester.my-test-endpoint",
        VpcEndpointType="interface",
        SubnetIds=[subnet_id1],
    )["VpcEndpoint"]
    # Fix: keep the endpoint id in its own variable instead of shadowing vpc_id,
    # which previously held two unrelated identifiers within one test.
    endpoint_id = endpoint["VpcEndpointId"]

    ec2.modify_vpc_endpoint(VpcEndpointId=endpoint_id, AddSubnetIds=[subnet_id2])
    endpoint = ec2.describe_vpc_endpoints(VpcEndpointIds=[endpoint_id])[
        "VpcEndpoints"
    ][0]
    assert endpoint["SubnetIds"] == [subnet_id1, subnet_id2]

    ec2.modify_vpc_endpoint(VpcEndpointId=endpoint_id, AddRouteTableIds=[rt_id])
    endpoint = ec2.describe_vpc_endpoints(VpcEndpointIds=[endpoint_id])[
        "VpcEndpoints"
    ][0]
    assert endpoint["RouteTableIds"] == [rt_id]

    ec2.modify_vpc_endpoint(VpcEndpointId=endpoint_id, RemoveRouteTableIds=[rt_id])
    endpoint = ec2.describe_vpc_endpoints(VpcEndpointIds=[endpoint_id])[
        "VpcEndpoints"
    ][0]
    assert "RouteTableIds" not in endpoint

    ec2.modify_vpc_endpoint(VpcEndpointId=endpoint_id, PolicyDocument="doc")
    endpoint = ec2.describe_vpc_endpoints(VpcEndpointIds=[endpoint_id])[
        "VpcEndpoints"
    ][0]
    assert endpoint["PolicyDocument"] == "doc"
@mock_ec2
def test_delete_vpc_end_points():
ec2 = boto3.client("ec2", region_name="us-west-1")