This commit is contained in:
2026-05-06 19:47:31 +07:00
parent 94d8682530
commit 12dbb7731b
9963 changed files with 2747894 additions and 0 deletions
@@ -0,0 +1,13 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -0,0 +1,228 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for language detector."""
import enum
import os
from absl.testing import absltest
from absl.testing import parameterized
from mediapipe.tasks.python.components.containers import category
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.text import language_detector
# Convenience aliases for the task's public result types.
LanguageDetectorResult = language_detector.LanguageDetectorResult
LanguageDetectorPrediction = (
    language_detector.LanguageDetectorResult.Detection
)
_BaseOptions = base_options_module.BaseOptions
_Category = category.Category
_Classifications = classification_result_module.Classifications
_LanguageDetector = language_detector.LanguageDetector
_LanguageDetectorOptions = language_detector.LanguageDetectorOptions

# Model file and test-data location.
_LANGUAGE_DETECTOR_MODEL = "language_detector.tflite"
_TEST_DATA_DIR = "mediapipe/tasks/testdata/text"

# Predictions scoring below this threshold are excluded from results.
_SCORE_THRESHOLD = 0.3

# Input texts and the golden detection results expected from the model.
_EN_TEXT = "To be, or not to be, that is the question"
_EN_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("en", 0.999856)]
)
_FR_TEXT = (
    "Il y a beaucoup de bouches qui parlent et fort peu de têtes qui pensent."
)
_FR_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("fr", 0.999781)]
)
_RU_TEXT = "это какой-то английский язык"
_RU_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("ru", 0.993362)]
)
# Chinese/Japanese-ambiguous input producing two candidate predictions.
_MIXED_TEXT = "分久必合合久必分"
_MIXED_EXPECTED_RESULT = LanguageDetectorResult([
    LanguageDetectorPrediction("zh", 0.505424),
    LanguageDetectorPrediction("ja", 0.481617),
])
# Absolute tolerance for probability comparisons.
_TOLERANCE = 1e-6
class ModelFileType(enum.Enum):
  """Ways of supplying the TFLite model to the task under test."""

  FILE_CONTENT = enum.auto()  # model provided as an in-memory buffer
  FILE_NAME = enum.auto()  # model provided as a filesystem path
class LanguageDetectorTest(parameterized.TestCase):
  """Unit tests for the LanguageDetector text task."""

  def setUp(self):
    super().setUp()
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _LANGUAGE_DETECTOR_MODEL)
    )

  def _expect_language_detector_result_correct(
      self,
      actual_result: LanguageDetectorResult,
      expect_result: LanguageDetectorResult,
  ):
    """Asserts `actual_result` matches `expect_result`.

    Checks that both results have the same number of detections and that each
    detection agrees on language code and, within `_TOLERANCE`, probability.
    """
    # Without this length check the loop below would silently pass when the
    # detector returns fewer detections than expected (e.g. an empty result).
    self.assertLen(actual_result.detections, len(expect_result.detections))
    for i, prediction in enumerate(actual_result.detections):
      expected_prediction = expect_result.detections[i]
      self.assertEqual(
          prediction.language_code,
          expected_prediction.language_code,
      )
      self.assertAlmostEqual(
          prediction.probability,
          expected_prediction.probability,
          delta=_TOLERANCE,
      )

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _LanguageDetector.create_from_model_path(self.model_path) as detector:
      self.assertIsInstance(detector, _LanguageDetector)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(base_options=base_options)
    with _LanguageDetector.create_from_options(options) as detector:
      self.assertIsInstance(detector, _LanguageDetector)

  def test_create_from_options_fails_with_invalid_model_path(self):
    with self.assertRaisesRegex(
        RuntimeError, "Unable to open file at /path/to/invalid/model.tflite"
    ):
      base_options = _BaseOptions(
          model_asset_path="/path/to/invalid/model.tflite"
      )
      options = _LanguageDetectorOptions(base_options=base_options)
      _LanguageDetector.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, "rb") as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _LanguageDetectorOptions(base_options=base_options)
      # Use a context manager so the detector is released; the original test
      # created the detector without ever closing it.
      with _LanguageDetector.create_from_options(options) as detector:
        self.assertIsInstance(detector, _LanguageDetector)

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
  )
  def test_detect(self, model_file_type, text, expected_result):
    """Detects the language of `text` and compares with the golden result."""
    # Creates detector.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, "rb") as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError("model_file_type is invalid.")
    options = _LanguageDetectorOptions(
        base_options=base_options, score_threshold=_SCORE_THRESHOLD
    )
    detector = _LanguageDetector.create_from_options(options)
    # Performs language detection on the input.
    text_result = detector.detect(text)
    # Comparing results.
    self._expect_language_detector_result_correct(text_result, expected_result)
    # Closes the detector explicitly when the detector is not used in
    # a context.
    detector.close()

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
  )
  def test_detect_in_context(self, model_file_type, text, expected_result):
    """Same as test_detect but runs the detector inside a `with` block."""
    # Creates detector.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, "rb") as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError("model_file_type is invalid.")
    options = _LanguageDetectorOptions(
        base_options=base_options, score_threshold=_SCORE_THRESHOLD
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(text)
      # Comparing results.
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )

  def test_allowlist_option(self):
    """Only allowlisted language codes may appear in the result."""
    # Creates detector.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(
        base_options=base_options,
        score_threshold=_SCORE_THRESHOLD,
        category_allowlist=["ja"],
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(_MIXED_TEXT)
      # Comparing results: only the "ja" prediction should survive.
      expected_result = LanguageDetectorResult(
          [LanguageDetectorPrediction("ja", 0.481617)]
      )
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )

  def test_denylist_option(self):
    """Denylisted language codes must be removed from the result."""
    # Creates detector.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(
        base_options=base_options,
        score_threshold=_SCORE_THRESHOLD,
        category_denylist=["ja"],
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(_MIXED_TEXT)
      # Comparing results: "ja" is filtered out, leaving only "zh".
      expected_result = LanguageDetectorResult(
          [LanguageDetectorPrediction("zh", 0.505424)]
      )
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  absltest.main()
@@ -0,0 +1,231 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for text classifier."""
import enum
import os
from absl.testing import absltest
from absl.testing import parameterized
from mediapipe.tasks.python.components.containers import category
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.text import text_classifier
# Convenience aliases for the task's public types.
TextClassifierResult = classification_result_module.ClassificationResult
_BaseOptions = base_options_module.BaseOptions
_Category = category.Category
_Classifications = classification_result_module.Classifications
_TextClassifier = text_classifier.TextClassifier
_TextClassifierOptions = text_classifier.TextClassifierOptions

# Test models: a BERT-based classifier and a regex-tokenizer classifier.
_BERT_MODEL_FILE = 'bert_text_classifier.tflite'
_REGEX_MODEL_FILE = 'test_model_text_classifier_with_regex_tokenizer.tflite'
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/text'

# Sentiment-classification inputs.
_NEGATIVE_TEXT = 'What a waste of my time.'
_POSITIVE_TEXT = ('This is the best movie Ive seen in recent years.'
                  'Strongly recommend it!')

# Golden classification results for each (model, input) pair below.
_BERT_NEGATIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=0,
                    score=0.9995,
                    display_name='',
                    category_name='negative'),
                _Category(
                    index=1,
                    score=0.0005,
                    display_name='',
                    category_name='positive')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
_BERT_POSITIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=1,
                    score=0.9995,
                    display_name='',
                    category_name='positive'),
                _Category(
                    index=0,
                    score=0.0005,
                    display_name='',
                    category_name='negative')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
_REGEX_NEGATIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=0,
                    score=0.81313,
                    display_name='',
                    category_name='Negative'),
                _Category(
                    index=1,
                    score=0.1868704,
                    display_name='',
                    category_name='Positive')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
_REGEX_POSITIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=1,
                    score=0.5134273,
                    display_name='',
                    category_name='Positive'),
                _Category(
                    index=0,
                    score=0.486573,
                    display_name='',
                    category_name='Negative')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
class ModelFileType(enum.Enum):
  """Ways of supplying the TFLite model to the task under test."""

  FILE_CONTENT = enum.auto()  # model provided as an in-memory buffer
  FILE_NAME = enum.auto()  # model provided as a filesystem path
class TextClassifierTest(parameterized.TestCase):
  """Unit tests for the TextClassifier text task."""

  def setUp(self):
    super().setUp()
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _BERT_MODEL_FILE))

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _TextClassifier.create_from_model_path(self.model_path) as classifier:
      self.assertIsInstance(classifier, _TextClassifier)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _TextClassifierOptions(base_options=base_options)
    with _TextClassifier.create_from_options(options) as classifier:
      self.assertIsInstance(classifier, _TextClassifier)

  def test_create_from_options_fails_with_invalid_model_path(self):
    with self.assertRaisesRegex(
        RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
      base_options = _BaseOptions(
          model_asset_path='/path/to/invalid/model.tflite')
      options = _TextClassifierOptions(base_options=base_options)
      _TextClassifier.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, 'rb') as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _TextClassifierOptions(base_options=base_options)
      # Use a context manager so the classifier is released; the original
      # test created the classifier without ever closing it.
      with _TextClassifier.create_from_options(options) as classifier:
        self.assertIsInstance(classifier, _TextClassifier)

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _BERT_MODEL_FILE, _NEGATIVE_TEXT,
       _BERT_NEGATIVE_RESULTS),
      (ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE, _NEGATIVE_TEXT,
       _BERT_NEGATIVE_RESULTS),
      (ModelFileType.FILE_NAME, _BERT_MODEL_FILE, _POSITIVE_TEXT,
       _BERT_POSITIVE_RESULTS),
      (ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE, _POSITIVE_TEXT,
       _BERT_POSITIVE_RESULTS),
      (ModelFileType.FILE_NAME, _REGEX_MODEL_FILE, _NEGATIVE_TEXT,
       _REGEX_NEGATIVE_RESULTS),
      (ModelFileType.FILE_CONTENT, _REGEX_MODEL_FILE, _NEGATIVE_TEXT,
       _REGEX_NEGATIVE_RESULTS),
      (ModelFileType.FILE_NAME, _REGEX_MODEL_FILE, _POSITIVE_TEXT,
       _REGEX_POSITIVE_RESULTS),
      (ModelFileType.FILE_CONTENT, _REGEX_MODEL_FILE, _POSITIVE_TEXT,
       _REGEX_POSITIVE_RESULTS))
  def test_classify(self, model_file_type, model_name, text,
                    expected_classification_result):
    """Classifies `text` and compares the result proto against the golden."""
    # Creates classifier.
    model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, model_name))
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')
    options = _TextClassifierOptions(base_options=base_options)
    classifier = _TextClassifier.create_from_options(options)
    # Performs text classification on the input.
    text_result = classifier.classify(text)
    # Comparing results.
    test_utils.assert_proto_equals(self, text_result.to_pb2(),
                                   expected_classification_result.to_pb2())
    # Closes the classifier explicitly when the classifier is not used in
    # a context.
    classifier.close()

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _BERT_MODEL_FILE, _NEGATIVE_TEXT,
       _BERT_NEGATIVE_RESULTS),
      (ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE, _NEGATIVE_TEXT,
       _BERT_NEGATIVE_RESULTS))
  def test_classify_in_context(self, model_file_type, model_name, text,
                               expected_classification_result):
    """Same as test_classify but runs the classifier inside a `with` block."""
    # Creates classifier.
    model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, model_name))
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')
    options = _TextClassifierOptions(base_options=base_options)
    with _TextClassifier.create_from_options(options) as classifier:
      # Performs text classification on the input.
      text_result = classifier.classify(text)
      # Comparing results.
      test_utils.assert_proto_equals(self, text_result.to_pb2(),
                                     expected_classification_result.to_pb2())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
@@ -0,0 +1,326 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for text embedder."""
import enum
import os
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.text import text_embedder
# Convenience aliases for the task's public types.
_BaseOptions = base_options_module.BaseOptions
_Embedding = embedding_result_module.Embedding
_TextEmbedder = text_embedder.TextEmbedder
_TextEmbedderOptions = text_embedder.TextEmbedderOptions

# Embedding models under test: BERT-based, regex-tokenizer based, and a
# Universal Sentence Encoder QA variant.
_BERT_MODEL_FILE = 'mobilebert_embedding_with_metadata.tflite'
_REGEX_MODEL_FILE = 'regex_one_embedding_with_metadata.tflite'
_USE_MODEL_FILE = 'universal_sentence_encoder_qa_with_metadata.tflite'
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/text'
# Tolerance for embedding vector coordinate values.
_EPSILON = 1e-4
# Tolerance for cosine similarity evaluation.
_SIMILARITY_TOLERANCE = 1e-3
class ModelFileType(enum.Enum):
  """Ways of supplying the TFLite model to the task under test."""

  FILE_CONTENT = enum.auto()  # model provided as an in-memory buffer
  FILE_NAME = enum.auto()  # model provided as a filesystem path
class TextEmbedderTest(parameterized.TestCase):
  """Unit tests for the TextEmbedder text task."""

  def setUp(self):
    super().setUp()
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _BERT_MODEL_FILE))

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _TextEmbedder.create_from_model_path(self.model_path) as embedder:
      self.assertIsInstance(embedder, _TextEmbedder)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _TextEmbedderOptions(base_options=base_options)
    with _TextEmbedder.create_from_options(options) as embedder:
      self.assertIsInstance(embedder, _TextEmbedder)

  def test_create_from_options_fails_with_invalid_model_path(self):
    with self.assertRaisesRegex(
        RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
      base_options = _BaseOptions(
          model_asset_path='/path/to/invalid/model.tflite')
      options = _TextEmbedderOptions(base_options=base_options)
      _TextEmbedder.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, 'rb') as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _TextEmbedderOptions(base_options=base_options)
      # Use a context manager so the embedder is released; the original test
      # created the embedder without ever closing it.
      with _TextEmbedder.create_from_options(options) as embedder:
        self.assertIsInstance(embedder, _TextEmbedder)

  def _check_embedding_value(self, result, expected_first_value):
    # Check embedding first value.
    self.assertAlmostEqual(
        result.embeddings[0].embedding[0], expected_first_value,
        delta=_EPSILON)

  def _check_embedding_size(self, result, quantize, expected_embedding_size):
    # Check embedding size and element type (uint8 when quantized).
    self.assertLen(result.embeddings, 1)
    embedding_result = result.embeddings[0]
    self.assertLen(embedding_result.embedding, expected_embedding_size)
    if quantize:
      self.assertEqual(embedding_result.embedding.dtype, np.uint8)
    else:
      self.assertEqual(embedding_result.embedding.dtype, float)

  def _check_cosine_similarity(self, result0, result1, expected_similarity):
    # Checks cosine similarity.
    similarity = _TextEmbedder.cosine_similarity(result0.embeddings[0],
                                                 result1.embeddings[0])
    self.assertAlmostEqual(
        similarity, expected_similarity, delta=_SIMILARITY_TOLERANCE)

  # Parameters: (l2_normalize, quantize, model, model file type, expected
  # cosine similarity, expected embedding size, expected first values of the
  # two embeddings).
  @parameterized.parameters(
      (
          False,
          False,
          _BERT_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.969514,
          512,
          (19.9016, 22.626251),
      ),
      (
          True,
          False,
          _BERT_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.969514,
          512,
          (0.0585837, 0.0723035),
      ),
      (
          False,
          False,
          _REGEX_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.999937,
          16,
          (0.0309356, 0.0312863),
      ),
      (
          True,
          False,
          _REGEX_MODEL_FILE,
          ModelFileType.FILE_CONTENT,
          0.999937,
          16,
          (0.549632, 0.552879),
      ),
      (
          False,
          False,
          _USE_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.851961,
          100,
          (1.422951, 1.404664),
      ),
      (
          True,
          False,
          _USE_MODEL_FILE,
          ModelFileType.FILE_CONTENT,
          0.851961,
          100,
          (0.127049, 0.125416),
      ),
  )
  def test_embed(self, l2_normalize, quantize, model_name, model_file_type,
                 expected_similarity, expected_size, expected_first_values):
    """Embeds two similar texts and checks sizes, values and similarity."""
    # Creates embedder.
    model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, model_name))
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')
    options = _TextEmbedderOptions(
        base_options=base_options, l2_normalize=l2_normalize,
        quantize=quantize)
    embedder = _TextEmbedder.create_from_options(options)
    # Extracts both embeddings.
    positive_text0 = "it's a charming and often affecting journey"
    positive_text1 = 'what a great and fantastic trip'
    result0 = embedder.embed(positive_text0)
    result1 = embedder.embed(positive_text1)
    # Checks embeddings and cosine similarity.
    expected_result0_value, expected_result1_value = expected_first_values
    self._check_embedding_size(result0, quantize, expected_size)
    self._check_embedding_size(result1, quantize, expected_size)
    self._check_embedding_value(result0, expected_result0_value)
    self._check_embedding_value(result1, expected_result1_value)
    self._check_cosine_similarity(result0, result1, expected_similarity)
    # Closes the embedder explicitly when the embedder is not used in
    # a context.
    embedder.close()

  # Same parameters as test_embed; see the comment above that decorator.
  @parameterized.parameters(
      (
          False,
          False,
          _BERT_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.969514,
          512,
          (19.9016, 22.626251),
      ),
      (
          True,
          False,
          _BERT_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.969514,
          512,
          (0.0585837, 0.0723035),
      ),
      (
          False,
          False,
          _REGEX_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.999937,
          16,
          (0.0309356, 0.0312863),
      ),
      (
          True,
          False,
          _REGEX_MODEL_FILE,
          ModelFileType.FILE_CONTENT,
          0.999937,
          16,
          (0.549632, 0.552879),
      ),
      (
          False,
          False,
          _USE_MODEL_FILE,
          ModelFileType.FILE_NAME,
          0.851961,
          100,
          (1.422951, 1.404664),
      ),
      (
          True,
          False,
          _USE_MODEL_FILE,
          ModelFileType.FILE_CONTENT,
          0.851961,
          100,
          (0.127049, 0.125416),
      ),
  )
  def test_embed_in_context(self, l2_normalize, quantize, model_name,
                            model_file_type, expected_similarity,
                            expected_size, expected_first_values):
    """Same as test_embed but runs the embedder inside a `with` block."""
    # Creates embedder.
    model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, model_name))
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')
    options = _TextEmbedderOptions(
        base_options=base_options, l2_normalize=l2_normalize,
        quantize=quantize)
    with _TextEmbedder.create_from_options(options) as embedder:
      # Extracts both embeddings.
      positive_text0 = "it's a charming and often affecting journey"
      positive_text1 = 'what a great and fantastic trip'
      result0 = embedder.embed(positive_text0)
      result1 = embedder.embed(positive_text1)
      # Checks embeddings and cosine similarity.
      expected_result0_value, expected_result1_value = expected_first_values
      self._check_embedding_size(result0, quantize, expected_size)
      self._check_embedding_size(result1, quantize, expected_size)
      self._check_embedding_value(result0, expected_result0_value)
      self._check_embedding_value(result1, expected_result1_value)
      self._check_cosine_similarity(result0, result1, expected_similarity)

  @parameterized.parameters(
      # TODO: The similarity should likely be lower
      (_BERT_MODEL_FILE, 0.98077),
      (_USE_MODEL_FILE, 0.780334),
  )
  def test_embed_with_different_themes(self, model_file, expected_similarity):
    """Embeds two thematically unrelated texts and checks their similarity."""
    # Creates embedder.
    model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, model_file)
    )
    base_options = _BaseOptions(model_asset_path=model_path)
    options = _TextEmbedderOptions(base_options=base_options)
    embedder = _TextEmbedder.create_from_options(options)
    # Extracts both embeddings.
    text0 = (
        'When you go to this restaurant, they hold the pancake upside-down '
        "before they hand it to you. It's a great gimmick."
    )
    result0 = embedder.embed(text0)
    text1 = "Let's make a plan to steal the declaration of independence."
    result1 = embedder.embed(text1)
    similarity = _TextEmbedder.cosine_similarity(
        result0.embeddings[0], result1.embeddings[0]
    )
    self.assertAlmostEqual(
        similarity, expected_similarity, delta=_SIMILARITY_TOLERANCE
    )
    # Closes the embedder explicitly when the embedder is not used in
    # a context.
    embedder.close()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()