Model:
sonoisa/sentence-bert-base-ja-mean-tokens-v2
This is a Sentence-BERT model for Japanese (version 2).
This is an improved version trained with MultipleNegativesRankingLoss, a better loss function than the one used for version 1. On a private, unreleased dataset, it achieved accuracy 1.5 to 2 percentage points higher than version 1.
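As a rough illustration of that training setup (not the author's exact recipe, and the real training data is private), here is a minimal sketch using the sentence-transformers library and its MultipleNegativesRankingLoss; the sentence pairs below are invented placeholders:

from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Placeholder paraphrase pairs; the actual training set is not public.
train_examples = [
    InputExample(texts=["暴走したAI", "暴走した人工知能"]),
    InputExample(texts=["猫が好きだ", "猫が大好きだ"]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)

# Loading a raw BERT checkpoint this way adds a default mean-pooling head.
model = SentenceTransformer("cl-tohoku/bert-base-japanese-whole-word-masking")

# For each (anchor, positive) pair, the other positives in the batch
# serve as in-batch negatives.
train_loss = losses.MultipleNegativesRankingLoss(model)
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=10)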
The pretrained model cl-tohoku/bert-base-japanese-whole-word-masking is used as the base. Therefore, fugashi and ipadic must be installed to run inference (pip install fugashi ipadic).
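As a quick sanity check (a sketch, not part of the official instructions), loading the tokenizer is enough to verify these dependencies, since it fails with an import error when either package is missing:

from transformers import BertJapaneseTokenizer

# BertJapaneseTokenizer tokenizes via MeCab (fugashi) with the ipadic
# dictionary; from_pretrained raises if either package is not installed.
tokenizer = BertJapaneseTokenizer.from_pretrained("sonoisa/sentence-bert-base-ja-mean-tokens-v2")
print(tokenizer.tokenize("暴走した人工知能"))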
For background, see the article at https://qiita.com/sonoisa/items/1df94d0a98cd4f209051. Simply changing the model name in that article's sample code to "sonoisa/sentence-bert-base-ja-mean-tokens-v2" lets you use this model.
from transformers import BertJapaneseTokenizer, BertModel
import torch


class SentenceBertJapanese:
    def __init__(self, model_name_or_path, device=None):
        self.tokenizer = BertJapaneseTokenizer.from_pretrained(model_name_or_path)
        self.model = BertModel.from_pretrained(model_name_or_path)
        self.model.eval()

        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = torch.device(device)
        self.model.to(device)

    def _mean_pooling(self, model_output, attention_mask):
        # First element of model_output contains all token embeddings
        token_embeddings = model_output[0]
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # Average token embeddings, ignoring padding positions.
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    @torch.no_grad()
    def encode(self, sentences, batch_size=8):
        all_embeddings = []
        iterator = range(0, len(sentences), batch_size)
        for batch_idx in iterator:
            batch = sentences[batch_idx:batch_idx + batch_size]

            encoded_input = self.tokenizer.batch_encode_plus(
                batch, padding="longest", truncation=True, return_tensors="pt"
            ).to(self.device)
            model_output = self.model(**encoded_input)
            sentence_embeddings = self._mean_pooling(model_output, encoded_input["attention_mask"]).to("cpu")

            all_embeddings.extend(sentence_embeddings)

        # return torch.stack(all_embeddings).numpy()
        return torch.stack(all_embeddings)


MODEL_NAME = "sonoisa/sentence-bert-base-ja-mean-tokens-v2"  # <- this is v2
model = SentenceBertJapanese(MODEL_NAME)

sentences = ["暴走したAI", "暴走した人工知能"]
sentence_embeddings = model.encode(sentences, batch_size=8)

print("Sentence embeddings:", sentence_embeddings)
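A natural follow-up (not part of the original card) is to compare the two embeddings with cosine similarity; near-synonymous sentences such as these should score close to 1:

import torch.nn.functional as F

# Cosine similarity between the two sentence vectors computed above.
similarity = F.cosine_similarity(sentence_embeddings[0], sentence_embeddings[1], dim=0)
print(f"cosine similarity: {similarity.item():.4f}")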