from transformers import BertTokenizer, BertModel
from sklearn.metrics.pairwise import cosine_similarity
import torch
# Load the pretrained Chinese BERT tokenizer and encoder once at module
# import time; both helper functions below share these globals.
# NOTE(review): first run downloads the weights from the Hugging Face hub.
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')
def get_bert_embedding(text):
    """Return the [CLS] sentence embedding of *text* from Chinese BERT.

    Args:
        text: Input sentence (str); tokenized and truncated to at most
            512 tokens, the model's maximum sequence length.

    Returns:
        torch.Tensor of shape (1, hidden_size) — the last-layer hidden
        state of the [CLS] token, used here as the sentence vector.
    """
    # Convert the text to BERT input tensors (input ids, attention mask).
    inputs = tokenizer(text, return_tensors="pt", truncation=True,
                       padding=True, max_length=512)
    # Inference only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        outputs = model(**inputs)
    # [CLS] token sits at position 0 of the last hidden layer.
    embedding = outputs.last_hidden_state[:, 0, :]
    return embedding
def calculate_bert_similarity(text1, text2):
    """Return the cosine similarity of two sentences' BERT embeddings.

    Args:
        text1: First sentence (str).
        text2: Second sentence (str).

    Returns:
        Scalar (numpy float) in [-1, 1]; higher means more similar.
    """
    # Embed each sentence independently via the shared model.
    embedding1 = get_bert_embedding(text1)
    embedding2 = get_bert_embedding(text2)
    # cosine_similarity returns a (1, 1) matrix; unwrap the scalar.
    similarity = cosine_similarity(embedding1, embedding2)
    return similarity[0][0]
if __name__ == "__main__":
    # Smoke test: similarity of two related Chinese phrases
    # ("compute similarity" vs "query similarity").
    print(calculate_bert_similarity("计算相似度","查询相似度"))
# python transformers — compute the similarity of two Chinese sentences
# (source-article metadata: latest recommended post published 2025-04-11 17:36:31)