from transformers import BertModel, BertTokenizer
import torch
# load the tokenizer and model from a local pretrained-BERT checkpoint directory
tokenizer = BertTokenizer.from_pretrained('./pycharm/K-Adapter/pretrain_bert_en')
model = BertModel.from_pretrained('./pycharm/K-Adapter/pretrain_bert_en')
text = 'I wish you have a good day'
text_ids = tokenizer.encode(text)  # tokenize + map to ids; [CLS] (101) and [SEP] (102) are added automatically
print(text_ids)
# [101, 1045, 4299, 2017, 2031, 1037, 2204, 2154, 102]
text_tokens = tokenizer.convert_ids_to_tokens(text_ids)
print(text_tokens)
# ['[CLS]', 'i', 'wish', 'you', 'have', 'a', 'good', 'day', '[SEP]']
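To confirm that 101 and 102 really are the automatically added [CLS]/[SEP] ids, special tokens can be switched off. A minimal sketch using encode()'s add_special_tokens flag:

print(tokenizer.encode(text, add_special_tokens=False))
# [1045, 4299, 2017, 2031, 1037, 2204, 2154] -- same ids, minus the leading 101 and trailing 102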
a = torch.tensor(text_ids).unsqueeze(0)  # shape [1, 9], i.e. [batch_size, seq_len]
print(a.shape)
output = model(a)  # forward pass on the batched ids built above
# the output contains two items, ['last_hidden_state', 'pooler_output'], with shapes [1, 9, 768] and [1, 768]
print(len(output), output.keys())
print(output['last_hidden_state'].shape, output['pooler_output'].shape)
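The same pipeline can be written more compactly: calling the tokenizer directly returns PyTorch tensors with the batch dimension and attention mask already built, so the manual unsqueeze goes away. A minimal sketch, assuming the same checkpoint as above:

inputs = tokenizer(text, return_tensors='pt')  # dict with input_ids and attention_mask, each [1, 9]
output = model(**inputs)
print(output.last_hidden_state.shape)  # torch.Size([1, 9, 768])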
Just use the BERT implementation that ships with transformers; no need to write it yourself. The part that always trips me up is loading the pretrained model, so hopefully writing it down here makes it stick this time.
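When no local checkpoint is at hand, the same weights can usually be pulled straight from the Hugging Face Hub by model name. A sketch; 'bert-base-uncased' is an assumption here, chosen because 101/102 are its [CLS]/[SEP] ids:

from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')  # downloads and caches on first use
model = AutoModel.from_pretrained('bert-base-uncased')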