# I wrote this code while following a Bilibili creator's blog tutorial, adding my own comments.
# Reference: https://wmathor.com/index.php/archives/1442/
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data as Data
# Default tensor type used by the model (legacy FloatTensor alias; kept for
# compatibility with the code that consumes it further down the file).
dtype = torch.FloatTensor

# Toy training corpus: each 3-word sentence provides a 2-word context and a
# 1-word prediction target for the NNLM.
sentences = ['i like cat', 'i love coffee', 'i hate milk']

# Build the vocabulary: split every sentence into words and deduplicate.
word_list = list(set(" ".join(sentences).split()))

# Forward lookup (word -> integer id) and reverse lookup (id -> word).
word_dict = {w: i for i, w in enumerate(word_list)}
number_dict = {i: w for i, w in enumerate(word_list)}

# Vocabulary size: the number of output classes for the model.
n_class = len(word_dict)

# Context window: every word of a sentence except the last one is input.
n_step = len(sentences[0].split()) - 1

# Hidden-layer width.
n_hidden = 2

# Dimensionality of each word embedding.
m = 2
#构建输入输出数据
def make_batch(sentences):
input_batch = []
target_batch = []
for sen in sentences:
word = sen.split()#将句子中每个词分词
#:-1表示取每个句子里面的前两个单词作为输入
#然后通过