1、pytorch创建RNN模型和训练模型的代码
import torch
from torch import nn
from torch.autograd import Variable
# Define the RNN model.
class RNN(nn.Module):
    """Two-layer vanilla RNN over scalar time series.

    forward(x, h_n) takes x of shape (batch, time_step, 1) and an optional
    hidden state (None -> zeros), and returns per-step predictions of shape
    (batch, time_step, 1) together with the updated hidden state.
    """

    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=1,       # one feature per time step
            hidden_size=32,
            num_layers=2,
            batch_first=True,   # tensors are (batch, time, feature)
        )
        self.linear = nn.Linear(32, 1)

    def forward(self, x, h_n):
        # r_out: (batch, time_step, 32); h_n: (num_layers, batch, 32)
        r_out, h_n = self.rnn(x, h_n)
        # nn.Linear acts on the last dimension, so applying it to r_out
        # directly is equivalent to the original per-step loop followed by
        # torch.stack(..., dim=1), but vectorized (no Python-level loop).
        return self.linear(r_out), h_n
# Create the model.
net = RNN()

# ======== Training ========
# The original post left `sample`/`label` as "……" placeholders, which is not
# valid Python.  Build a small synthetic dataset instead so the script runs:
# predict cos(t) from sin(t).  Shape: (batch_size, time_step, feature).
steps = torch.linspace(0, 10, 50)
x = torch.sin(steps).view(1, -1, 1).float()
y = torch.cos(steps).view(1, -1, 1).float()
# NOTE(review): torch.autograd.Variable is deprecated since PyTorch 0.4 —
# plain tensors carry autograd state, so no wrapping is needed.

# Optimizer (example).
optimizer = torch.optim.Adam(net.parameters(), lr=0.02)
# Loss function (example).
loss_func = nn.MSELoss()

# Initial hidden state passed to the RNN (None -> zeros).
h_n = None

# Train for 100 rounds.
for i in range(100):
    # Feed samples and hidden state; get predictions and the new hidden state.
    prediction, h_n = net(x, h_n)
    # Detach the hidden state so gradients do not flow back into previous
    # iterations (preferred over the deprecated `.data` attribute).
    h_n = h_n.detach()
    # Compute and report the loss (`.item()` prints the scalar, not a tensor repr).
    loss = loss_func(prediction, y)
    print('round ' + str(i) + ' ' + str(loss.item()))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Save the model once every 100 rounds.
    if (i + 1) % 100 == 0:
        torch.save(net, 'net.pkl')
2、代码的分析