import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# ---- Hyperparameters ----
EPOCH = 1               # number of full passes over the training set
BATCH_SIZE = 64         # mini-batch size (N)
LR = 0.01               # learning rate for Adam
DOWNLOAD_MNIST = False  # set True on the first run to fetch the dataset

# ---- Training data ----
train_data = dsets.MNIST(root='./mnist/', train=True, transform=transforms.ToTensor(), download=DOWNLOAD_MNIST)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# ---- Test data: first 2000 samples, pixel values scaled to [0, 1] ----
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
# `.test_data` / `.test_labels` are deprecated (removed in current torchvision);
# the supported attributes are `.data` / `.targets`.
test_x = test_data.data.type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.targets.numpy()[:2000]
# 定义循环神经网络
class RNN(nn.Module):
    """LSTM classifier for MNIST.

    Each 28x28 image is treated as a sequence of 28 rows; every row is
    first embedded from 28 pixels to 128 features, then fed through a
    two-layer LSTM, and the hidden state at the last time step is
    mapped to 10 class logits.
    """

    def __init__(self):
        super(RNN, self).__init__()
        # Row embedding: 28 pixels -> 128 features per time step.
        self.input = nn.Linear(28, 128)
        # Two stacked LSTM layers; batch_first=True => (batch, time_step, input).
        self.rnn = nn.LSTM(input_size=128, hidden_size=64, num_layers=2, batch_first=True)
        # Classification head over the final hidden state.
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # x arrives as NHWC — presumably (N, 28, 28, 1); flatten to rows: (N*28, 28).
        rows = x.view(-1, 28)
        embedded = self.input(rows)
        # Regroup into sequences: (N, 28 time steps, 128 features).
        seq = embedded.view(-1, 28, 128)
        # None => zero-initialised hidden/cell states; discard (h_n, c_n).
        r_out, _ = self.rnn(seq, None)
        # Classify from the LSTM output at the last time step.
        return self.out(r_out[:, -1, :])
rnn = RNN()
print(rnn)  # show the architecture once

# Adam over all model parameters; CrossEntropyLoss expects raw logits + int labels.
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

# ---- Training loop ----
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        # DataLoader yields NCHW [B, 1, 28, 28]; the model consumes NHWC.
        b_x = b_x.permute(0, 2, 3, 1)
        output = rnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Periodic progress report (replaces the leftover per-batch
        # debug print of b_x.size(), which flooded stdout every step).
        if step % 50 == 0:
            print('Epoch:', epoch, '| step:', step, '| train loss: %.4f' % loss.item())
python pytorch LSTM
最新推荐文章于 2025-09-15 09:00:34 发布
本博客介绍了一个使用PyTorch实现的循环神经网络(RNN),应用于MNIST手写数字数据集的分类任务。通过定义RNN模型结构,设置超参数,下载并预处理MNIST数据集,然后进行模型训练和测试,展示了如何从零开始构建一个简单的RNN分类器。
部署运行你感兴趣的模型镜像
您可能感兴趣的与本文相关的镜像
PyTorch 2.5
PyTorch
Cuda
PyTorch 是一个开源的 Python 机器学习库,基于 Torch 库,底层由 C++ 实现,应用于人工智能领域,如计算机视觉和自然语言处理
299

被折叠的评论
为什么被折叠?



