DL学习8--Lenet-Mnist

该代码实现了一个基于PyTorch的卷积神经网络(LeNet)来识别Fashion-MNIST数据集,包含两个卷积层和三个全连接层。数据预处理、数据加载、模型定义、训练参数设置、模型训练以及在GPU上计算精度的过程都被详细展示。经过10个训练轮次,模型的测试精度逐渐提高。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

模型定义以及layer层参数设置,mnist输入为28,28,1维度的数据,它包含10个类别,使用了两个卷积层和三个全连接层进行识别。

import torch
import torch.nn as nn

class reshape_data(nn.Module):
    """Reshape a flat batch of Fashion-MNIST vectors into NCHW image tensors."""

    def forward(self, X):
        # -1 lets torch infer the batch dimension; each image is 1 x 28 x 28.
        return X.view(-1, 1, 28, 28)

class Lenet(nn.Module):
    """LeNet-style CNN for 28x28 single-channel inputs with 10 output classes.

    Architecture: two conv/sigmoid/avg-pool stages followed by a
    three-layer fully-connected classifier head.
    """

    def __init__(self):
        super().__init__()
        layers = [
            # Stage 1: 1x28x28 -> 6x28x28 (padding preserves size) -> 6x14x14.
            nn.Conv2d(1, 6, kernel_size=5, padding=2),
            nn.Sigmoid(),
            nn.AvgPool2d(2, 2),
            # Stage 2: 6x14x14 -> 16x10x10 -> 16x5x5.
            nn.Conv2d(6, 16, kernel_size=5),
            nn.Sigmoid(),
            nn.AvgPool2d(2, 2),
            # Classifier head: flatten, then 400 -> 120 -> 84 -> 10.
            nn.Flatten(),
            nn.Linear(16 * 5 * 5, 120),
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.Sigmoid(),
            nn.Linear(84, 10),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, X):
        # Accept flat (N, 784) batches too: reshape to NCHW before the convs.
        return self.net(X.view(-1, 1, 28, 28))

数据下载以及 DataLoader(数据加载器)的设置。

from torch.utils import data
from torchvision import transforms
import torchvision


def load_data_fashion_mnist(batch_size, resize=None):
    """Download Fashion-MNIST and return (train_loader, test_loader).

    Images are converted to tensors; when `resize` is given, a Resize
    transform is applied first. Only the training loader shuffles.
    """
    steps = []
    if resize:
        steps.append(transforms.Resize(resize))
    steps.append(transforms.ToTensor())
    pipeline = transforms.Compose(steps)
    train_set = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=pipeline, download=True)
    test_set = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=pipeline, download=True)
    train_loader = data.DataLoader(train_set, batch_size, shuffle=True)
    test_loader = data.DataLoader(test_set, batch_size, shuffle=False)
    return (train_loader, test_loader)

# Mini-batch size shared by the training and test DataLoaders.
batch_size = 256
# NOTE: this triggers the dataset download on first run (writes to ../data).
train_iter, test_iter = load_data_fashion_mnist(batch_size)

def Accuracy(y_hat, y):
    """Return the number of correct predictions in the batch, as a float.

    `y_hat` may be raw per-class scores (2-D) — argmax is taken over the
    class axis — or already-decoded labels (1-D).
    """
    if y_hat.ndim > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(dim=1)
    matches = y_hat.type(y.dtype) == y
    return float(matches.type(y.dtype).sum())
def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute the classification accuracy of `net` over `data_iter`.

    If `device` is not given, the device of the model's first parameter is
    used, so data is moved to wherever the model lives (GPU or CPU).
    Returns correct predictions / total labels as a float in [0, 1].
    """
    if isinstance(net, nn.Module):
        net.eval()  # evaluation mode (disables dropout / batch-norm updates)
        if not device:
            device = next(iter(net.parameters())).device
    # Plain counters replace d2l.Accumulator — no third-party dependency needed.
    correct, total = 0.0, 0
    with torch.no_grad():  # no gradients required for evaluation
        for X, y in data_iter:
            if isinstance(X, list):
                # Some models (e.g. BERT-style) take a list of input tensors.
                X = [x.to(device) for x in X]
            else:
                X = X.to(device)
            y = y.to(device)
            correct += Accuracy(net(X), y)
            total += y.numel()
    return correct / total

模型训练参数设置,其中包括优化器,loss以及将模型载入到gpu

# Bug fix: the original referenced `net` before it was ever created
# (no `Lenet()` instantiation existed), and hard-coded 'cuda', which
# crashes on CPU-only machines. Instantiate the model first and fall
# back to CPU when CUDA is unavailable.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = Lenet().to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.9)
loss = nn.CrossEntropyLoss()
epochs = 10

from d2l import torch as d2l

模型训练,值得注意的是,在训练之前,我们使用了xavier对模型权重进行了初始化。

def train():
    """Train the global `net` on Fashion-MNIST, printing per-epoch accuracy.

    Uses the module-level `net`, `optimizer`, `loss`, `epochs`, `device`,
    `train_iter` and `test_iter`. Weights are Xavier-initialized before
    training starts.
    """

    def init_weights(m):
        # Xavier initialization for every conv / fully-connected layer.
        if type(m) in (nn.Linear, nn.Conv2d):
            nn.init.xavier_normal_(m.weight)
    net.apply(init_weights)

    for epoch in range(epochs):
        # Bug fix: the original tested `net == nn.Module` (always False), so
        # training mode was never enabled. Also, evaluate_accuracy_gpu puts
        # the net in eval mode each epoch, so train mode must be restored
        # at the top of every epoch, not once before the loop.
        net.train()
        correct, seen = 0.0, 0
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            preb = net(X)
            l = loss(preb, y)
            l.backward()
            optimizer.step()
            correct += Accuracy(preb, y)
            seen += len(X)
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        print(f"Epoch is {epoch} , train acc is {float(correct/seen)}, test acc is {test_acc}")

train()

训练结果

Epoch is 0 , train acc is 0.13701666666666668, test acc is 0.4172
Epoch is 1 , train acc is 0.5538833333333333, test acc is 0.6117
Epoch is 2 , train acc is 0.68355, test acc is 0.7133
Epoch is 3 , train acc is 0.7314833333333334, test acc is 0.7556
Epoch is 4 , train acc is 0.7565, test acc is 0.7639
Epoch is 5 , train acc is 0.7816666666666666, test acc is 0.766
Epoch is 6 , train acc is 0.7963, test acc is 0.7981
Epoch is 7 , train acc is 0.8121666666666667, test acc is 0.7948
Epoch is 8 , train acc is 0.8213333333333334, test acc is 0.8083
Epoch is 9 , train acc is 0.8311833333333334, test acc is 0.7698
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值