MNIST 数字识别(PyTorch)

该代码示例展示了如何用 PyTorch 实现一个简单的卷积神经网络(CNN)模型来处理 MNIST 数据集。模型包括两个卷积层、最大池化层以及全连接层。训练过程使用 Adam 优化器和交叉熵损失函数,并监控、记录训练中的损失和准确率。文末另附一个用普通全连接网络实现的对照版本。

方法一:利用 CNN

import torch
from torchvision.datasets import MNIST
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np

BATCH_SIZE = 150
LEARNING_RATE = 0.005
class DR(nn.Module):
    """Small CNN for MNIST digit recognition: two conv+ReLU+maxpool blocks
    followed by a single linear classification head (10 classes)."""

    def __init__(self):
        super(DR, self).__init__()
        # NOTE: kernel_size=3 with padding=2 *grows* the feature map:
        # 28x28 -> 30x30 (the original comments claimed 28x28). The flattened
        # size below is still correct because 30 -> 15 -> 15 -> 7 after the
        # two pooling layers.
        self.forwardSeq = nn.Sequential(
            nn.Conv2d(
                in_channels=1,        # grayscale input
                out_channels=16,      # number of learned filters
                kernel_size=3,
                stride=1,
                padding=2,
                padding_mode="zeros"  # pad with constant zeros
            ),                                # 1x28x28 -> 16x30x30
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),      # 16x30x30 -> 16x15x15
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),  # -> 32x15x15
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),      # 32x15x15 -> 32x7x7 (floor division)
        )
        self.linear = nn.Linear(32 * 7 * 7, 10)  # 10 digit classes

    def forward(self, input):
        """Return (batch, 10) class logits for a (batch, 1, 28, 28) input."""
        out = self.forwardSeq(input)
        out = out.view(input.size(0), -1)  # flatten, keeping the batch dimension
        return self.linear(out)

def training(epoch, model, criterion, optimaizer, loader):
    """Run `epoch` full passes over `loader`, recording loss/accuracy on every
    25th batch.

    Returns (accuracies, losses, best_acc, best_loss, x_extent): two numpy
    arrays with one entry per 25th batch, two {"idx", "num"} dicts for the
    best accuracy / lowest loss seen, and the overall x-axis extent.

    NOTE(review): the recorded "idx" is len(history) * 25 computed *after*
    appending, so the first sample is labelled 25 rather than 0; the
    downstream scatter markers are therefore shifted by one step relative to
    the plotted curves — confirm before relying on the marker position.
    """
    rights = []
    losses = []
    maxAcc = {"idx": 0, "num": 0}
    minLoss = {"idx": 0, "num": np.inf}
    for ep in range(epoch):
        print("epoch:", ep)
        for idx, (data, target) in enumerate(loader):
            out = model(data)
            loss = criterion(out, target)
            loss.backward()
            optimaizer.step()
            optimaizer.zero_grad()
            if idx % 25 != 0:
                continue
            pre = out.max(dim=-1)[1]
            acc = pre.eq(target).float().mean()
            rights.append(acc.item())
            losses.append(loss.item())
            if maxAcc["num"] <= acc:
                maxAcc = {"idx": len(rights) * 25, "num": acc.item()}
            if minLoss["num"] >= loss:
                minLoss = {"idx": len(rights) * 25, "num": loss.item()}
            print("index({}) in epoch({}), loss is {}, acc is {}".format(idx, ep, loss, acc))
    rights = np.array(rights)
    losses = np.array(losses)
    length = len(rights) * 25
    return rights, losses, maxAcc, minLoss, length
# Build the datasets/loader, train the CNN for 3 epochs, then plot the metrics.
sampleTR = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=True)   # training split
sampleTE = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=False)  # test split (unused below)
loader = DataLoader(dataset=sampleTR, batch_size=BATCH_SIZE, shuffle=True)
model = DR()
criterion = nn.CrossEntropyLoss()
optimaizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# BUG FIX: the original stored the whole result tuple in `args` and then
# referenced the individual names (length, rights, ...), which raised
# NameError at the first plot call — unpack the tuple instead.
rights, losses, maxAcc, minLoss, length = training(3, model, criterion, optimaizer, loader)
fig, ax = plt.subplots()
ax.plot(np.array([0, length]), np.array([1, 1]), c="red", label="one")  # y=1 reference line
ax.plot(np.linspace(0, length, rights.size), rights, label="trainAcc", c="blue")
ax.plot(np.linspace(0, length, losses.size), losses, label="trainLoss", c="green")
ax.scatter(np.array(maxAcc["idx"]), np.array(maxAcc["num"]), c="blue", s=30, label="maxRight[index:{} number:{}]".format(maxAcc["idx"], maxAcc["num"]))
ax.scatter(np.array(minLoss["idx"]), np.array(minLoss["num"]), c="green", s=30, label="minLoss[index:{} number:{}]".format(minLoss["idx"], minLoss["num"]))
ax.legend()
plt.yticks(np.linspace(0, losses.max(), 10))
plt.xticks(np.linspace(0, length, 20), rotation=50)
plt.xlabel("Unit:BATCH, BATCH_SIZE:{} Samples".format(BATCH_SIZE))
plt.show()

利用普通(全连接)神经网络:

import numpy as np
import torch
from torchvision.datasets import MNIST
import matplotlib.pyplot as plt
from torchvision import transforms
from torch import nn
from torch.utils.data import DataLoader
'''Fetch the data'''
mnist_tr = MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())#training split: 60,000 single-channel 28x28 samples
mnist_te = MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())#test split: 10,000 samples (unused below)
BatchSize = 256
LearningRate = 0.005
class DR(nn.Module):
    """Two-layer fully connected classifier for flattened 28x28 MNIST images."""

    def __init__(self):
        super(DR, self).__init__()
        self.linear_1 = nn.Linear(28*28, 28)  # hidden layer: 784 -> 28
        self.linear_2 = nn.Linear(28, 10)     # output layer: 28 -> 10 classes

    def forward(self, input):
        """Return (batch, 10) logits; `input` must already be flattened to (batch, 784)."""
        out = self.linear_1(input)
        # FIX: the original wrote `nn.ReLU(inplace=True)(out)` as a bare
        # statement, relying on the hidden in-place mutation of `out` for its
        # effect. Make the activation an explicit assignment instead — same
        # values, but the dataflow is visible.
        out = torch.relu(out)
        return self.linear_2(out)
dr = DR()
criterion = nn.CrossEntropyLoss()#loss function
optimal = torch.optim.Adam(dr.parameters(), lr=LearningRate)#optimisation algorithm
loader = DataLoader(dataset=mnist_tr, shuffle=True, batch_size=BatchSize, drop_last=True)#drop_last: every batch is exactly BatchSize — train() reshapes with that constant
def train(epoch):
    """Train the module-level `dr` model for `epoch` passes over `loader`,
    recording loss/accuracy every 5 batches, then plot both curves.

    Relies on module-level globals: dr, criterion, optimal, loader.
    """
    accs = []
    losses = []
    for i in range(epoch):
        print("epoch:{}-----------------------------------------\n".format(i))
        for index, (data, target) in enumerate(loader):
            # Flatten to (batch, 784). Use the actual batch size rather than
            # the BatchSize constant, so the code also works without
            # drop_last=True (the original crashed on a short final batch).
            out = dr(data.view(data.size(0), 28*28))
            loss = criterion(out, target)
            loss.backward()
            optimal.step()
            optimal.zero_grad()
            if index % 5 == 0:
                pre = out.max(dim=-1)[1]
                acc = pre.eq(target).float().mean()
                accs.append(acc.item())
                losses.append(loss.item())
                print("epoch: {}, index: {}, loss: {}, accuracy: {}".format(i, index, loss, acc))
    print("accs长度:", len(accs))
    # BUG FIX: metrics are recorded every 5 batches, but the original scaled
    # the x-axis by 25 (copy-pasted from the CNN script) — scale by 5 here so
    # the x coordinate matches the actual batch index.
    plt.plot(np.linspace(0, len(losses)*5, len(losses)), np.array(losses), label="loss curve")
    plt.plot(np.linspace(0, len(accs)*5, len(accs)), np.array(accs), label="acc curve")
    plt.legend()
    plt.show()

train(3)

(此处原为网页抓取残留的推广、评论区与支付界面内容,与正文无关,已移除。)