李沐: Softmax Regression + Loss Functions + Image Classification Dataset

This post walks through implementing softmax regression on the Fashion-MNIST dataset from scratch in PyTorch: data loading, model definition, training, and evaluation. It first implements the softmax function and cross-entropy loss by hand and builds a custom training loop. It then shows the concise version, which uses nn.Sequential to define the model and nn.CrossEntropyLoss to replace the hand-written softmax and cross-entropy. Throughout, an Accumulator class is used to tally training and test accuracy. Finally, note that the concise version's training module is equivalent to the earlier hand-written one.
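For reference, the two pieces implemented by hand below are the standard softmax function and cross-entropy loss:

$$\mathrm{softmax}(\mathbf{o})_j = \frac{\exp(o_j)}{\sum_k \exp(o_k)}, \qquad l(y, \hat{\mathbf{y}}) = -\log \hat{y}_y$$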

Following along with the course and jotting down notes for easy review later.
This is the from-scratch implementation of softmax regression. I can't say I fully understood the lecture, and the code was tedious to type out.
If anyone finds it useful, feel free to run it; I added a few comments to make it easier to follow.
It should also be handy to look back on later.

%matplotlib inline
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
from IPython import display

d2l.use_svg_display()


def get_dataloader_workers():
    return 4  # use 4 worker processes to read the data
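One caveat from experience, not from the original notes: on Windows or inside some notebook environments, multi-worker data loading can hang or throw errors. If that happens, a safe fallback is to read data in the main process:

def get_dataloader_workers():  # fallback variant
    return 0  # 0 means load data in the main process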


def load_data_fashion_mnist(batch_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))

    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root="./data", train=True, transform=trans, download=False)
    mnist_test = torchvision.datasets.FashionMNIST(root="./data", train=False, transform=trans, download=False)

    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

batch_size=256
train_iter,test_iter=load_data_fashion_mnist(batch_size)
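A quick sanity check on the loaders (my addition, not in the original): grab one batch and inspect the shapes.

X, y = next(iter(train_iter))
print(X.shape, X.dtype, y.shape, y.dtype)
# expected: torch.Size([256, 1, 28, 28]) torch.float32 torch.Size([256]) torch.int64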


num_inputs = 784  # flatten the 28*28 image into a vector; softmax regression takes a vector input, so some spatial information is lost (convolutions recover it later)
num_outputs = 10  # 10 classes
w = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)

def softmax(x):
    x_exp = torch.exp(x)
    partition = x_exp.sum(1, keepdim=True)
    return x_exp / partition  # broadcasting divides each row by its sum
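Note that this naive softmax overflows as soon as any logit is large (torch.exp(torch.tensor(1000.)) is inf). The standard fix, omitted above for simplicity, is to subtract the row-wise maximum first, which leaves the result mathematically unchanged; a minimal sketch:

def stable_softmax(x):
    x = x - x.max(dim=1, keepdim=True).values  # shift logits so the largest is 0
    x_exp = torch.exp(x)
    return x_exp / x_exp.sum(dim=1, keepdim=True)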

def net(x):
    return softmax(torch.matmul(x.reshape((-1, w.shape[0])), w) + b)

def cross_entropy(y_hat, y):
    return -torch.log(y_hat[range(len(y_hat)), y])  # per-sample loss: -log of the probability assigned to the true class
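The indexing trick in cross_entropy is worth a worked example (adapted from the d2l book): y_hat[range(len(y_hat)), y] picks out, for each row, the probability the model assigned to the true class.

y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
print(y_hat[[0, 1], y])         # tensor([0.1000, 0.5000])
print(cross_entropy(y_hat, y))  # tensor([2.3026, 0.6931])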

def accuracy(y_hat, y):  # count correctly predicted samples
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:  # input is a matrix of per-class scores
        y_hat = y_hat.argmax(axis=1)  # take the class with the largest predicted probability
    cmp = y_hat.type(y.dtype) == y  # cast y_hat to y's dtype before comparing
    return float(cmp.type(y.dtype).sum())  # number of correct predictions
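Continuing the toy example above: accuracy returns the number of correct argmax predictions, so dividing by the batch size gives the accuracy rate.

print(accuracy(y_hat, y) / len(y))  # 0.5: row 0 predicts class 2 (true class 0), row 1 predicts class 2 (correct)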

class Accumulator:  # accumulates sums over n variables
    def __init__(self, n):
        self.data = [0.0] * n
    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]
    def reset(self):
        self.data = [0.0] * len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]
    
def evaluate_accuracy(net, data_iter):
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch the model to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, total number of predictions
    for x, y in data_iter:
        metric.add(accuracy(net(x), y), y.numel())
    return metric[0] / metric[1]
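One small improvement that is not in the version above: evaluation needs no gradients, so wrapping the loop in torch.no_grad() saves memory and time. The same function with that change would look like:

def evaluate_accuracy(net, data_iter):
    if isinstance(net, torch.nn.Module):
        net.eval()
    metric = Accumulator(2)
    with torch.no_grad():  # skip building the autograd graph during evaluation
        for x, y in data_iter:
            metric.add(accuracy(net(x), y), y.numel())
    return metric[0] / metric[1]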

def train_epoch_ch3(net, train_iter, loss, updater):
    if isinstance(net, torch.nn.Module):
        net.train()
    metric = Accumulator(3)  # accumulates total loss, correct count, sample count
    for x, y in train_iter:
        y_hat = net(x)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):  # branch 1: a built-in PyTorch optimizer
            updater.zero_grad()
            l.backward()
            updater.step()
            metric.add(float(l) * len(y), accuracy(y_hat, y), y.size().numel())
        else:  # branch 2: our hand-written updater
            l.sum().backward()
            updater(x.shape[0])
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]  # average loss, average accuracy
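Why the two branches accumulate the loss differently: nn.CrossEntropyLoss defaults to reduction='mean', so l is a scalar batch average and float(l) * len(y) recovers the batch total, while the hand-written cross_entropy returns one loss per sample, so l.sum() is the batch total directly. Compare:

loss_mean = torch.nn.CrossEntropyLoss()                  # returns the scalar mean over the batch
loss_none = torch.nn.CrossEntropyLoss(reduction='none')  # returns one loss value per sample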


class Animator:  # utility for live plotting during training; not essential
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1, figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        self.config_axes = lambda: d2l.set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.x, self.y, self.fmts = None, None, fmts

    def add(self, x, y):
        if not hasattr(y, "__len__"):  # wrap a scalar into a list
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.x:
            self.x = [[] for _ in range(n)]
        if not self.y:
            self.y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.x[i].append(a)
                self.y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.x, self.y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)

    
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9], legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
    
lr=0.1
def updater(batch_size):
    return d2l.sgd([w,b],lr,batch_size)
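For reference, d2l.sgd is just minibatch stochastic gradient descent; in the book it is defined essentially as:

def sgd(params, lr, batch_size):
    with torch.no_grad():  # parameter updates must not be tracked by autograd
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()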

num_epochs=10
train_ch3(net,train_iter,test_iter,cross_entropy,num_epochs,updater)

The concise implementation that follows boils the model definition down to just a few lines:

from torch import nn

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))  # Flatten collapses any tensor into a 2-D (batch, features) tensor
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights)

Honestly, I'm not sure why this counts as the "concise" implementation: the training module is still the same train_ch3 as before, and that module in turn reuses the earlier helpers. In short, compared with the from-scratch version, the code below just drops the hand-written softmax and cross-entropy (nn.CrossEntropyLoss handles both) and the manual parameter updates (torch.optim.SGD handles those); everything else is unchanged.

import torch
import torchvision
from torch import nn
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l

def load_data_fashion_mnist(batch_size, resize=None):
    # start with the ToTensor transform
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    # transforms.Compose chains multiple image transforms together
    trans = transforms.Compose(trans)
    # load the dataset (download=False assumes it is already under ./data)
    mnist_train = torchvision.datasets.FashionMNIST(
        root="./data", train=True, transform=trans, download=False)
    mnist_test = torchvision.datasets.FashionMNIST(
        root="./data", train=False, transform=trans, download=False)
    # return the constructed iterator objects
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=get_dataloader_workers()))
 
 
 
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
def init_weights(m):
    if type(m) == torch.nn.Linear:
        torch.nn.init.normal_(m.weight, std=0.01)
net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(784, 10))
net.apply(init_weights)

loss = torch.nn.CrossEntropyLoss()

trainer = torch.optim.SGD(net.parameters(), lr=0.1)
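Note that nn.CrossEntropyLoss applies log-softmax internally and then takes the negative log-likelihood, which is exactly why net outputs raw logits here with no softmax layer. A quick equivalence check (my own sketch):

logits = torch.randn(4, 10)
targets = torch.randint(0, 10, (4,))
manual = -torch.log_softmax(logits, dim=1)[range(4), targets].mean()
print(torch.isclose(manual, loss(logits, targets)))  # tensor(True)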

def accuracy(y_hat, y):
    """Count the number of correct predictions."""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:  # len(y_hat.shape) is the number of dimensions; a matrix means per-class scores
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())
 
 
# we can evaluate the accuracy of any model net
def evaluate_accuracy(net, data_iter):
    """Compute the model's accuracy on the given dataset."""
    if isinstance(net, torch.nn.Module):  # isinstance() checks whether an object is of a given type
        net.eval()  # switch the model to evaluation mode
    metric = Accumulator(2)  # accumulator: correct predictions in metric[0], total predictions in metric[1]
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]  # correctly classified samples / total samples
 
 
class Accumulator:
    """Accumulate sums over n variables."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
# training for a single epoch
def train_epoch_ch3(net, train_iter, loss, updater):
    if isinstance(net, torch.nn.Module):  # if it is a torch.nn.Module, switch to training mode
        net.train()
    metric = Accumulator(3)  # length-3 accumulator for loss, accuracy count, and y.numel()
    for X, y in train_iter:  # sweep over the data
        y_hat = net(X)  # compute y_hat
        l = loss(y_hat, y)  # compute l with the cross-entropy loss
        if isinstance(updater, torch.optim.Optimizer):  # if updater is a PyTorch optimizer
            updater.zero_grad()  # first zero the gradients
            l.backward()  # then compute gradients
            updater.step()  # and update the parameters
            metric.add(  # accumulate loss, correct count, sample count
                float(l) * len(y), accuracy(y_hat, y),
                y.size().numel())
        else:  # the fully hand-written case
            l.sum().backward()  # l is a vector here; sum it before computing gradients
            updater(X.shape[0])  # update parameters according to the batch size
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())  # accumulate total loss, correct count, sample count
    return metric[0] / metric[2], metric[1] / metric[2]  # returns loss/num_samples and correct/num_samples
 
 
 
# training function
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):  # @save
    """Train the model."""
    for epoch in range(num_epochs):
        train_loss, train_acc = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        print('epoch: %d, loss: %.4f, train_acc: %.3f, test_acc: %.3f' % (epoch + 1, train_loss, train_acc, test_acc))
 
 
num_epochs = 10
train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
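After training, a quick way to eyeball predictions on the test set (my addition; the d2l book's predict_ch3 does this with rendered images):

X, y = next(iter(test_iter))
preds = net(X).argmax(axis=1)
print('labels:', y[:8].tolist())
print('preds :', preds[:8].tolist())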

The lecture left me a bit dizzy.
PyTorch has a pile of functions I don't know yet; I just look each one up as I run into it.
