PyTorch Learning -- Neural Networks -- A Complete Model Training Workflow

1. Downloading the dataset

train_data = torchvision.datasets.CIFAR10(root="datasets",train=True,transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10(root="datasets",train=False,transform=torchvision.transforms.ToTensor(),download=True)
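Each CIFAR10 sample is a 3x32x32 RGB image paired with an integer class label (10 classes in total). A quick sanity check, assuming the code above has already run (img and target are just illustrative names):

img, target = train_data[0]
print(img.shape)   # torch.Size([3, 32, 32])
print(target)      # an integer class index between 0 and 9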

2. Dataset sizes

train_data_len = len(train_data)
test_data_len = len(test_data)
print(train_data_len)
print(test_data_len)
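For CIFAR10 this prints 50000 and 10000: the training set contains 50,000 images and the test set 10,000.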

3. Loading the data

train = DataLoader(train_data,batch_size=64)
test = DataLoader(test_data,batch_size=64)
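With batch_size=64, each iteration over a loader yields a batch of 64 images and 64 labels. A quick check, assuming the loaders above exist (imgs and targets are illustrative names):

imgs, targets = next(iter(train))
print(imgs.shape)     # torch.Size([64, 3, 32, 32])
print(targets.shape)  # torch.Size([64])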

4. Defining the model

from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear


class Mary(nn.Module):
    def __init__(self):
        super(Mary,self).__init__()
        self.model1 = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )
    def forward(self,x):
        x = self.model1(x)
        return x
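Why Linear(1024, 64): each convolution uses padding=2 and therefore keeps the 32x32 spatial size, while each MaxPool2d(2) halves it, 32 -> 16 -> 8 -> 4; the last convolution outputs 64 channels, so Flatten produces 64 * 4 * 4 = 1024 features. A minimal shape check, assuming torch is imported (model and x are illustrative names):

model = Mary()
x = torch.ones((1, 3, 32, 32))   # one fake CIFAR10-sized image
print(model(x).shape)            # torch.Size([1, 10]) -- one score per class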

5. Instantiating the model

Yorelee = Mary()

6. Loss function

loss_fn = nn.CrossEntropyLoss()
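nn.CrossEntropyLoss expects raw, unnormalized scores (logits) of shape [N, C] and integer class labels of shape [N]; it applies softmax and negative log-likelihood internally, which is why the last layer of Mary has no softmax. A small sketch with made-up numbers, assuming torch is imported (logits and labels are illustrative names):

logits = torch.tensor([[2.0, 0.5, 0.1],
                       [0.2, 1.5, 0.3]])   # 2 samples, 3 classes
labels = torch.tensor([0, 1])              # true class index per sample
print(loss_fn(logits, labels).item())      # a single scalar loss value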

7. Optimizer

learning_rate = 0.01
optimizer = torch.optim.SGD(Yorelee.parameters(),lr=learning_rate)

8. Training loop

epoch = 10
for i in range(epoch):
    print("***********第{}轮训练***********".format(i+1))
    index = 1
    for data in train:
        imgs,targets = data
        output = Yorelee(imgs)
        loss = loss_fn(output,targets)

        optimizer.zero_grad()   # clear gradients from the previous step
        loss.backward()         # backpropagate to compute new gradients
        optimizer.step()        # update the model parameters
        print("({}) ".format(index),loss.item())
        index += 1

9. Training loop + testing loop

# Training loop
epoch = 10
for i in range(epoch):
    print("***********第{}轮训练***********".format(i+1))
    index = 1
    for data in train:
        imgs,targets = data
        output = Yorelee(imgs)
        loss = loss_fn(output,targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("({}) ".format(index),loss.item())
        index += 1

    # Testing loop
    total_loss = 0
    with torch.no_grad():
        for data in test:
            imgs,targets = data
            output = Yorelee(imgs)
            loss = loss_fn(output,targets)
            total_loss += loss
    print("测试: ",total_loss.item())

10. TensorBoard visualization

# TensorBoard visualization
writer = SummaryWriter("logs")

# Training loop
epoch = 10
for i in range(epoch):
    print("***********第{}轮训练***********".format(i+1))
    index = 1
    for data in train:
        imgs,targets = data
        output = Yorelee(imgs)
        loss = loss_fn(output,targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("({}) ".format(index),loss.item())
        if index % 100 == 0:
            writer.add_scalar("train_loss",loss,index)
        index += 1

    # Testing loop
    total_loss = 0
    with torch.no_grad():
        for data in test:
            imgs,targets = data
            output = Yorelee(imgs)
            loss = loss_fn(output,targets)
            total_loss += loss
    print("测试: ",total_loss.item())
    writer.add_scalar("test_loss", total_loss, i)
writer.close()
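The curves can then be viewed by running tensorboard --logdir=logs in a terminal and opening the address it prints.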

11. For classification tasks

For a classification task, accuracy is a more meaningful measure of performance than an accumulated total_loss. The idea: torch.argmax picks, for each sample, the index of the largest score, i.e. the predicted class; comparing those indices with the true labels and summing the matches gives the number of correct predictions.
A simple example first:
The code is as follows:

import torch

output = torch.tensor([[0.1,0.2],
                       [0.3,0.4]])
output = torch.argmax(output,dim=1)   # predicted class per row -> tensor([1, 1])
preds = torch.tensor([0,1])           # here these play the role of the true labels
print((output==preds).sum())          # number of matching predictions

Output:

tensor(1)

12. Improving the testing loop

# TensorBoard visualization
writer = SummaryWriter("logs")

# Training loop
epoch = 10
for i in range(epoch):
    print("***********第{}轮训练***********".format(i+1))
    index = 1
    for data in train:
        imgs,targets = data
        output = Yorelee(imgs)
        loss = loss_fn(output,targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("({}) ".format(index),loss.item())
        if index % 100 == 0:
            writer.add_scalar("train_loss",loss,index)
        index += 1

    # Testing loop
    total_loss = 0
    with torch.no_grad():
        find_true = 0
        for data in test:
            imgs,targets = data
            output = Yorelee(imgs)
            # loss = loss_fn(output,targets)
            # total_loss += loss
            output = torch.argmax(output,dim=1)
            find_true += (output == targets).sum()
    print("测试: ",find_true/test_data_len.item())
    writer.add_scalar("test_loss", find_true/test_data_len, i)

13. Summary

model.py

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear


class Mary(nn.Module):
    def __init__(self):
        super(Mary,self).__init__()
        self.model1 = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )
    def forward(self,x):
        x = self.model1(x)
        return x

if __name__ == '__main__':
    input = torch.ones((64,3,32,32))
    Yorelee = Mary()
    output = Yorelee(input)
    print(output.shape)

train.py

import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

# Dataset
train_data = torchvision.datasets.CIFAR10(root="datasets",train=True,transform=torchvision.transforms.ToTensor(),download=True)
test_data = torchvision.datasets.CIFAR10(root="datasets",train=False,transform=torchvision.transforms.ToTensor(),download=True)

# Dataset sizes
train_data_len = len(train_data)
test_data_len = len(test_data)
print(train_data_len)
print(test_data_len)

# Data loaders
train = DataLoader(train_data,batch_size=64)
test = DataLoader(test_data,batch_size=64)

# Instantiate the model
Yorelee = Mary()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(Yorelee.parameters(),lr=learning_rate)

# TensorBoard visualization
writer = SummaryWriter("logs")

# Training loop
epoch = 10
for i in range(epoch):
    print("***********第{}轮训练***********".format(i+1))
    index = 1
    for data in train:
        imgs,targets = data
        output = Yorelee(imgs)
        loss = loss_fn(output,targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("({}) ".format(index),loss.item())
        if index % 100 == 0:
            writer.add_scalar("train_loss",loss,index)
        index += 1

    # Testing loop
    total_loss = 0
    with torch.no_grad():
        find_true = 0
        for data in test:
            imgs,targets = data
            output = Yorelee(imgs)
            # loss = loss_fn(output,targets)
            # total_loss += loss
            output = torch.argmax(output,dim=1)
            find_true += (output == targets).sum()
    print("测试: ",find_true/test_data_len.item())
    writer.add_scalar("test_loss", find_true/test_data_len, i)
writer.close()

14. Notes on XX.train() and XX.eval()

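model.train() and model.eval() switch a module between training and evaluation mode, which changes the behaviour of layers such as Dropout and BatchNorm. The Mary model above contains neither of these layers, so it runs correctly without the calls, but adding them is good practice. A minimal sketch of where they belong, assuming the objects from the training script above:

for i in range(epoch):
    Yorelee.train()              # training mode (affects Dropout/BatchNorm layers)
    for data in train:
        ...                      # forward pass, loss, backward, optimizer step as above

    Yorelee.eval()               # evaluation mode
    with torch.no_grad():
        for data in test:
            ...                  # accuracy computation as above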
