PyTorch Learning Notes: A Simple Residual-Block Network Implementation

I learned PyTorch from 小土堆's video series, then modified the example to use residual blocks for image classification on CIFAR-10; the accuracy is not high. A residual block learns a residual function F(x) and adds it back onto a (projected) copy of the input, y = F(x) + shortcut(x), which makes deeper stacks easier to optimize.

Model:

import torch
from torch import nn


class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()

        # Two 5x5 convs form the residual branch F(x).
        self.sequential = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=5, padding=2, bias=False),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2, bias=False),
        )
        # Projection shortcut: a 1x1 conv so the identity path matches the
        # residual branch's channel count before the add.
        self.identity_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False)

    def forward(self, x):
        identity = self.identity_conv(x)
        out = self.sequential(x)
        return torch.add(out, identity)  # y = F(x) + shortcut(x)

class TrainNnGPU(nn.Module):
    def __init__(self):
        super(TrainNnGPU, self).__init__()
        self.sequential1 = nn.Sequential(
            ResidualBlock(3, 32),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.sequential2 = nn.Sequential(
            ResidualBlock(32, 32),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.sequential3 = nn.Sequential(
            ResidualBlock(32, 16),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.sequential4 = nn.Sequential(
            ResidualBlock(16, 16),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.flatten = nn.Flatten()
        # Four MaxPool2d(2) layers reduce the 32x32 input to 2x2, so the
        # flattened feature vector has 16 * 2 * 2 = 64 elements.
        self.linear1 = nn.Linear(16 * 2 * 2, 64)
        self.act = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.linear2 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.sequential1(x)
        x = self.sequential2(x)
        x = self.sequential3(x)
        x = self.sequential4(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.act(x)
        x = self.dropout(x)
        x = self.linear2(x)
        return x
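
A quick shape sanity check (a minimal sketch, assuming CIFAR-10's 32x32 RGB inputs) can be appended to the model file:

if __name__ == '__main__':
    net = TrainNnGPU()
    dummy = torch.randn(64, 3, 32, 32)  # one CIFAR-10-sized batch
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([64, 10])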

Training:

import os
import torch
import torch.optim
import torchvision
from os.path import abspath, dirname, join
from torch import nn
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter


from model_gpu_to import TrainNnGPU

this_dir = dirname(abspath(__file__))
parent_dir = dirname(this_dir)
data_dir = join(parent_dir, 'data')
weights_dir = join(this_dir, 'trained_weights')
if not os.path.exists(weights_dir):
    os.makedirs(weights_dir)

# Assumes CIFAR-10 was already downloaded to data_dir; set download=True otherwise.
train_data = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=False,
                                          transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=False,
                                         transform=torchvision.transforms.ToTensor())


train_size = len(train_data)
test_size = len(test_data)
print(f'train set size: {train_size}')
print(f'test set size: {test_size}')


# Shuffle the training set each epoch; the test set order does not matter.
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)

my_net = TrainNnGPU()
print(f'cuda available: {torch.cuda.is_available()}')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
my_net = my_net.to(device)
# Only three things need moving to the device: the model, the loss function, and the data.

learning_rate = 0.01
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)
optimizer = torch.optim.SGD(my_net.parameters(), lr=learning_rate)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=100, eta_min=0)
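# CosineAnnealingLR follows half a cosine period:
#   lr(t) = eta_min + (lr0 - eta_min) * (1 + cos(pi * t / T_max)) / 2
# With T_max=100 (matching epochs below), the rate decays from 0.01 to
# eta_min=0 over the run.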


total_train_step = 0
total_test_step = 0


epochs = 100
writer = SummaryWriter(join(this_dir, 'logs'))

for epoch in range(epochs):
    # BatchNorm and Dropout behave differently during training and
    # evaluation, so switch modes explicitly (see torch.nn.Module in the docs).
    my_net.train()
    for data in train_dataloader:
        img, label = data
        img = img.to(device)
        label = label.to(device)
        output = my_net(img)
        loss = loss_fn(output, label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1

        if total_train_step % 100 == 0:
            writer.add_scalar('Loss/train', loss.item(), total_train_step)

    total_test_loss = 0
    total_test_accuracy_items = 0
    my_net.eval()
    with torch.no_grad():
        for data_t in test_dataloader:
            img_t, label_t = data_t
            img_t = img_t.to(device)
            label_t = label_t.to(device)
            output_t = my_net(img_t)
            loss = loss_fn(output_t, label_t)
            total_test_loss = total_test_loss + loss.item()

            acc = (output_t.argmax(1) == label_t).sum().item()
            total_test_accuracy_items = total_test_accuracy_items + acc

    print(f'epoch {epoch + 1} lr={optimizer.param_groups[0]["lr"]}')
    print(f'test loss: {total_test_loss}')
    print(f'test accuracy: {total_test_accuracy_items/test_size}\n')
    writer.add_scalar('Loss/epoch', total_test_loss, epoch + 1)
    writer.add_scalar('Accuracy/epoch', total_test_accuracy_items/test_size, epoch + 1)

    # Save a checkpoint every 10 epochs; saving the state_dict is the
    # recommended format.
    # if epoch % 10 == 0:
    #     torch.save(my_net.state_dict(), join(weights_dir, f'model_epoch{epoch}.pth'))
    #     print(f'-----epoch {epoch + 1} weights saved-----\n')

    scheduler.step()

writer.close()
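
Inference (a minimal sketch; it assumes the checkpointing block above was re-enabled so that a state_dict file such as trained_weights/model_epoch90.pth exists, a hypothetical name):

import torch
from model_gpu_to import TrainNnGPU

model = TrainNnGPU()
# map_location lets a checkpoint trained on GPU load on a CPU-only machine.
state = torch.load('trained_weights/model_epoch90.pth', map_location='cpu')
model.load_state_dict(state)
model.eval()  # put BatchNorm/Dropout into inference mode
with torch.no_grad():
    logits = model(torch.randn(1, 3, 32, 32))  # replace with a real image tensor
    print(logits.argmax(1))

The loss and accuracy curves logged above can be viewed with tensorboard --logdir logs, run from the script's directory.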
