PyTorch: 102-Class Flower Classification (102 Category Flower Dataset)

This article describes image classification on the 102 Category Flower Dataset with PyTorch, covering the construction of VGG19 and ResNet152 models, hyperparameter settings, training, and validation.

PyTorch 102-class flower classification (VGG19 and ResNet152 models)

This article focuses on the implementation; the theory behind the models is left to the reader, who can consult the relevant papers.


Environment

Python 3.6 + PyTorch 1.2 + CUDA 10.1
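As a quick sanity check of the environment (a minimal sketch; newer PyTorch/CUDA combinations generally behave the same way), you can print the versions actually in use:

import torch

print('PyTorch:', torch.__version__)        # e.g. 1.2.0
print('CUDA (build):', torch.version.cuda)  # e.g. 10.1
print('CUDA available:', torch.cuda.is_available())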


Dataset

The 102 Category Flower Dataset consists of 102 categories of flowers commonly found in the United Kingdom, with 40 to 258 images per category.

A pre-split copy of the dataset is available here: https://download.youkuaiyun.com/download/ntntg/15535184?spm=1001.2014.3001.5503
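The loading code below assumes the archive unpacks into train/valid/test folders, each containing one subfolder per class, which is the layout torchvision.datasets.ImageFolder expects. A minimal sketch to verify this (the data_dir path is only an example and should point at your own copy):

import os

data_dir = 'E:/花卉分类考核项目/data'  # adjust to where the archive was unpacked
for split in ['train', 'valid', 'test']:
    split_dir = os.path.join(data_dir, split)
    # one subfolder per class, images inside each subfolder
    classes = [d for d in os.listdir(split_dir) if os.path.isdir(os.path.join(split_dir, d))]
    n_images = sum(len(os.listdir(os.path.join(split_dir, c))) for c in classes)
    print(f'{split}: {len(classes)} classes, {n_images} images')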


The implementation follows below.

Import the required libraries

import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data as D
import torchvision
from torchvision import transforms
import time
import os
import matplotlib.pyplot as plt

Data loading

# Read the split lists (kept for reference; the datasets below are built with ImageFolder)
train_path = pd.read_csv('E:/花卉分类考核项目/train.txt', sep=' ', names=['name', 'classes'])
test_path = pd.read_csv('E:/花卉分类考核项目/test.txt', sep=' ', names=['name', 'classes'])
valid_path = pd.read_csv('E:/花卉分类考核项目/valid.txt', sep=' ', names=['name', 'classes'])
# Data augmentation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomRotation(45),  # random rotation between -45 and +45 degrees
        transforms.CenterCrop(224),  # crop from the center
        transforms.RandomHorizontalFlip(p=0.5),  # random horizontal flip with probability 0.5
        transforms.RandomVerticalFlip(p=0.5),  # random vertical flip with probability 0.5
        transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),  # brightness, contrast, saturation, hue
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])  # ImageNet mean and std
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
}

# Build the three datasets
data_dir = 'E:/花卉分类考核项目/data'

image_datasets = {x: torchvision.datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in
                  ['train', 'valid', 'test']}
traindataset = image_datasets['train']
validdataset = image_datasets['valid']
testdataset = image_datasets['test']

batch_size = 1
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                              shuffle=True) for x in ['train', 'valid', 'test']}
print(dataloaders)
traindataloader = dataloaders['train']
validdataloader = dataloaders['valid']
testdataloader = dataloaders['test']

dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}
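To confirm the pipeline produces what the models expect, a minimal sketch is to pull one batch and check its shape and the number of classes:

# Images should come out as [batch_size, 3, 224, 224]; labels are class indices in [0, 101].
images, labels = next(iter(traindataloader))
print(images.shape, labels.shape)
print('number of classes:', len(traindataset.classes))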

Checking for a GPU

# Train on the GPU if one is available
train_on_gpu = torch.cuda.is_available()

if not train_on_gpu:
    print('CUDA is not available.  Training on CPU ...')
else:
    print('CUDA is available!  Training on GPU ...')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

Building the network models

VGG19-based model
class Net(nn.Module):
    def __init__(self, model):
        super(Net, self).__init__()
        self.features = model.features  # reuse the pretrained VGG19 convolutional features
        # Uncomment to freeze the convolutional features (only the new classifier would then be trained):
        # for p in self.parameters():
        #     p.requires_grad = False
        self.classifier = nn.Sequential(
            nn.Linear(25088, 4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5, inplace=False),
            nn.Linear(4096, 4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5, inplace=False),
            nn.Linear(4096, 102, bias=True)
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        return x


vgg = torchvision.models.vgg19(pretrained=True)
net = Net(vgg)
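If you only want to train the new classifier (as the commented-out lines above hint at), a minimal sketch is to freeze the convolutional backbone; the SGD optimizer defined further below already filters on requires_grad, so frozen parameters are simply skipped:

# Freeze the pretrained VGG19 features; only the new classifier will be updated.
for p in net.features.parameters():
    p.requires_grad = False

print('trainable tensors:', sum(1 for p in net.parameters() if p.requires_grad))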

ResNet152-based model

# Use the ResNet152 architecture
class Net(nn.Module):
    def __init__(self, model):
        super(Net, self).__init__()
        self.resnet = nn.Sequential(*list(model.children())[:-1])  # everything except the original fc layer
        self.fc = nn.Linear(in_features=2048, out_features=102)  # new head for 102 classes

    def forward(self, x):
        x = self.resnet(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x


resnet152 = torchvision.models.resnet152(pretrained=True)  # load ImageNet-pretrained ResNet152 weights

net = Net(resnet152)
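An equivalent and slightly shorter alternative (a sketch, not the author's original code) is to replace ResNet152's final fully connected layer in place instead of wrapping the model:

# Same effect as the wrapper class above: swap the 1000-class ImageNet head for a 102-class one.
resnet152 = torchvision.models.resnet152(pretrained=True)
resnet152.fc = nn.Linear(resnet152.fc.in_features, 102)
net = resnet152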

Setting the hyperparameters

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.0001, momentum=0.9)
loss_list = []  # 保存每一次的损失

Or, with the learning rate pulled out as a named variable:

# learning rate
LR = 0.0001
# loss function
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=LR, momentum=0.9)

Tune these values to your own setup; the sketch below shows one common alternative.
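As one example of such tuning (a sketch only; the values are illustrative, not results reported here), Adam combined with a step learning-rate decay is a common alternative to plain SGD:

# Illustrative alternative: Adam optimizer plus a learning-rate schedule.
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # decay the lr every 10 epochs
# If you use the scheduler, call scheduler.step() once per epoch inside the training loop.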

Validation-set evaluation

def valid_model(model, criterion):
    best_acc = 0.0
    print('-' * 10)

    running_loss = 0.0
    running_corrects = 0
    model = model.to(device)
    for inputs, labels in validdataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        model.eval()
        with torch.no_grad():
            outputs = model(inputs)
        loss = criterion(outputs, labels)

        _, preds = torch.max(outputs, 1)
        running_loss += loss.item()
        running_corrects += torch.sum(preds == labels)
    epoch_loss = running_loss / dataset_sizes['valid']
    print(running_corrects.double())
    epoch_acc = running_corrects.double() / dataset_sizes['valid']
    print('{} Loss: {:.4f} Acc: {:.4f}'.format(
        'valid', epoch_loss, epoch_acc))
    print('-' * 10)
    print()

Test-set evaluation

def test_model(model, criterion):
    best_acc = 0.0
    print('-' * 10)

    running_loss = 0.0
    running_corrects = 0
    model = model.to(device)
    for inputs, labels in testdataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        model.eval()
        with torch.no_grad():
            outputs = model(inputs)
        loss = criterion(outputs, labels)

        loss_list.append(loss.item())

        _, preds = torch.max(outputs, 1)
        running_loss += loss.item()
        running_corrects += torch.sum(preds == labels)
    epoch_loss = running_loss / dataset_sizes['test']
    print(running_corrects.double())
    epoch_acc = running_corrects.double() / dataset_sizes['test']
    print('{} Loss: {:.4f} Acc: {:.4f}'.format(
        'test', epoch_loss, epoch_acc))
    print('-' * 10)
    print()

Training the model


def train_model(model, criterion, optimizer, num_epochs=5):
    since = time.time()
    best_acc = 0.0
    for epoch in range(num_epochs):
        if (epoch + 1) % 5 == 0:
            test_model(model, criterion)  # evaluate on the held-out test set every 5 epochs
        print('-' * 10)
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))

        running_loss = 0.0
        running_corrects = 0
        model = model.to(device)
        for inputs, labels in traindataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            model.train()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, preds = torch.max(outputs, 1)
            running_loss += loss.item()
            running_corrects += torch.sum(preds == labels)
        epoch_loss = running_loss / dataset_sizes['train']
        print(dataset_sizes['train'])
        print(running_corrects.double())
        epoch_acc = running_corrects.double() / dataset_sizes['train']
        best_acc = max(best_acc, epoch_acc)
        print('{} Loss: {:.4f} Acc: {:.4f}'.format(
            'train', epoch_loss, epoch_acc))

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best train Acc: {:4f}'.format(best_acc))

    return model

Start training

epochs = 50
model = train_model(net, criterion, optimizer, epochs)

valid_model(model, criterion)
plt.figure(figsize=(50, 8))
plt.plot(range(len(loss_list)), loss_list)
plt.show()
torch.save(model, 'model.pkl')
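To reuse the saved model later for inference, a minimal sketch is shown below (the image path is a placeholder; because torch.save stored the whole module, the Net class definition must be importable when loading):

from PIL import Image

model = torch.load('model.pkl', map_location=device)
model.eval()

img = Image.open('some_flower.jpg').convert('RGB')          # placeholder image path
x = data_transforms['test'](img).unsqueeze(0).to(device)    # same preprocessing as the test set
with torch.no_grad():
    pred = model(x).argmax(dim=1).item()
print('predicted class:', testdataset.classes[pred])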
