VGG transfer learning: using your own data, loading pretrained parameters, and changing the number of VGG output classes

Using PyTorch, we load the pretrained VGG model and its parameters, saving the parameters to a local file ***.pth.
We then change the final classification layer of VGG to output 3 classes. The original model was trained on ImageNet with 1000 classes, so the pretrained parameters cannot fully match the newly built model structure, but every parameter except the last layer's can be reused.
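
The code below assumes those pretrained weights have already been saved to a local .pth file. A minimal sketch of one way to produce that file (pretrained=True is the classic torchvision API; newer releases use the weights= argument instead):

    import os
    import torch
    import torchvision.models as models

    os.makedirs('./model_pth', exist_ok=True)
    # download the ImageNet-pretrained VGG16-BN weights once and save them
    # to the local path used by the rest of the code
    net16 = models.vgg16_bn(pretrained=True)
    torch.save(net16.state_dict(), './model_pth/vgg16_bn-6c64b313.pth')

With that file in place, the partial loading itself looks like this: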

    model = VGG(make_layers(cfg,batch_norm=True),**kwargs)
    net16 = models.vgg16_bn()
    print(model)
    print(net16)
    net16.load_state_dict(torch.load(model_path))
    print(net16.state_dict().keys())
    net16_dict = net16.state_dict()
    model_dict = model.state_dict()
    print(model_dict.keys())
    # copy every matching parameter except the final classifier layer (classifier.6)
    net16_pre_dict = {k: v for k, v in net16_dict.items() if k in model_dict and 'classifier.6' not in k}
    #print(net16_pre_dict.keys())
    model_dict.update(net16_pre_dict)
    model.load_state_dict(model_dict)

In the code above, VGG is the model we create ourselves by modifying the original vgg: the final classification layer goes from 1000 outputs to 3. We load every parameter of the original model except the last layer's, then retrain on our own data.
Alternatively, you can set requires_grad=False on the loaded pretrained parameters and requires_grad=True on the last layer only, so that only the last layer is trained (see the sketch below). The code in this post does not do that; it keeps the defaults, so all parameters of the new network are retrained.
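
For reference, a minimal sketch of that freezing variant, assuming model is the instance returned by vgg16() in the full code below (classifier[6] is the final nn.Linear of this architecture):

    # freeze everything, then unfreeze only the final classification layer
    for param in model.parameters():
        param.requires_grad = False
    for param in model.classifier[6].parameters():
        param.requires_grad = True
    # hand only the trainable parameters to the optimizer
    optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=LR, momentum=0.9)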

The complete code follows; the image paths must be changed to your own training and test set paths.
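
Since ImageFolder derives class labels from subdirectory names, the two roots used below are assumed to be laid out roughly like this (the class names are placeholders; three subdirectories match num_classes=3):

    Imagestrain/
        class_a/*.jpg
        class_b/*.jpg
        class_c/*.jpg
    Imagestest/
        class_a/*.jpg
        class_b/*.jpg
        class_c/*.jpg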

#coding=utf-8
import torch
import torch.nn as nn
import math
import torchvision.transforms as transforms
import torchvision as tv
from torch.utils.data import DataLoader,Subset
from torch.utils.data import Dataset
import torchvision.models as models
import numpy as np
import pdb
import matplotlib.pyplot as plt
import copy
from torch.optim import lr_scheduler
from PIL import Image
model_path = './model_pth/vgg16_bn-6c64b313.pth'
BATCH_SIZE = 4
LR = 0.001
EPOCH = 5
class VGG(nn.Module):
    def __init__(self,features,num_classes=3):
        super(VGG,self).__init__()
        #feature extractor (convolution and pooling layers only, no classifier)
        self.features = features
        self.classifier = nn.Sequential(#classifier head
            #fc6
            nn.Linear(512*7*7,4096),
            nn.ReLU(),
            nn.Dropout(),
            #fc7
            nn.Linear(4096,4096),
            nn.ReLU(),
            nn.Dropout(),
            #fc8
            nn.Linear(4096,num_classes)
        )
        self._initialize_weights()
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0),-1)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m,nn.Conv2d):
                n = m.kernel_size[0]*m.kernel_size[1]*m.out_channels
                m.weight.data.normal_(0,math.sqrt(2./n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m,nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m,nn.Linear):
                m.weight.data.normal_(0,0.01)
                m.bias.data.zero_()
cfg = [64,64,'M',128,128,'M',256,256,256,'M',512,512,512,'M',512,512,512,'M']
#build the list of layers described by cfg
def make_layers(cfg,batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers+=[nn.MaxPool2d(kernel_size=2,stride=2)]
        else:
            #convolution layer with v output channels
            conv2d = nn.Conv2d(in_channels,v,3,padding=1)
            if batch_norm:
                layers +=[conv2d,nn.BatchNorm2d(v),nn.ReLU(inplace=True)]
            else:
                layers+=[conv2d,nn.ReLU(inplace=True)]
            in_channels=v
    return nn.Sequential(*layers)#return the layers wrapped in a sequential container
def vgg16(**kwargs):
    model = VGG(make_layers(cfg,batch_norm=True),**kwargs)
    net16 = models.vgg16_bn()
    print(model)
    print(net16)
    net16.load_state_dict(torch.load(model_path))
    print(net16.state_dict().keys())
    net16_dict = net16.state_dict()
    model_dict = model.state_dict()
    print(model_dict.keys())
    # copy every matching parameter except the final classifier layer (classifier.6)
    net16_pre_dict = {k: v for k, v in net16_dict.items() if k in model_dict and 'classifier.6' not in k}
    #print(net16_pre_dict.keys())
    model_dict.update(net16_pre_dict)
    model.load_state_dict(model_dict)
    #model.load_state_dict(torch.load(model_path))
    return model
def getMyData():
    transform = transforms.Compose([
        transforms.Resize(size=(224,224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])#ImageNet statistics, matching the pretrained weights and imshow below
    ])
    transform_test = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    root="Imagestrain/"
    trainset = tv.datasets.ImageFolder(root,transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,batch_size=BATCH_SIZE,shuffle=True,num_workers=2)
    root = "Imagestest/"
    testset = tv.datasets.ImageFolder(root, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
    return trainloader,testloader
def trainandsave():
    trainloader,testloader = getMyData()
    net = vgg16()
    net.train()
    #print(net)
    use_gpu = True
    if torch.cuda.is_available() and use_gpu:
        net=net.cuda()
    #Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    #optimizer = torch.optim.Adam(net.parameters(),lr=LR)
    optimizer = torch.optim.SGD(net.parameters(),lr=LR,momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    fig = plt.figure(figsize=[15,7])
    plt_left=fig.add_subplot(1,2,1)
    plt_right = fig.add_subplot(1,2,2)
    plt_left.set_xlim(0,EPOCH)
    plt_left.set_ylim(0,1000)
    plt_left.set_xlabel("epoch")
    plt_left.set_ylabel("loss")
    plt_right.set_xlim(0, EPOCH)
    plt_right.set_ylim(0, 1)
    plt_right.set_xlabel("epoch")
    plt_right.set_ylabel("acc")
    plt.ion()
    #plt.show()
    loss_total=[]
    x=[]
    acc_x=[]#accumulated across epochs, so the accuracy curve keeps its history
    acc_y=[]
    weight_wb=copy.deepcopy(net.state_dict())
    best_acc=0.0
    for epoch in range(EPOCH):
        running_loss=0.0
        for i,data in enumerate(trainloader):
            #pdb.set_trace()
            inputs,labels=data
            optimizer.zero_grad()
            #with torch.set_grad_enabled(True):
            if use_gpu and torch.cuda.is_available():
                inputs = inputs.cuda()
            outputs = net(inputs)
            if use_gpu and torch.cuda.is_available():
                outputs = outputs.cpu()#bring outputs back to the CPU, where the labels live, before computing the loss
            loss = criterion(outputs,labels)
            loss.backward()
            optimizer.step()
            running_loss+=loss.item()*inputs.size(0)#use the actual batch size; the last batch may be smaller than BATCH_SIZE
        acc = test(net, testloader,use_gpu)
        if acc>best_acc:
            best_acc=acc
            weight_wb = copy.deepcopy(net.state_dict())
        acc_x.append(epoch)
        acc_y.append(acc)
        try:
            lines[0].remove()#clear the previous accuracy curve before redrawing
        except NameError:#no curve drawn yet on the first epoch
            pass
        lines = plt_right.plot(acc_x, acc_y)
        plt.pause(0.01)
        temp=float(running_loss)/len(trainloader.dataset.imgs)
        print('Epoch', epoch, 'loss:%.4f' % temp, 'test accuracy:%.4f' % acc)
        loss_total.append(running_loss)
        print('Epoch', epoch, 'total loss:%.4f' % running_loss)
        x.append(epoch)
        try:
            line_left[0].remove()#clear the previous loss curve before redrawing
        except NameError:
            pass
        line_left = plt_left.plot(x,loss_total)
        plt.pause(0.01)
        #step the LR scheduler once per epoch, after the optimizer updates (the required order since PyTorch 1.1)
        exp_lr_scheduler.step()
        #plt.show()
    plt.ioff()
    plt.show()
    print("best acc:%.4f"%best_acc)
    print('Finished Training')
    net.load_state_dict(weight_wb)
    torch.save(net,'net.pkl')
    torch.save(net.state_dict(),'net_params.pkl')
    visualize_model(net,testloader)
    return net
def test(net,testdata,use_gpu):
    correct,total=.0,.0
    net.eval()
    for inputs ,labels in testdata:
        if use_gpu and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = net(inputs)
        if use_gpu and torch.cuda.is_available():
            outputs = outputs.cpu()
        _,predicted = torch.max(outputs,1)
        total+=labels.size(0)
        correct+=(predicted==labels).sum()
    net.train()
    return float(correct)/total
def imshow(inp,title=None):
    """Imshow for Tensor"""
    inp = inp.numpy().transpose((1,2,0))
    mean = np.array([0.485,0.456,0.406])#must match the Normalize statistics used in getMyData
    std = np.array([0.229,0.224,0.225])
    inp = std*inp+mean
    inp = np.clip(inp,0,1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
def visualize_model(model,testloader,num_images=6):
    was_training = model.training
    model.eval()
    images_so_far=0
    fig = plt.figure()
    with torch.no_grad():
        for i,(inputs,labels) in enumerate(testloader):
            if torch.cuda.is_available():#only move to the GPU when one exists
                inputs = inputs.cuda()
                labels = labels.cuda()
            outputs = model(inputs)
            _,preds = torch.max(outputs,1)
            for j in range(inputs.size()[0]):
                images_so_far+=1
                ax = plt.subplot(num_images//2,2,images_so_far)
                ax.axis('off')
                ax.set_title('predicted:{}'.format(preds[j]))
                imshow(inputs.cpu().data[j])
                if images_so_far==num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
if __name__ =='__main__':
    #net = train()
    net = trainandsave()
    if torch.cuda.is_available():
        net = net.cpu()
    torch.save(net,'vgg.model')
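
After training, the script leaves behind net.pkl / net_params.pkl (the best weights found during training) and vgg.model (the full model object, moved to the CPU). A minimal sketch of reloading the saved weights for inference; some_image.jpg is a placeholder path:

    # rebuild the architecture and load the best state_dict saved above
    net = VGG(make_layers(cfg, batch_norm=True))
    net.load_state_dict(torch.load('net_params.pkl'))
    net.eval()#switch BatchNorm/Dropout to inference behavior

    # classify one image with the same preprocessing as the test set
    transform_test = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = transform_test(Image.open('some_image.jpg').convert('RGB')).unsqueeze(0)
    with torch.no_grad():
        pred = net(img).argmax(dim=1)
    print(pred.item())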
