Training is very slow on my machine, even 5 epochs take forever, and I don't know why. MacBook Pro 2017.
"""使用VGG预训练网络"""
import torch
import torchvision.models
from torch import nn
from torchvision import transforms
import os
import matplotlib.pyplot as plt
"""预训练网络是一个保存好的之前已在大型数据集上训练好的卷积神经网络,那么可以有效的去提取视觉特征
VGG11——VGG19 前几层是用3*3的卷积核来增加网络深度,通过max pooling 以此减少每层的神经元数量,
最后三层分别是2个有4096个神经元的全链接层和softmax层
网络太深,导致梯度消失或者梯度爆炸"""
# The input images must not be too small: after several pooling layers a feature map can become smaller than the next kernel and the forward pass will fail
base_dir = r'./dataset/4weather'
# os.path.join() concatenates path components; it accepts multiple parts and inserts the missing '/' separator automatically
# Reference: https://blog.youkuaiyun.com/swan777/article/details/89040802
train_dir = os.path.join(base_dir, 'train')
test_dir = os.path.join(base_dir, 'test')
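# Optional sanity check (a small addition, not in the original script): os.path.join only
# builds the path string, it does not create the folders, so confirm they actually exist.
assert os.path.isdir(train_dir) and os.path.isdir(test_dir), 'dataset folders not found'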
transform = transforms.Compose([
    transforms.Resize((196, 196)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
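# Quick illustration (optional): with mean=std=0.5 per channel, Normalize maps the [0, 1]
# output of ToTensor to roughly [-1, 1], i.e. out = (in - 0.5) / 0.5. For example, a zero
# pixel becomes (0 - 0.5) / 0.5 = -1.0 (the _norm_demo name is just for this demo):
_norm_demo = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(torch.zeros(3, 2, 2))
print('normalized zero pixel:', _norm_demo[0, 0, 0].item())  # -1.0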
# Create the datasets
train_ds = torchvision.datasets.ImageFolder(
    train_dir,
    transform=transform
)
test_ds = torchvision.datasets.ImageFolder(
    test_dir,
    transform=transform
)
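# ImageFolder derives the labels from the sub-folder names under train/; printing them shows
# the label-to-index mapping (with the 4weather data this should list 4 classes, which is what
# the new 4-unit output layer below assumes).
print('classes:', train_ds.classes, train_ds.class_to_idx)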
# Create the dataloaders
batch = 8
train_dl = torch.utils.data.DataLoader(
    train_ds,
    batch_size=batch,
    shuffle=True
)
test_dl = torch.utils.data.DataLoader(
    test_ds,
    batch_size=batch
)
imgs, labels = next(iter(train_dl))
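# Quick shape check on one batch: with batch = 8 and Resize((196, 196)) the images should
# come out as [8, 3, 196, 196] and the labels as [8].
print(imgs.shape, labels.shape)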
# Build the model
# pretrained=True loads the ImageNet weights along with the architecture (newer torchvision versions use the weights= argument instead)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
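# A 2017 MacBook Pro has no CUDA-capable GPU, so this falls back to 'cpu'; running VGG16
# forward/backward passes on the CPU is the most likely reason even a few epochs feel slow.
# Printing the device makes it easy to confirm where training actually runs.
print('running on:', device)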
model = torchvision.models.vgg16(pretrained=True).to(device)
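# Inspecting the head shows the two 4096-unit fully connected layers mentioned above and the
# final Linear(4096, 1000) ImageNet output layer that gets replaced below.
print(model.classifier)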
# Freeze the convolutional base and replace the classifier's last layer with a new 4-class
# output layer (assigning out_features alone does not resize the existing weights).
for p in model.features.parameters():
    p.requires_grad = False
model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, 4).to(device)
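# Optional check that the freezing worked: only the classifier parameters should still require
# gradients after the loop above (trainable/frozen are just illustrative names for this check).
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
print('trainable params:', trainable, '| frozen params:', frozen)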
# Only the classifier parameters are handed to the optimizer
optim = torch.optim.Adam(model.classifier.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
def fit(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0
    model.train()  # train mode, so Dropout (and BatchNorm, if any) behave correctly
    for x, y in trainloader:
        if torch.cuda.is_available():
            x, y = x.to('cuda'), y.to('cuda')
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optim.zero_grad()
        loss.backward()
        optim.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
    epoch_loss = running_loss / len(trainloader)
    epoch_acc = correct / total

    test_correct = 0
    test_total = 0
    test_running_loss = 0
    model.eval()  # eval mode for the test pass
    with torch.no_grad():
        for x, y in testloader:
            if torch.cuda.is_available():
                x, y = x.to('cuda'), y.to('cuda')
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
    epoch_tst_loss = test_running_loss / len(testloader)
    epoch_tst_acc = test_correct / test_total

    print('epoch', epoch, 'loss', round(epoch_loss, 3),
          'acc:', round(epoch_acc, 3),
          'test_loss:', round(epoch_tst_loss, 3),
          'test_acc:', round(epoch_tst_acc, 3))
    return epoch_loss, epoch_acc, epoch_tst_loss, epoch_tst_acc
# Run the training loop and collect the per-epoch metrics
epochs = 30
train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_tst_loss, epoch_tst_acc = fit(epoch, model, train_dl, test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_tst_loss)
    test_acc.append(epoch_tst_acc)
# Plot the loss curves
plt.plot(range(1, epochs + 1), train_loss, label='train_loss')
plt.plot(range(1, epochs + 1), test_loss, label='test_loss')
plt.legend()
plt.show()
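# The accuracy lists are collected above but never shown; a second figure for them is a
# straightforward optional addition.
plt.plot(range(1, epochs + 1), train_acc, label='train_acc')
plt.plot(range(1, epochs + 1), test_acc, label='test_acc')
plt.legend()
plt.show()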