1. ResNet-18 Network Architecture

ResNet is short for Residual Network. Kaiming He's paper "Deep Residual Learning for Image Recognition" won the CVPR Best Paper award, and the deep residual networks it introduced swept the major image-recognition competitions of 2015, winning several of them by a clear margin. While preserving accuracy, the network depth was pushed to 152 layers and later even beyond 1000 layers. The paper opens by explaining why depth helps: as a network gets deeper, the level of the learned features rises and the representational power of the network grows. This raises a natural question: can a better network be obtained simply by stacking more layers? The authors found experimentally that a plain network made deeper by naive stacking actually performs worse than a suitably shallower one. To address this, He et al. added a shortcut connection on top of the plain network, forming a residual block.
The fitting target then becomes F(x), where F(x) is the residual.
If the later layers of a deep network were identity mappings, the model would degenerate into a shallow network, so the problem reduces to learning an identity mapping. However, asking a stack of layers to fit the identity mapping H(x) = x directly is hard, and this may be exactly why deep plain networks are difficult to train. If instead the block is designed as H(x) = F(x) + x (see the residual block figure in the original paper), the layers only have to learn the residual function F(x) = H(x) - x. Whenever F(x) = 0, the block becomes the identity mapping H(x) = x, and fitting the residual is much easier than fitting the identity directly.
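To illustrate just the shortcut idea before the full model in section 3, here is a toy sketch of my own (TinyResidualBlock is a made-up name, not from the paper or the code below): a block whose output is F(x) + x, where zeroing the residual branch reduces the block to the identity mapping.

import torch
import torch.nn as nn

class TinyResidualBlock(nn.Module):
    """Toy block: H(x) = F(x) + x, where F is a small two-layer MLP."""
    def __init__(self, dim):
        super(TinyResidualBlock, self).__init__()
        self.residual = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(),
            nn.Linear(dim, dim),
        )

    def forward(self, x):
        return self.residual(x) + x  # shortcut adds the input to the residual branch

block = TinyResidualBlock(8)
# Force F(x) = 0 by zeroing the last layer; the block then reduces to the identity.
block.residual[2].weight.data.zero_()
block.residual[2].bias.data.zero_()
x = torch.randn(4, 8)
print(torch.equal(block(x), x))  # True: F(x) = 0  =>  H(x) = x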
2. Experiment Environment
PyTorch 0.4.0
torchvision 0.2.1
Python 3.6
CUDA 8 + cuDNN v7 (optional)
Win10 + PyCharm
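Before running the code below, the environment can be checked with a few standard attributes (nothing here is specific to this post):

import torch
import torchvision

print(torch.__version__)           # expect something like 0.4.0
print(torchvision.__version__)     # expect something like 0.2.1
print(torch.cuda.is_available())   # True if CUDA 8 + cuDNN are set up correctly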
3. Building ResNet-18 in PyTorch
'''ResNet-18 image classification for CIFAR-10 with PyTorch.
Author: 'Sun-qian'.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResidualBlock(nn.Module):
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        # Main branch F(x): two 3x3 convolutions, each followed by batch norm.
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        # Shortcut branch: identity by default; a 1x1 convolution matches the
        # shape whenever the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out = self.left(x)
        out += self.shortcut(x)   # H(x) = F(x) + x
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, ResidualBlock, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 64
        # Stem: a single 3x3 convolution (no max pooling, since CIFAR-10 images are only 32x32).
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # Four stages of two residual blocks each; stages 2-4 halve the spatial size.
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)  # e.g. [1, 1] or [2, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)   # 4x4 feature map -> 1x1
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


def ResNet18():
    return ResNet(ResidualBlock)
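As a quick sanity check (my own addition, assuming the ResNet18 defined above is in scope, e.g. saved as resnet.py as the training script below expects), a dummy CIFAR-10-sized batch can be pushed through the network to confirm the output shape:

import torch

net = ResNet18()
x = torch.randn(2, 3, 32, 32)     # a fake batch of two 32x32 RGB images
y = net(x)
print(y.shape)                     # torch.Size([2, 10]): one logit per CIFAR-10 class
print(sum(p.numel() for p in net.parameters()))   # roughly 11 million parameters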
4. Training in PyTorch
The dataset is CIFAR-10, which contains 60,000 labelled 32x32 colour images split evenly across 10 classes (6,000 per class): 50,000 images are used for training (5,000 per class) and 10,000 for testing (1,000 per class). The learning rate is adjusted by hand during training: lr = 0.1 for epochs 1-135, lr = 0.01 for epochs 136-185, and lr = 0.001 for epochs 186-240 (a scheduler-based sketch of this schedule is given after the code). The training code is as follows:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
from resnet import ResNet18

# Use the GPU if one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Command-line arguments, so the script can be configured Linux-style.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints')  # where results are saved
parser.add_argument('--net', default='./model/Resnet18.pth', help="path to net (to continue training)")   # checkpoint to resume from
args = parser.parse_args()

# Hyperparameters
EPOCH = 135        # number of passes over the training set
pre_epoch = 0      # number of epochs already completed
BATCH_SIZE = 128   # mini-batch size
LR = 0.1           # learning rate

# Prepare and preprocess the dataset.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),   # pad with 4 pixels of zeros, then randomly crop back to 32x32
    transforms.RandomHorizontalFlip(),      # flip horizontally with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # per-channel mean and std for R, G, B
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)   # training set
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)        # shuffled mini-batches
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

# CIFAR-10 class labels
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model definition - ResNet
net = ResNet18().to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()   # cross-entropy loss, standard for multi-class classification
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)  # mini-batch momentum SGD with L2 regularization (weight decay)

# Training
if __name__ == "__main__":
    best_acc = 85  # initial best test accuracy
    print("Start Training, Resnet-18!")
    with open("acc.txt", "w") as f:
        with open("log.txt", "w") as f2:
            for epoch in range(pre_epoch, EPOCH):
                print('\nEpoch: %d' % (epoch + 1))
                net.train()
                sum_loss = 0.0
                correct = 0.0
                total = 0.0
                for i, data in enumerate(trainloader, 0):
                    # prepare the mini-batch
                    length = len(trainloader)
                    inputs, labels = data
                    inputs, labels = inputs.to(device), labels.to(device)
                    optimizer.zero_grad()
                    # forward + backward
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)
                    loss.backward()
                    optimizer.step()
                    # print the running loss and accuracy after every batch
                    sum_loss += loss.item()
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += predicted.eq(labels.data).cpu().sum()
                    print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
                          % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                    f2.write('%03d  %05d |Loss: %.03f | Acc: %.3f%% '
                             % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                    f2.write('\n')
                    f2.flush()
                # evaluate on the test set after each epoch
                print("Waiting Test!")
                with torch.no_grad():
                    correct = 0
                    total = 0
                    for data in testloader:
                        net.eval()
                        images, labels = data
                        images, labels = images.to(device), labels.to(device)
                        outputs = net(images)
                        # take the class with the highest score as the prediction
                        _, predicted = torch.max(outputs.data, 1)
                        total += labels.size(0)
                        correct += (predicted == labels).sum()
                    print('Test accuracy: %.3f%%' % (100 * correct / total))
                    acc = 100. * correct / total
                    # write each test result to acc.txt as training goes on
                    print('Saving model......')
                    torch.save(net.state_dict(), '%s/net_%03d.pth' % (args.outf, epoch + 1))
                    f.write("EPOCH=%03d,Accuracy= %.3f%%" % (epoch + 1, acc))
                    f.write('\n')
                    f.flush()
                    # keep track of the best test accuracy in best_acc.txt
                    if acc > best_acc:
                        f3 = open("best_acc.txt", "w")
                        f3.write("EPOCH=%d,best_acc= %.3f%%" % (epoch + 1, acc))
                        f3.close()
                        best_acc = acc
            print("Training Finished, TotalEPOCH=%d" % EPOCH)
5. Results
best_acc= 95.170%
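Since the training script saves a state dict under args.outf after every epoch, any of those checkpoints can be reloaded later for evaluation. A minimal sketch (net_135.pth is just an example of the filename pattern used above):

import torch
from resnet import ResNet18

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = ResNet18().to(device)
net.load_state_dict(torch.load('./model/net_135.pth', map_location=device))  # example checkpoint name
net.eval()   # switch batch norm to inference mode before evaluating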

Reposted from: https://blog.youkuaiyun.com/sunqiande88/article/details/80100891