- 🍨 本文为🔗365天深度学习训练营 中的学习记录博客
- 🍖 原作者:K同学啊
理论知识
GoogLeNet首次出现就在2014年的ILSVRC比赛中获得冠军,最初的版本为InceptionV1。共有22层深,参数量5M。
可以达到同时期VGGNet的性能,但是参数量更少。
Inception
GoogLeNet的核心模块是Inception模块。主要的思路是将提取特征的卷积并行,在同一层进行多种尺度的卷积计算。
初始版本(上图左侧所示)就是沿着上述的思路设计的,后面在初始版本的基础上,借鉴了Network-in-Network的思想,使用了1x1卷积实现降维操作,减小网络的参数量和计算量。如上图右侧所示。
卷积计算
卷积的参数量计算可以通俗的来讲为(原图通道数*目标通道数*卷积核宽*卷积核高 + 目标通道数)
例如:对100x100x128做通道为256的5x5卷积(填充方式为same,不改变图像尺寸)。参数量为128x256x5x5+256 = 819456
如果先加入一个通道为32的1x1卷积,再做通道为256的5x5卷积,参数量就是 128x32x1x1 + 32 + 32x256x5x5 + 256 = 209184。由此可见,加入1x1卷积后参数量减少到原来的1/4左右。
模型结构
InceptionV1的完整模块和结构图如下
模型实现
inception 结构
class inception_block(nn.Module):
    """GoogLeNet Inception module: four parallel branches concatenated on channels.

    Branches: 1x1 conv | 1x1->3x3 conv | 1x1->5x5 conv | 3x3 maxpool->1x1 proj.
    Spatial size is preserved by each branch, so the outputs can be concatenated;
    output channel count = ch1x1 + ch3x3 + ch5x5 + pool_proj.
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__()

        # Local helper: Conv2d -> BatchNorm2d -> ReLU as a flat layer list,
        # so each branch keeps the exact same Sequential child ordering.
        def conv_bn_relu(c_in, c_out, **conv_kwargs):
            return [
                nn.Conv2d(c_in, c_out, **conv_kwargs),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]

        # Branch 1: plain 1x1 convolution.
        self.branch1 = nn.Sequential(*conv_bn_relu(in_channels, ch1x1, kernel_size=1))

        # Branch 2: 1x1 channel reduction, then 3x3 convolution (padding keeps size).
        self.branch2 = nn.Sequential(
            *conv_bn_relu(in_channels, ch3x3red, kernel_size=1),
            *conv_bn_relu(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 channel reduction, then 5x5 convolution (padding keeps size).
        self.branch3 = nn.Sequential(
            *conv_bn_relu(in_channels, ch5x5red, kernel_size=1),
            *conv_bn_relu(ch5x5red, ch5x5, kernel_size=5, padding=2),
        )

        # Branch 4: 3x3 max-pool (stride 1, padded), then 1x1 projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            *conv_bn_relu(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        """Apply all four branches to x and concatenate along the channel dim."""
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], dim=1)
GoogLeNet模型
class InceptionV1(nn.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0)
self.conv3 = nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1)
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = nn.Sequential(
inception_block(832, 384, 192, 384, 48, 128, 128),
nn.AvgPool2d(kernel_size=7, stride=1, padding=0),
nn.Dropout(0.4)
)
# 全连接层
self.classifier = nn.Sequential(
nn.Linear(in_features=1024, out_features=1024),
nn.ReLU(),
nn.Linear(in_features=1024, out_features=num_classes),
nn.S