Update: if you hit this error, check the folder structure of your dataset, in both the training set and the test set. In my case there was one extra empty subfolder, and that alone caused the error: torchvision's ImageFolder treats every subdirectory as a class, so an empty folder quietly adds one more class index.
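Here is a minimal sketch of that check; the data/train and data/val paths are placeholders for your own layout:

import torchvision

for split in ("data/train", "data/val"):  # placeholder paths
    ds = torchvision.datasets.ImageFolder(split)
    # Every subdirectory becomes a class index; depending on the torchvision
    # version, an empty folder is either counted silently or raises an error.
    print(split, ds.class_to_idx)
    print(split, "num classes:", len(ds.classes))

The model's final layer needs exactly len(ds.classes) outputs; with fewer, the labels at the top of the range trigger the out-of-bounds error.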
First of all, thanks to this author, whose post gave me the lead; to look after beginners like me, what follows is a spoon-feeding style walkthrough:

解决IndexError: Target 11 is out of bounds.问题 (weixin_55191433's blog, CSDN)

In the earlier video tutorial, the blogger's model would not run for me: it kept raising Target 5 is out of bounds, which stumped me for a long time, and this article came out of finally solving it.
The first example is AlexNet. The original post showed the model in screenshots; the relevant piece is the classifier, whose final layer is sized by num_classes. Changing that single value is enough to make training run. The num_classes=1000 default is wrong for this task, since the dataset has only 5 categories, yet num_classes=5 still raised the error and only 6 worked. At the time I could not explain why and asked readers to comment; the update at the top is the answer: the empty folder added a sixth class index, so the target labels ran from 0 to 5 and the output layer needed 6 units.
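In fact, the error message comes from the loss function, not from data loading: nn.CrossEntropyLoss requires every target label to lie in [0, C-1], where C is the size of the model's output layer. A minimal sketch that reproduces the message:

import torch
from torch import nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(1, 5)  # output of a model with num_classes=5
target = torch.tensor([5])  # label index 5 would be a sixth class
criterion(logits, target)   # IndexError: Target 5 is out of bounds.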
A second example: GoogLeNet. Here it is not just one value; every fully connected classifier head has to change. I routed them all through a single variable, linear_num_classes, to save trouble, but 6 did not work for this model; I had to try values one by one until 8 did, presumably for the same reason as above: the number has to match the total count of class folders, empty ones included. (Instead of guessing, you can read it off the dataset, as in the snippet after this paragraph.) The auxiliary classifiers have to be changed too, since each one ends in its own fully connected output layer.
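For reference, a small sketch that derives the value from the dataset itself, using the GoogLeNet class defined below and the same placeholder path as above:

import torchvision

num_classes = len(torchvision.datasets.ImageFolder("data/train").classes)
model = GoogLeNet(linear_num_classes=num_classes)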
Finally, here is the full source code I wrote:
import torch
from torch import nn
import torch.nn.functional as F
class GoogLeNet(nn.Module):
    def __init__(self, num_classes=5, aux_logits=True, init_weights=False, linear_num_classes=8):
        # num_classes: number of target classes (note: unused below, since every
        #   classifier head is sized by linear_num_classes instead)
        # aux_logits: whether to use the two auxiliary classifiers
        # init_weights: whether to initialize the parameters
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits
        # input 224 x 224, output 112 x 112
        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)  # ceil_mode=True rounds fractional sizes up
        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
        if self.aux_logits:
            self.aux1 = InceptionAux(512, linear_num_classes)
            self.aux2 = InceptionAux(528, linear_num_classes)
        self.averagepool1 = nn.AdaptiveAvgPool2d((1, 1))  # adaptive average pooling maps inputs of any size to the given output size
        self.dropout = nn.Dropout(0.4)
        self.fc = nn.Linear(1024, linear_num_classes)
        if init_weights:
            self._initialize_weights()
    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.maxpool2(x)
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)
        x = self.inception4a(x)
        if self.aux_logits and self.training:  # auxiliary classifiers are only enabled in training mode
            aux1 = self.aux1(x)  # output of auxiliary classifier 1
        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        if self.training and self.aux_logits:
            aux2 = self.aux2(x)  # output of auxiliary classifier 2
        x = self.inception4e(x)
        x = self.maxpool4(x)
        x = self.inception5a(x)
        x = self.inception5b(x)
        # n x 1024 x 7 x 7
        x = self.averagepool1(x)
        # n x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)
        # n x linear_num_classes
        if self.training and self.aux_logits:
            return x, aux2, aux1  # returned during training
        return x  # returned during evaluation
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
class Inception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1))
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2))
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1))

    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)
        outputs = [branch1, branch2, branch3, branch4]
        return torch.cat(outputs, 1)  # concatenate the branches along the channel (depth) dimension
class InceptionAux(nn.Module):
    def __init__(self, in_channels, linear_num_classes):
        super(InceptionAux, self).__init__()
        self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3)
        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)  # output [batch, 128, 4, 4]
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, linear_num_classes)

    def forward(self, x):
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = self.averagePool(x)
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)
        x = F.dropout(x, 0.5, training=self.training)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        x = F.dropout(x, 0.5, training=self.training)
        # N x 1024
        x = self.fc2(x)
        # N x linear_num_classes
        return x
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        return x
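To finish, a minimal training-step sketch showing how the three outputs fit together. The 0.3 weights on the auxiliary losses follow the original GoogLeNet paper; the optimizer, batch size, and learning rate here are placeholder assumptions, not values from my training script:

model = GoogLeNet(aux_logits=True, init_weights=True, linear_num_classes=8)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)  # placeholder settings

model.train()
images = torch.randn(4, 3, 224, 224)   # dummy batch of 224 x 224 images
labels = torch.randint(0, 8, (4,))     # labels must lie in [0, 7]
logits, aux2, aux1 = model(images)     # training mode returns three outputs
loss = (criterion(logits, labels)
        + 0.3 * criterion(aux1, labels)
        + 0.3 * criterion(aux2, labels))
optimizer.zero_grad()
loss.backward()
optimizer.step()

model.eval()  # evaluation mode returns only the main logits
with torch.no_grad():
    preds = model(images).argmax(dim=1)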