深度学习源码中经常出现的 logits 其实和统计中定义的 logit 函数 log(p/(1-p)) 没什么太大关系:logits 可以看成神经网络最后一层全连接层的输出(即输入乘以权重再加偏置,未经激活函数)。该输出一般会再接一个 softmax layer,输出 normalize 后的概率,用于多分类。
pre-logits就是一个全连接层+tanh激活函数。
举个例子
import torch
import torch.nn as nn
class Net(nn.Module):
    """Small CNN classifier whose final linear layer emits raw logits.

    No softmax is applied inside the network; callers typically feed the
    returned logits to a loss like ``nn.CrossEntropyLoss`` (which applies
    log-softmax internally) or to ``softmax`` for probabilities.

    NOTE(review): the flatten size 64 * 7 * 7 implies a fixed input
    spatial size — 34x34 satisfies it (34 -> conv 32 -> pool 16 ->
    conv 14 -> pool 7). TODO confirm the intended input size.
    """

    def __init__(self):
        super().__init__()
        # Two conv+maxpool stages: 3 -> 32 -> 64 channels.
        self.conv1 = nn.Conv2d(3, 32, (3, 3))
        self.pool1 = nn.MaxPool2d((2, 2))
        self.conv2 = nn.Conv2d(32, 64, (3, 3))
        self.pool2 = nn.MaxPool2d((2, 2))
        # Classifier head: flattened features -> 128 -> 2 classes.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 2)

    def forward(self, x):
        """Return raw logits of shape (N, 2) for input ``x`` of shape (N, 3, H, W)."""
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        # Flatten all feature maps; -1 infers the batch dimension.
        x = x.view(-1, 64 * 7 * 7)
        x = self.fc1(x)
        x = self.fc2(x)  # raw logits — deliberately no softmax here
        return x
# Build the model and run one forward pass; the raw (pre-softmax)
# output of the final linear layer is the logits.
model = Net()
# The original snippet used `x` without defining it (NameError);
# construct a dummy batch of the spatial size the flatten expects.
x = torch.randn(1, 3, 34, 34)
logits = model(x)
也就是说,`self.fc2(x)` 这一层的输出就是 logits。