Preface:
The main job of the RPN is to generate candidate boxes (proposals) and to optimize them via the loss. A proposal is obtained from a generated anchor plus the predicted offset between that anchor and its ground-truth box, so the core task of the RPN is to pick reliable anchors. This requires a feature map that scores each anchor's contribution: 1 means the anchor certainly contains an object, 0 means background. That is the cls_logits feature map: its number of channels equals the number of anchor types represented at each point of the feature map, each point corresponds to a specific anchor location in the original image, and different feature levels correspond to different anchor sizes. Similarly we need a layer that predicts the box regression, bbox_pred; the difference is that its channel count is 4 times that of cls_logits (four offset values per anchor). From these two outputs we can go on to compute the predicted boxes and the loss values.
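To make the channel layout concrete, here is a minimal sketch (sizes are made up: one image, a 256-channel 50x50 feature map, and 3 anchor types per location) showing why cls_logits has num_anchors channels while bbox_pred has num_anchors * 4:

import torch
from torch import nn

# hypothetical sizes: batch 1, 256 channels, 50x50 feature map, 3 anchor types per location
N, C, H, W, A = 1, 256, 50, 50, 3
feature = torch.randn(N, C, H, W)

cls_logits = nn.Conv2d(C, A, kernel_size=1)     # one objectness score per anchor type
bbox_pred = nn.Conv2d(C, A * 4, kernel_size=1)  # four offsets (dx, dy, dw, dh) per anchor type

print(cls_logits(feature).shape)  # torch.Size([1, 3, 50, 50])
print(bbox_pred(feature).shape)   # torch.Size([1, 12, 50, 50])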
The code, with detailed annotations:
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn

from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.rpn.retinanet.retinanet import build_retinanet

from .loss import make_rpn_loss_evaluator
from .anchor_generator import make_anchor_generator
from .inference import make_rpn_postprocessor

class RPNHeadConvRegressor(nn.Module):
    """
    A simple RPN Head for classification and bbox regression.
    It takes the multi-level feature maps extracted by the FPN/backbone and produces,
    for each level, the anchor classification scores and the box regression values.
    """

    def __init__(self, cfg, in_channels, num_anchors):
        """
        Arguments:
            cfg              : the config (holding all default parameters)
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted at each location
                of the feature map; the output channel count equals the anchor count,
                so every channel carries the score of one anchor type
        """
        super(RPNHeadConvRegressor, self).__init__()
        # Adjust the channel count of the input with 1x1 convolutions, producing two
        # branches: the anchor classification scores and the box regression values (x4)
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=1, stride=1
        )
        # initialize the layer parameters
        for l in [self.cls_logits, self.bbox_pred]:
            torch.nn.init.normal_(l.weight, std=0.01)
            torch.nn.init.constant_(l.bias, 0)

    def forward(self, x):
        # x is a list of feature maps, one per level; apply both 1x1 branches to each level
        assert isinstance(x, (list, tuple))
        logits = [self.cls_logits(y) for y in x]
        bbox_reg = [self.bbox_pred(y) for y in x]

        return logits, bbox_reg

class RPNHeadFeatureSingleConv(nn.Module):
    """
    Adds a simple RPN Head with one conv to extract the feature.
    A single shared convolution is applied to each input level to refine the features.
    """

    def __init__(self, cfg, in_channels):
        """
        Arguments:
            cfg              : the config (holding all default parameters)
            in_channels (int): number of channels of the input feature
        """
        super(RPNHeadFeatureSingleConv, self).__init__()
        # convolve the input without changing its channel count
        self.conv = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, stride=1, padding=1
        )
        # initialize the layer parameters
        for l in [self.conv]:
            torch.nn.init.normal_(l.weight, std=0.01)
            torch.nn.init.constant_(l.bias, 0)

        self.out_channels = in_channels

    def forward(self, x):
        # x is a list of feature maps; apply the shared 3x3 conv + ReLU to each level
        assert isinstance(x, (list, tuple))
        x = [F.relu(self.conv(z)) for z in x]

        return x

@registry.RPN_HEADS.register("SingleConvRPNHead")
class RPNHead(nn.Module):
    """
    Adds a simple RPN Head with classification and regression heads.
    Like RPNHeadConvRegressor, it extracts the anchor classification scores and the
    box regression values from the multi-level feature maps, but it is effectively a
    combination of RPNHeadConvRegressor and RPNHeadFeatureSingleConv: the backbone
    features are first refined by a shared 3x3 conv (channels unchanged) and then
    split into the classification branch and the box regression branch.
    (A short usage sketch follows this class.)
    """

    def __init__(self, cfg, in_channels, num_anchors):
        """
        Arguments:
            cfg              : the config (holding all default parameters)
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted at each location
                of the feature map; the output channel count equals the anchor count,
                so every channel carries the score of one anchor type
        """
        super(RPNHead, self).__init__()
        # convolve the input without changing its channel count, to further refine the features
        self.conv = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, stride=1, padding=1
        )
        # Adjust the channel count with 1x1 convolutions, producing two branches:
        # the anchor classification scores and the box regression values (x4)
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=1, stride=1
        )
        # initialize the layer parameters
        for l in [self.conv, self.cls_logits, self.bbox_pred]:
            torch.nn.init.normal_(l.weight, std=0.01)
            torch.nn.init.constant_(l.bias, 0)

    def forward(self, x):
        logits = []
        bbox_reg = []
        for feature in x:
            # shared 3x3 conv + ReLU, then the two 1x1 branches, for every feature level
            t = F.relu(self.conv(feature))
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg
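
As a quick illustration of how this head is used, here is a hypothetical sketch (feature sizes made up) that runs RPNHead over a list of FPN-style feature maps. The head returns one (logits, bbox_reg) pair per level, and since cfg is not referenced inside this head, passing None is enough for the sketch:

# hypothetical usage sketch, not part of rpn.py
head = RPNHead(cfg=None, in_channels=256, num_anchors=3)
features = [torch.randn(1, 256, s, s) for s in (64, 32, 16)]  # one tensor per pyramid level
logits, bbox_reg = head(features)
for l, b in zip(logits, bbox_reg):
    print(l.shape, b.shape)  # e.g. [1, 3, 64, 64] and [1, 12, 64, 64] at the finest level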

class RPNModule(torch.nn.Module):
    """
    Module for RPN computation. Takes feature maps from the backbone and outputs
    RPN proposals and losses. Works for both FPN and non-FPN setups.
    """

    def __init__(self, cfg, in_channels):
        super(RPNModule, self).__init__()

        self.cfg = cfg.clone()

        # build the anchor generator
        anchor_generator = make_anchor_generator(cfg)

        # build the RPN head, i.e. the anchor classification and box regression layers;
        # the head class is looked up by name in registry.RPN_HEADS
        # (a registration sketch follows the listing)
        rpn_head = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD]
        head = rpn_head(
            cfg, in_channels, anchor_generator.num_anchors_per_location()[0]
        )

        # the box coder encodes the offsets between boxes and decodes offsets back into
        # predicted boxes (a minimal decoding sketch follows this class)
        rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))

        # build the post-processors that select the predicted boxes (proposals),
        # one for training and one for testing
        box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)
        box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False)

        # build the loss evaluator
        loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder)

        # assign everything to the module's attributes
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_selector_train = box_selector_train
        self.box_selector_test = box_selector_test
        self.loss_evaluator = loss_evaluator

    def forward(self, images, features, targets=None):
        """
        Arguments:
            images (ImageList): images for which we want to compute the predictions
            features (list[Tensor]): features computed from the images that are
                used for computing the predictions; each tensor in the list
                corresponds to a different feature level
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
                image.
            losses (dict[Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        # run the head to get the anchor classification scores and the box regression maps
        objectness, rpn_box_regression = self.head(features)
        # generate the anchors on every feature level
        anchors = self.anchor_generator(images, features)

        # training and testing take different paths
        if self.training:
            return self._forward_train(anchors, objectness, rpn_box_regression, targets)
        else:
            return self._forward_test(anchors, objectness, rpn_box_regression)

    def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
        if self.cfg.MODEL.RPN_ONLY:
            # When training an RPN-only model, the loss is determined by the
            # predicted objectness and rpn_box_regression values, and there is
            # no need to transform the anchors into predicted boxes; this is an
            # optimization that avoids the unnecessary transformation.
            boxes = anchors
        else:
            # For end-to-end models, anchors must be transformed into boxes and
            # sampled into a training batch, i.e. proposals are selected from every feature level.
            with torch.no_grad():
                # get the predicted boxes (proposals)
                boxes = self.box_selector_train(
                    anchors, objectness, rpn_box_regression, targets
                )
        # compute the loss values
        loss_objectness, loss_rpn_box_reg = self.loss_evaluator(
            anchors, objectness, rpn_box_regression, targets
        )
        losses = {
            "loss_objectness": loss_objectness,
            "loss_rpn_box_reg": loss_rpn_box_reg,
        }
        return boxes, losses

    # testing phase
    def _forward_test(self, anchors, objectness, rpn_box_regression):
        boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
        if self.cfg.MODEL.RPN_ONLY:
            # For end-to-end models, the RPN proposals are an intermediate state
            # and we don't bother to sort them in decreasing score order. For
            # RPN-only models, the proposals are the final output and we return
            # them in high-to-low confidence order.
            inds = [
                box.get_field("objectness").sort(descending=True)[1] for box in boxes
            ]
            boxes = [box[ind] for box, ind in zip(boxes, inds)]
        return boxes, {}
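
As promised above, here is a minimal sketch of the decoding performed by the BoxCoder created in RPNModule.__init__ (weights all 1.0): the predicted offsets (dx, dy, dw, dh) shift the anchor center and rescale its size, following the standard Faster R-CNN parameterization. The real BoxCoder.decode additionally applies the weights, clamps dw/dh, and follows the library's exact pixel conventions, so treat this only as an approximation:

# minimal decoding sketch (assumes weights = 1.0 and ignores pixel-offset details)
# anchors: Tensor[N, 4] as (x1, y1, x2, y2); deltas: Tensor[N, 4] as (dx, dy, dw, dh)
def decode_boxes_sketch(deltas, anchors):
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights

    dx, dy, dw, dh = deltas.unbind(dim=1)
    pred_ctr_x = dx * widths + ctr_x       # shift the anchor center
    pred_ctr_y = dy * heights + ctr_y
    pred_w = torch.exp(dw) * widths        # rescale the anchor width/height
    pred_h = torch.exp(dh) * heights

    return torch.stack([
        pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h,
        pred_ctr_x + 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h,
    ], dim=1)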

def build_rpn(cfg, in_channels):
    """
    This gives the gist of it. Not super important because it doesn't change as much.
    Builds the RPN module and passes it the relevant parameters.
    """
    # when RetinaNet is enabled, build its head instead of the standard RPN
    if cfg.MODEL.RETINANET_ON:
        return build_retinanet(cfg, in_channels)

    return RPNModule(cfg, in_channels)
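
Because the head used by RPNModule is looked up by name in registry.RPN_HEADS (via cfg.MODEL.RPN.RPN_HEAD, together with the @registry.RPN_HEADS.register decorator seen on RPNHead above), alternative heads can be plugged in without touching RPNModule. The class below is a made-up example of that mechanism, not part of the library:

# hypothetical example: register an alternative head under a new name
@registry.RPN_HEADS.register("TwoConvRPNHead")
class TwoConvRPNHead(RPNHead):
    def __init__(self, cfg, in_channels, num_anchors):
        super(TwoConvRPNHead, self).__init__(cfg, in_channels, num_anchors)
        # a second shared 3x3 conv before the two branches (illustrative only)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
        torch.nn.init.normal_(self.conv2.weight, std=0.01)
        torch.nn.init.constant_(self.conv2.bias, 0)

    def forward(self, x):
        logits = []
        bbox_reg = []
        for feature in x:
            t = F.relu(self.conv2(F.relu(self.conv(feature))))
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg

# selected by setting cfg.MODEL.RPN.RPN_HEAD to "TwoConvRPNHead" in the config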