关于YOLO的理论知识,我转载的一篇博客中有具体讲解(原文链接见转载文章)。本文主要将YOLO的回归损失函数与代码一一对应起来讲解,便于更加深入地理解YOLO的实现过程。
if(state.train){
// Diagnostic accumulators averaged over the batch (reported outside this view).
float avg_iou = 0;
float avg_cat = 0;
float avg_allcat = 0;
float avg_obj = 0;
float avg_anyobj = 0;
int count = 0;
// Reset the total cost and all gradients before accumulating this batch's loss.
*(l.cost) = 0;
int size = l.inputs * l.batch;
memset(l.delta, 0, size * sizeof(float));
for (b = 0; b < l.batch; ++b){
int index = b*l.inputs;
// Process each grid cell ("location") independently.
for (i = 0; i < locations; ++i) {
// Ground-truth layout per cell: [objectness(1), classes(l.classes), coords(l.coords)].
int truth_index = (b*locations + i)*(1+l.coords+l.classes);
int is_obj = state.truth[truth_index];// whether this cell contains an object; ground truth lives in state.truth
// No-object term: first penalize the confidence of every predictor in the
// cell. If the cell does hold an object, the responsible predictor's
// contribution is subtracted back out further below.
for (j = 0; j < l.n; ++j) {
int p_index = index + locations*l.classes + i*l.n + j;
l.delta[p_index] = l.noobject_scale*(0 - l.output[p_index]);// no-object confidence gradient
*(l.cost) += l.noobject_scale*pow(l.output[p_index], 2);
avg_anyobj += l.output[p_index];
}
int best_index = -1;
float best_iou = 0;
float best_rmse = 20;
if (!is_obj){
continue;
}
// Class term: squared error on this cell's class probabilities.
int class_index = index + i*l.classes;
for(j = 0; j < l.classes; ++j) {
l.delta[class_index+j] = l.class_scale * (state.truth[truth_index+1+j] - l.output[class_index+j]);
*(l.cost) += l.class_scale * pow(state.truth[truth_index+1+j] - l.output[class_index+j], 2);
if(state.truth[truth_index + 1 + j]) avg_cat += l.output[class_index+j];
avg_allcat += l.output[class_index+j];
}
// Ground-truth box; x,y are divided by l.side to match the scale the
// predicted boxes are converted to below.
box truth = float_to_box(state.truth + truth_index + 1 + l.classes);
truth.x /= l.side;
truth.y /= l.side;
// Select the "responsible" predictor among the cell's l.n boxes: prefer
// the highest IOU with the truth; when no box overlaps at all, fall back
// to the smallest coordinate RMSE.
for(j = 0; j < l.n; ++j){
int box_index = index + locations*(l.classes + l.n) + (i*l.n + j) * l.coords;
box out = float_to_box(l.output + box_index);
out.x /= l.side;
out.y /= l.side;
if (l.sqrt){
// With sqrt parameterization the network outputs sqrt(w), sqrt(h);
// square them to recover the actual size before computing IOU.
out.w = out.w*out.w;
out.h = out.h*out.h;
}
float iou = box_iou(out, truth);
//iou = 0;
float rmse = box_rmse(out, truth);
if(best_iou > 0 || iou > 0){
if(iou > best_iou){
best_iou = iou;
best_index = j;
}
}else{
if(rmse < best_rmse){
best_rmse = rmse;
best_index = j;
}
}
}
// Optional override: force predictor 1 for small truth boxes, 0 for large ones.
if(l.forced){
if(truth.w*truth.h < .1){
best_index = 1;
}else{
best_index = 0;
}
}
// Early in training (fewer than 64000 images seen) optionally pick the
// responsible predictor at random so every predictor receives gradient.
if(l.random && *(state.net.seen) < 64000){
best_index = rand()%l.n;
}
int box_index = index + locations*(l.classes + l.n) + (i*l.n + best_index) * l.coords;
int tbox_index = truth_index + 1 + l.classes;
box out = float_to_box(l.output + box_index);
out.x /= l.side;
out.y /= l.side;
if (l.sqrt) {
out.w = out.w*out.w;
out.h = out.h*out.h;
}
float iou = box_iou(out, truth);
//printf("%d,", best_index);
// Object confidence term (the paper's third loss term).
int p_index = index + locations*l.classes + i*l.n + best_index;// this cell is known to hold an object, so subtract the no-object penalty added above for the responsible predictor
*(l.cost) -= l.noobject_scale * pow(l.output[p_index], 2);
*(l.cost) += l.object_scale * pow(1-l.output[p_index], 2);
avg_obj += l.output[p_index];
l.delta[p_index] = l.object_scale * (1.-l.output[p_index]);// overwrites the no-object gradient written earlier
if(l.rescore){
// Rescoring: regress the confidence toward the actual IOU instead of 1.
l.delta[p_index] = l.object_scale * (iou - l.output[p_index]);
}
// Coordinate regression (the paper's first and second loss terms).
l.delta[box_index+0] = l.coord_scale*(state.truth[tbox_index + 0] - l.output[box_index + 0]);// delta x
l.delta[box_index+1] = l.coord_scale*(state.truth[tbox_index + 1] - l.output[box_index + 1]);// delta y
l.delta[box_index+2] = l.coord_scale*(state.truth[tbox_index + 2] - l.output[box_index + 2]);// delta w
l.delta[box_index+3] = l.coord_scale*(state.truth[tbox_index + 3] - l.output[box_index + 3]);// delta h
if(l.sqrt){
// sqrt parameterization: regress against sqrt of the true size instead.
l.delta[box_index+2] = l.coord_scale*(sqrt(state.truth[tbox_index + 2]) - l.output[box_index + 2]);// sqrt delta w
l.delta[box_index+3] = l.coord_scale*(sqrt(state.truth[tbox_index + 3]) - l.output[box_index + 3]);// sqrt delta h
}
// NOTE(review): this (1-iou)^2 term is added to the reported cost but no
// matching gradient is written to l.delta — looks diagnostic only; confirm.
*(l.cost) += pow(1-iou, 2);
avg_iou += iou;
++count;
}
}
从detection_layer.c中网络训练的代码发现,目标物体的GT维度为25,分别为类别维度(20),置信度(1),坐标(4);YOLO实质上每个小格只预测一个box,因为仅选择预测到的两个box中IOU较大的作为最终的box回归。通过这种回归模式,显然对于目标物体存在重叠度较大,或者目标物体较为密集的情形,YOLO的这种预测模式会存在较大的漏检,效果有待进一步改进。网络输出数据格式和标签的格式如下图所示:
若有不当之处,请指教,谢谢!