Caffe FPN Test Flow

This post walks through the test flow of object detection with the Caffe framework, covering network initialization, data preprocessing, forward propagation, and result post-processing. Example code shows how to load the model, configure parameters, and run prediction on an image.

Test_net.py

if __name__ == '__main__':
    args = parse_args()


    print('Called with args:')
    print(args)


    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)


    cfg.GPU_ID = args.gpu_id


    print('Using config:')
    pprint.pprint(cfg)


    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)


    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)  # initialize the net; internally this calls Net::Init (see the C++ source)
    net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]


    imdb = get_imdb(args.imdb_name)    # same as training: fetch the dataset registered under this imdb name
    imdb.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)  # same as training: choose the gt / selective_search method used to build the roidb


    test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
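
The post does not show parse_args. The sketch below is an assumption, reconstructed only from the attribute names the main block reads (gpu_id, prototxt, caffemodel, cfg_file, set_cfgs, wait, imdb_name, comp_mode, max_per_image, vis); the real script's flag names and defaults may differ.

import argparse

def parse_args():
    # Hypothetical parser; the flag strings are assumptions, only the dest
    # names are taken from the attributes used by the main block above.
    parser = argparse.ArgumentParser(description='Test an FPN detection network')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
    parser.add_argument('--def', dest='prototxt', help='test prototxt', default=None)
    parser.add_argument('--net', dest='caffemodel', help='trained caffemodel', default=None)
    parser.add_argument('--cfg', dest='cfg_file', help='optional extra config file', default=None)
    parser.add_argument('--set', dest='set_cfgs', help='override config keys',
                        nargs=argparse.REMAINDER, default=None)
    parser.add_argument('--wait', dest='wait', help='wait for the caffemodel to appear',
                        type=int, default=1)
    parser.add_argument('--imdb', dest='imdb_name', default='voc_2007_test')
    parser.add_argument('--comp', dest='comp_mode', action='store_true')
    parser.add_argument('--max_per_image', dest='max_per_image', type=int, default=100)
    parser.add_argument('--vis', dest='vis', action='store_true')
    return parser.parse_args()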

==========================================

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True



def test_net(net, imdb, max_per_image=1000, thresh=0.05, vis=False):

    # The core call is scores, boxes = im_detect(net, im, box_proposals, imdb.num_classes);
    # everything else accumulates the detections and computes mAP, and is omitted here.
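
For completeness, here is a minimal sketch of the omitted body, assuming the stock py-faster-rcnn style loop (cv2 for image loading, nms from fast_rcnn.nms_wrapper, and the imdb's image_path_at / evaluate_detections methods); this FPN fork may differ in details such as the output-directory helper.

import numpy as np
import cv2
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.nms_wrapper import nms

def test_net_sketch(net, imdb, max_per_image=1000, thresh=0.05, vis=False):
    num_images = len(imdb.image_index)
    # all_boxes[cls][img] = N x 5 array of detections (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)] for _ in range(imdb.num_classes)]
    output_dir = get_output_dir(imdb, net)

    roidb = imdb.roidb if not cfg.TEST.HAS_RPN else None
    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        box_proposals = roidb[i]['boxes'] if roidb is not None else None
        scores, boxes = im_detect(net, im, box_proposals, imdb.num_classes)

        # per-class score threshold + NMS, skipping background (class 0)
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            all_boxes[j][i] = cls_dets[keep, :]

        # keep at most max_per_image detections over all classes
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]

    imdb.evaluate_detections(all_boxes, output_dir)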



def im_detect(net, im, boxes=None, num_classes=21):
    """Detect object classes in an image given object proposals.


    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals or None (for RPN)


    Returns:
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
    """
    blobs, im_scales = _get_blobs(im, boxes)


    # When mapping from image ROIs to feature map ROIs, there's some aliasing
    # (some distinct image ROIs get mapped to the same feature ROI).
    # Here, we identify duplicate feature ROIs, so we only compute features
    # on the unique subset.
    if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
        v = np.array([1, 1e3, 1e6, 1e9, 1e12])
        hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
        _, index, inv_index = np.unique(hashes, return_index=True,
                                        return_inverse=True)
        blobs['rois'] = blobs['rois'][index, :]
        boxes = boxes[index, :]
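        # The dot with [1, 1e3, 1e6, 1e9, 1e12] packs the 5 quantized columns
        # of each roi (batch index, x1, y1, x2, y2) into a single scalar hash,
        # so np.unique can cheaply spot rois that land on the same quantized
        # coordinates; inv_index is kept so the results computed on the unique
        # subset can be mapped back to the original ordering at the end.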


    if cfg.TEST.HAS_RPN:
        im_blob = blobs['data']
        blobs['im_info'] = np.array(
            [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
            dtype=np.float32)
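        # im_info = (blob height, blob width, resize scale); with an RPN, the
        # proposal layer typically reads it to generate and clip proposals to
        # the bounds of the resized input image.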


    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    if cfg.TEST.HAS_RPN:
        net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
    else:
        net.blobs['rois'].reshape(*(blobs['rois'].shape))


    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    if cfg.TEST.HAS_RPN:
        forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
    else:
        forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
    blobs_out = net.forward(**forward_kwargs)
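    # blobs_out maps output blob names to arrays; the keys used below are
    # 'cls_prob' (softmax class scores) and 'bbox_pred' (per-class box deltas).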






    if cfg.TEST.HAS_RPN:
        assert len(im_scales) == 1, "Only single-image batch implemented"
        rois = net.blobs['rois'].data.copy()
        # keep the boxes in the scaled-image space here; the division by
        # im_scales[0] (back to raw image space) is deferred to the return
        boxes = rois[:, 1:5]
        # drop rois whose coordinates are all zeros
        index = np.where(np.sum(boxes, axis=1) != 0)[0]
        boxes = boxes[index, :]

    if cfg.TEST.SVM:
        # use the raw scores before softmax under the assumption they
        # were trained as linear SVMs
        scores = net.blobs['cls_score'].data
    else:
        # use softmax estimated probabilities
        scores = blobs_out['cls_prob']
        # keep only the rows for the rois retained above
        scores = scores[index]


    # print scores[0:10]

    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = blobs_out['bbox_pred']
        # keep only the rows for the rois retained above
        box_deltas = box_deltas[index, :]

        if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
            # Optionally de-normalize the deltas with the precomputed means and
            # stds that were applied to the regression targets during training
            means = np.tile(
                    np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1)).ravel()
            stds = np.tile(
                    np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1)).ravel()
            box_deltas = box_deltas * stds + means



        # print boxes.shape, box_deltas.shape
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        # clip to the (height, width, channels) of the network input blob,
        # i.e. the scaled image, since the boxes are still in that space
        data_shape = blobs['data'].shape
        s = (data_shape[2], data_shape[3], data_shape[1])
        pred_boxes = clip_boxes(pred_boxes, s)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))


    if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
        # Map scores and predictions back to the original set of boxes
        scores = scores[inv_index, :]
        pred_boxes = pred_boxes[inv_index, :]


    # visualization is hard-coded off here, overriding the vis argument
    vis = False
    if vis:
        vis_rois_detection(blobs['data'].astype(np.float32, copy=False),
                           pred_boxes / im_scales[0])

    # scale the boxes back to the raw (unscaled) image space on return
    return scores, pred_boxes / im_scales[0]
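
The preprocessing hidden behind _get_blobs at the top of im_detect is not shown in this post. Below is a minimal sketch of the usual py-faster-rcnn image-blob construction (mean subtraction, short-side resize capped by MAX_SIZE, HWC to NCHW), under the assumption that this FPN fork follows the stock code; details such as multi-scale testing may differ.

import cv2
import numpy as np
from fast_rcnn.config import cfg

def _get_image_blob_sketch(im):
    # Sketch only: subtract the BGR pixel means, resize so the short side hits
    # cfg.TEST.SCALES[0] without the long side exceeding cfg.TEST.MAX_SIZE,
    # then pack the image into a 1 x C x H x W blob.
    im = im.astype(np.float32, copy=True) - cfg.PIXEL_MEANS
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    im_scale = float(cfg.TEST.SCALES[0]) / float(im_size_min)
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
        im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    blob = im[np.newaxis, :, :, :].transpose(0, 3, 1, 2)  # 1xHxWxC -> 1xCxHxW
    return blob, np.array([im_scale])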


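The bounding-box regression step leans on bbox_transform_inv and clip_boxes from the Fast R-CNN box utilities. A condensed sketch of the standard delta decoding, using the usual center/size parameterization with an exponential on the size deltas, is shown below; it is consistent with what the stock implementation computes.

import numpy as np

def bbox_transform_inv_sketch(boxes, deltas):
    # Each proposal box is described by its center (ctr_x, ctr_y) and size
    # (w, h); the network predicts (dx, dy, dw, dh) per class, and the decoded
    # box is pred_ctr = ctr + d * size, pred_size = size * exp(dw or dh).
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx, dy = deltas[:, 0::4], deltas[:, 1::4]
    dw, dh = deltas[:, 2::4], deltas[:, 3::4]

    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w   # x1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h   # y1
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w   # x2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h   # y2
    return pred_boxes

clip_boxes then simply clamps x coordinates to [0, width - 1] and y coordinates to [0, height - 1] of the input blob shape passed in as s above.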