Faster RCNN中间层可视化

Refer to the original tutorial (the source link is unavailable in this copy).



1. In the tools directory, create a copy of the demo script named 'feature_visualize.py' with the content below;

#!/usr/bin/env python

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
Demo script showing detections in sample images.

See README.md for installation instructions before running.
"""

import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
import math

# PASCAL VOC 2007 class labels; index 0 is the background class,
# matching the class ordering of the network's score/box outputs.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

# Supported backbones: CLI name -> (model sub-directory, caffemodel file name).
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}


def vis_detections(im, class_name, dets, thresh=0.5):
    """Plot the detections of one class whose score is at least *thresh*.

    dets is an (N, 5) array of [x1, y1, x2, y2, score] rows; im is a BGR
    (OpenCV-order) image. Does nothing when no detection passes the threshold.
    """
    keep = np.where(dets[:, -1] >= thresh)[0]
    if len(keep) == 0:
        return

    # OpenCV loads images as BGR; flip to RGB for matplotlib display.
    rgb = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(rgb, aspect='equal')

    for idx in keep:
        x1, y1, x2, y2 = dets[idx, :4]
        score = dets[idx, -1]
        box = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                            fill=False, edgecolor='red', linewidth=3.5)
        ax.add_patch(box)
        ax.text(x1, y1 - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
def save_feature_picture(data, name, image_name=None, padsize = 1, padval = 1):
    """Tile the feature maps of one blob into a grid and save it as a jpg.

    Args:
        data: blob data of shape (batch, channels, H, W); only data[0] is used.
        name: file-name stem for the saved picture.
        image_name: optional sub-directory (the demo image name); when None
            the picture goes directly into ./data/feature_picture/.
        padsize: number of padding pixels between adjacent tiles.
        padval: constant pixel value used for the padding.
    """
    data = data[0]
    # Lay the channels out on an n x n grid, n = ceil(sqrt(#channels)).
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, 0), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # Interleave grid rows/columns so every channel becomes one mosaic tile.
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    fig = plt.figure()
    plt.imshow(data, cmap='gray')
    plt.axis('off')
    if image_name is None:
        img_path = './data/feature_picture/'
    else:
        img_path = './data/feature_picture/' + image_name + "/"
    # Ensure the output directory exists in BOTH branches (the original only
    # created it when image_name was given, so the None case crashed on a
    # fresh checkout).
    check_file(img_path)
    plt.savefig(img_path + name + ".jpg", dpi = 400, bbox_inches = "tight")
    # Close the figure: this runs once per conv/pool/rpn blob per image, and
    # leaving figures open accumulates memory across the whole demo run.
    plt.close(fig)
def check_file(path):
    """Create directory *path* if it does not exist.

    Uses os.makedirs so any missing intermediate directories are created as
    well (os.mkdir fails when the parent does not exist).
    """
    if not os.path.exists(path):
        os.makedirs(path)
def demo(net, image_name):
    """Detect objects in one demo image and dump its intermediate feature maps."""
    # Read the demo image from the data directory.
    im = cv2.imread(os.path.join(cfg.DATA_DIR, 'demo', image_name))

    # Single forward pass: class scores and regressed boxes per proposal.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    # Save a tiled picture for every conv/pool/rpn blob of this forward pass.
    for blob_name, blob in net.blobs.items():
        if blob_name.find("conv") > -1 or blob_name.find("pool") > -1 or blob_name.find("rpn") > -1:
            save_feature_picture(blob.data, blob_name.replace("/", ""), image_name)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Per-class NMS followed by visualization of the surviving detections.
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:], start=1):  # skip background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        vis_detections(im, cls, dets[keep, :], thresh=CONF_THRESH)

def parse_args():
    """Build and evaluate the command-line interface for the demo script."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id',
                        type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--cpu', dest='cpu_mode',
                        action='store_true',
                        help='Use CPU mode (overrides --gpu)')
    parser.add_argument('--net', dest='demo_net',
                        default='vgg16', choices=NETS.keys(),
                        help='Network to use [vgg16]')
    return parser.parse_args()

def print_param(net):
    """Print the shape of every blob and of each layer's first parameter blob.

    Debug helper for locating layer names to visualize. The original body
    mixed tab and space indentation, which breaks under `python -tt` and
    Python 3; the prints are also written in the form valid in both 2 and 3.
    """
    for k, v in net.blobs.items():
        print(k, v.data.shape)
    print("")
    for k, v in net.params.items():
        print(k, v[0].data.shape)

if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # Test-time prototxt and trained weights for the selected backbone.
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    #print "prototxt: ", prototxt
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    # Select CPU or GPU mode before instantiating the network.
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    
    #print_param(net)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image so the timed runs below exclude one-off
    # lazy-initialization cost.
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)

    # Run detection + feature-map dumping on each sample image in data/demo/.
    im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
                '001763.jpg', '004545.jpg']
    for im_name in im_names:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_name)
        demo(net, im_name)

    #plt.show()




2. Adjust the settings for your own model, e.g. CLASSES, NETS, im_file and im_names;




3 Open terminal at root-dir and run:
$ python tools/feature_visualize.py 




4. The visualization images are saved under data/feature_picture.




### 目标检测模型中间层可视化方法 目标检测模型的中间层可视化通常涉及提取特定卷积层的输出并将其转化为可理解的形式。以下是实现这一过程的具体方式: #### 1. 使用钩子函数捕获特征图 PyTorch 提供了一种灵活的方式,即通过注册 `hook` 函数来捕获神经网络中的中间层输出。具体来说,可以利用 `register_forward_hook()` 方法将指定的卷积层输出保存下来[^2]。 ```python import torch from torchvision import models def forward_hook(module, input, output): global features features = output.detach() # 加载预训练的目标检测模型 model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True) # 注册 hook 到某一层 (例如 backbone 的 conv 层) model.backbone.body.layer2[-1].register_forward_hook(forward_hook) # 前向传播输入张量 input_tensor = torch.rand(1, 3, 224, 224) _ = model(input_tensor) print(features.shape) # 输出该层的特征图形状 ``` 上述代码展示了如何为 Faster R-CNN 模型的某一卷积层设置前向传播钩子,并存储其输出以便后续分析。 --- #### 2. 查阅相关卷积层名称 为了确定需要可视化的具体卷积层,可以通过遍历模型参数或结构来查找合适的层名。这一步骤对于不同框架下的模型尤为重要。 ```python for name, param in model.named_parameters(): print(name) ``` 此脚本会打印出整个模型的所有参数及其对应的层次路径,从而帮助开发者精确定位感兴趣的卷积层位置。 --- #### 3. 特征图的颜色映射与叠加 一旦获得了所需的特征图数据,下一步就是对其进行颜色编码并与原始图像叠加形成最终的可视化效果。常见的做法是先对特征图取平均值或者最大响应值,再归一化至 `[0, 1]` 范围内,接着应用伪彩色调色板完成渲染[^1]。 ```python import matplotlib.pyplot as plt import numpy as np # 将特征图转为 NumPy 数组 feature_map = features.squeeze().cpu().numpy() avg_feature_map = feature_map.mean(axis=0) # 对通道维度求均值 # 归一化处理 normalized_map = (avg_feature_map - avg_feature_map.min()) / \ (avg_feature_map.max() - avg_feature_map.min()) # 显示热力图 plt.imshow(normalized_map, cmap='jet') plt.axis('off') # 关闭坐标轴 plt.show() ``` 如果希望进一步增强直观感受,则还可以尝试把生成的热力图叠加上实际图片之上。 --- #### 4. 实际应用场景实例——遥感图像目标检测 在某些领域比如遥感数据分析中,这种技术被广泛应用于提升算法性能评估效率以及辅助人工审核工作流之中[^3]。例如,在电力设施巡检场景下,通过对输电线路上的关键设备(如铁塔)实施精确标注之后绘制边界框GeoJSON文件形式表达出来;与此同时借助地理信息系统工具库Geopandas执行空间矢量化操作进而得到标准化COCO格式样本集合用于机器学习训练阶段准备素材之需。 --- ### 总结 综上所述,目标检测模型中间层可视化主要包括以下几个方面:一是运用 PyTorch 钩子机制截取所需特征信息;二是探索适合自己的卷积单元作为观察对象;三是设计合理的色彩呈现策略使得抽象数值变得易于解读;四是结合行业需求打造定制化解决方案服务于实践环节当中去。
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值