End-to-End Deployment of the RandLA-Net Point Cloud Segmentation Network

Prerequisites

  1. This walkthrough is based on the open-source repository: https://github.com/liuxuexun/RandLA-Net-Pytorch-New.git.
  2. You have already used that repository to train on the public S3DIS dataset (or your own S3DIS-format dataset) and exported the model as a checkpoint.tar file. Annotating a custom dataset, converting it to the S3DIS format, and running a full training will be covered in detail in the next article.
  3. You are familiar with the RandLA-Net training strategy.
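
For reference, the checkpoint.tar expected below is a standard PyTorch checkpoint dictionary. A minimal sketch of how such a file is typically saved during training (the exact keys must match whatever your training script wrote; 'model_state_dict' and 'optimizer_state_dict' are the two loaded by the inference code in this article):

import torch

# Hedged sketch: save a checkpoint in the layout the inference code loads.
# 'net' and 'optimizer' are the training-time model and optimizer instances.
torch.save({
    'model_state_dict': net.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, 'checkpoint.tar')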

Full Inference Pipeline

Setup:

# Imports assume the layout of the RandLA-Net-Pytorch-New repository;
# adjust the module paths if your fork differs.
import time
import numpy as np
import torch
from torch.optim import Adam
from scipy.spatial import cKDTree
from scipy.special import softmax
from RandLANet import Network
from helper_tool import ConfigS3DIS as cfg
from helper_tool import DataProcessing as DP
from helper_tool import Plot

# Replace with the classes you trained on
gt_class2label = {0: 'back', 1: 'box'}
label_values = np.sort([k for k, v in gt_class2label.items()])
# Trained model weights
model_path = r'./checkpoint.tar'
# Point cloud file to run inference on, in the same format the network saw during training
point_path = r'./Scene_1.txt'
# Pre-inference setup
device = torch.device('cuda:0')
net = Network(cfg)
optimizer = Adam(net.parameters(), lr=cfg.learning_rate)
checkpoint = torch.load(model_path, map_location='cpu')
net.load_state_dict(checkpoint['model_state_dict'])
net.to(device)
net.eval()
# Restoring the optimizer state is not needed for inference; it is kept here
# only to mirror the training checkpoint layout
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
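
Each line of Scene_1.txt is expected to follow the S3DIS convention used at training time: at least six whitespace-separated columns, x y z r g b (a trailing label column, if present, is ignored by the pc[:, 3:6] slice below). For example, with illustrative values:

# Scene_1.txt: one point per line
# x        y        z        r    g    b
1.532    0.871    0.305    128   64   200
1.540    0.866    0.311    130   66   198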

Data loading:


pc = np.loadtxt(point_path)
xyz_min = np.min(pc, axis=0)[0:3]
xyz, colors = (pc[:, :3] - xyz_min).astype(np.float32), pc[:, 3:6].astype(np.float32)
# For my data the point cloud is unchanged by grid_sub_sampling, so on Windows
# I simply skip it for now:
sub_xyz, sub_colors = xyz, colors
# On Linux, use the repository's grid subsampling instead:
# sub_xyz, sub_colors = DP.grid_sub_sampling(points=xyz, features=colors, grid_size=cfg.sub_grid_size)

# Use cKDTree instead of KDTree to speed up neighbour queries
search_tree = cKDTree(sub_xyz)
sub_colors = sub_colors / 255.0
input_trees = search_tree
input_colors = sub_colors
input_names = 'infer'
# Nearest subsampled neighbour of every original point, used later to project
# per-subsample probabilities back onto the full-resolution cloud
proj_idx = search_tree.query(xyz, k=1)[1].squeeze()
val_proj = proj_idx.astype(np.int32)

# Initialize the per-point class probabilities, accumulated across inference passes
test_probs = [np.zeros(shape=[input_colors.shape[0], len(gt_class2label)], dtype=np.float32)]
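
If your data does need grid subsampling on Windows, a pure-NumPy voxel-grid fallback can stand in for DP.grid_sub_sampling. This is a minimal sketch (grid_sub_sampling_np is a hypothetical helper, not part of the repository); it averages the points and features inside each voxel of side grid_size:

import numpy as np

def grid_sub_sampling_np(points, features, grid_size):
    # Assign each point to a voxel, then keep one averaged point per voxel
    voxel_idx = np.floor(points / grid_size).astype(np.int64)
    _, inverse, counts = np.unique(voxel_idx, axis=0, return_inverse=True, return_counts=True)
    sub_points = np.zeros((counts.shape[0], points.shape[1]), dtype=np.float32)
    sub_features = np.zeros((counts.shape[0], features.shape[1]), dtype=np.float32)
    # Scatter-add every point/feature into its voxel, then average
    np.add.at(sub_points, inverse, points)
    np.add.at(sub_features, inverse, features)
    sub_points /= counts[:, None]
    sub_features /= counts[:, None]
    return sub_points, sub_features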

Preprocessing:

# Essentially the same as the repository code
def tf_map(batch_xyz, batch_features, batch_pc_idx, batch_cloud_idx):
    batch_features = np.concatenate([batch_xyz, batch_features], axis=-1)
    input_points = []
    input_neighbors = []
    input_pools = []
    input_up_samples = []
    for i in range(cfg.num_layers):
        neighbour_idx = DP.knn_search(batch_xyz, batch_xyz, cfg.k_n)
        # Slicing the first N // ratio points is a valid random subsample,
        # because queried_idx was shuffled before this function is called
        sub_points = batch_xyz[:, :batch_xyz.shape[1] // cfg.sub_sampling_ratio[i], :]
        pool_i = neighbour_idx[:, :batch_xyz.shape[1] // cfg.sub_sampling_ratio[i], :]
        # The KNN search here was optimized for speed
        up_i = DP.knn_search(sub_points, batch_xyz, 1)

        input_points.append(batch_xyz)
        input_neighbors.append(neighbour_idx)
        input_pools.append(pool_i)
        input_up_samples.append(up_i)
        batch_xyz = sub_points

    input_list = input_points + input_neighbors + input_pools + input_up_samples
    input_list += [batch_features, batch_pc_idx, batch_cloud_idx]
    return input_list
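
# Shape walk-through for one pass (assuming the repository's S3DIS defaults,
# cfg.num_points = 40960 and cfg.sub_sampling_ratio = [4, 4, 4, 4, 2]):
#   layer 0: 40960 points, pooled to 10240
#   layer 1: 10240 -> 2560
#   layer 2:  2560 -> 640
#   layer 3:   640 -> 160
#   layer 4:   160 -> 80
# so flat_inputs holds 4 * num_layers coordinate/index arrays, followed by the
# features, point indices, and cloud index appended at the end.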
    
def _poss_choose(possibility):
    '''
    @param possibility: per-point possibility of being picked, updated each pass
    @return:
    possibility: the updated per-point possibilities
    queried_pc_xyz: xyz of the points picked this pass
    queried_pc_colors: the corresponding features
    queried_idx: indices of the picked points
    cloud_idx: which cloud; there is only one at inference time, i.e. 0
    '''
    # The minimum possibility can serve as a stopping threshold for inference;
    # here I use a fixed number of passes (cfg.val_steps) instead
    min_possibility = float(np.min(possibility))
    # Only one cloud at inference time, so the cloud index is always 0
    cloud_idx = 0
    point_ind = np.argmin(possibility)

    center_point = points[point_ind, :].reshape(1, -1)
    noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape)
    pick_point = center_point + noise.astype(center_point.dtype)

    if len(points) < cfg.num_points:
        queried_idx = input_trees.query(pick_point, k=len(points))[1][0]
    else:
        queried_idx = input_trees.query(pick_point, k=cfg.num_points)[1][0]

    queried_idx = DP.shuffle_idx(queried_idx)
    queried_pc_xyz = points[queried_idx]
    queried_pc_xyz = queried_pc_xyz - pick_point
    queried_pc_colors = input_colors[queried_idx]

    # Points close to the pick get a large possibility increase, so later
    # passes favour regions that have not been visited yet
    dists = np.sum(np.square((points[queried_idx] - pick_point).astype(np.float32)), axis=1)
    delta = np.square(1 - dists / np.max(dists))
    possibility[queried_idx] += delta

    return possibility, queried_pc_xyz, queried_pc_colors, queried_idx, cloud_idx
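
# A quick numeric check of the update: if dists / max(dists) for three points
# is [0.0, 0.5, 1.0], then delta = [(1 - 0.0)^2, (1 - 0.5)^2, (1 - 1.0)^2]
# = [1.0, 0.25, 0.0], so the pick center becomes very unlikely to be chosen
# again while the farthest point in the queried ball is left untouched.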


def _process(queried_pc_xyz, queried_pc_colors, queried_idx, cloud_idx, idx):
    '''
    @param queried_pc_xyz: xyz of the points picked this pass
    @param queried_pc_colors: the corresponding features
    @param queried_idx: indices of the picked points
    @param cloud_idx: which cloud; there is only one at inference time, i.e. 0
    @param idx: which random-pick pass this is
    @return: writes into poss_thread_list, the global list of per-pass model inputs
    '''
    if len(points) < cfg.num_points:
        queried_pc_xyz, queried_pc_colors, queried_idx = \
            DP.data_aug_no_label(queried_pc_xyz, queried_pc_colors, queried_idx, cfg.num_points)

    queried_pc_xyz = np.array(queried_pc_xyz, dtype=np.float32)
    queried_pc_colors = np.array(queried_pc_colors, dtype=np.float32)

    selected_idx = np.expand_dims(np.array(queried_idx, dtype=np.float32), axis=0)
    cloud_ind = np.expand_dims(np.array([cloud_idx], dtype=np.float32), axis=0)
    selected_pc = np.expand_dims(np.concatenate((queried_pc_xyz, queried_pc_colors), axis=1), axis=0)
    selected_xyz = selected_pc[:, :, 0:3]
    selected_features = selected_pc[:, :, 3:6]
    flat_inputs = tf_map(selected_xyz, selected_features, selected_idx, cloud_ind)

    # Unflatten tf_map's output into the dict of tensors the network expects
    num_layers = cfg.num_layers
    inputs = {}
    inputs['xyz'] = [torch.from_numpy(tmp).float() for tmp in flat_inputs[:num_layers]]
    inputs['neigh_idx'] = [torch.from_numpy(tmp).long() for tmp in flat_inputs[num_layers: 2 * num_layers]]
    inputs['sub_idx'] = [torch.from_numpy(tmp).long() for tmp in flat_inputs[2 * num_layers:3 * num_layers]]
    inputs['interp_idx'] = [torch.from_numpy(tmp).long() for tmp in flat_inputs[3 * num_layers:4 * num_layers]]
    inputs['features'] = torch.from_numpy(flat_inputs[4 * num_layers]).float()
    inputs['input_inds'] = torch.from_numpy(flat_inputs[4 * num_layers + 1]).long()
    inputs['cloud_inds'] = torch.from_numpy(flat_inputs[4 * num_layers + 2]).long()
    poss_thread_list[idx] = inputs
    
# Initial per-point pick possibility
possibility = np.random.rand(input_colors.shape[0])
points = np.array(input_trees.data)
poss_thread_list = [None] * cfg.val_steps
# Run the random picks on worker threads to speed up preprocessing
for idx in range(cfg.val_steps):
    possibility, queried_pc_xyz, queried_pc_colors, queried_idx, cloud_idx = _poss_choose(possibility)
    start_thread(_process, (queried_pc_xyz, queried_pc_colors, queried_idx, cloud_idx, idx))

while True:
    if None not in poss_thread_list:
        break  # all per-pass inputs collected, preprocessing is done
    time.sleep(0.01)
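
start_thread is not shown in this excerpt; assuming it is a thin wrapper around the standard library's threading.Thread, a minimal sketch would be:

import threading

# Hedged sketch of the helper used above: fire a daemon worker and return it.
# _process is thread-safe here because each call writes only into its own slot
# poss_thread_list[idx].
def start_thread(target, args):
    t = threading.Thread(target=target, args=args, daemon=True)
    t.start()
    return t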

Model inference:

# Iterate over the per-pass point cloud batches
for batch_data in poss_thread_list:
    for key in batch_data:
        if isinstance(batch_data[key], list):
            batch_data[key] = [d.to(device) for d in batch_data[key]]
        else:
            batch_data[key] = batch_data[key].to(device)
    with torch.no_grad():
        # Forward pass
        end_points = net(batch_data)
    # The probability accumulation shown in the next section runs here,
    # once per batch, inside this loop

Post-processing and visualization (the accumulation block below executes inside the inference loop above, once per batch; the projection and plotting run once at the end):

logits = end_points['logits']
stacked_probs = logits.transpose(1, 2).reshape(-1, cfg.num_classes)
point_idx = end_points['input_inds'].cpu().numpy()
cloud_idx = end_points['cloud_inds'].cpu().numpy()
stacked_probs = torch.reshape(stacked_probs, [cfg.val_batch_size, cfg.num_points, cfg.num_classes])
stacked_probs = stacked_probs.cpu().numpy()
stacked_probs = softmax(stacked_probs, axis=2)
# Exponential moving average over passes: points visited several times end up
# with smoothed, more reliable probabilities
test_smooth = 0.98
for j in range(np.shape(stacked_probs)[0]):
    probs = stacked_probs[j, :, :]
    p_idx = point_idx[j, :]
    c_i = cloud_idx[j][0]
    test_probs[c_i][p_idx] = test_smooth * test_probs[c_i][p_idx] + (1 - test_smooth) * probs

# Project the subsampled probabilities back onto the full-resolution cloud via
# the nearest-neighbour indices computed during data loading
proj_probs_list = []
proj_idx = val_proj
probs = test_probs[0][proj_idx, :]
proj_probs_list += [probs]

preds = label_values[np.argmax(proj_probs_list[0], axis=1)].astype(np.uint8)
# Visualization; my data has two classes, so two colors
plot_colors = [
    (0.0, 0.0, 0.9461538461538463),
    (0.0, 1.0, 0.2846153846153845)]

Plot.draw_pc_sem_ins(xyz, preds, plot_colors)
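
Besides the built-in plot, it can be handy to dump the labeled cloud to disk and inspect it in an external viewer such as CloudCompare. A minimal sketch (the output filename is arbitrary):

# Write x y z label per line; add xyz_min back to restore original coordinates
labeled = np.concatenate([xyz + xyz_min, preds[:, None].astype(np.float32)], axis=1)
np.savetxt('Scene_1_pred.txt', labeled, fmt='%.6f %.6f %.6f %d')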

Getting the Complete Code

The complete inference code for this pipeline is available at https://github.com/crisp9999/RandLA-Net-Pytorch-Inference.git. If you run into problems, leave a comment here or open an issue, and if this helped you, a star is appreciated!
