【Common Point Cloud Code Snippets】

This post collects common Python snippets for point cloud preprocessing: normalizing a cloud so that it lies inside the unit sphere, sampling (random sampling, twice-sampling, farthest point sampling, ball query), randomly shuffling points, and adding random jitter noise. These techniques are mainly used to prepare point cloud data for machine learning and computer vision tasks.


Normalize to the Unit Sphere

Reference: https://github.com/yanx27/Pointnet_Pointnet2_pytorch

data_utils/ModelNetDataLoader.py
# Per-sample processing, called from the dataset's __getitem__ method.
point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])

import numpy as np

def pc_normalize(pc):
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
    pc = pc / m
    return pc

# Version 2
def normalize_points(points):
    r"""Normalize point cloud to a unit sphere at origin."""
    points = points - points.mean(axis=0)
    points = points / np.max(np.linalg.norm(points, axis=1))
    return points
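
A minimal usage sketch (the random input below is made up purely for illustration): after normalization the centroid sits at the origin and the farthest point lies on the unit sphere.

import numpy as np

points = np.random.rand(1024, 3) * 10.0          # synthetic cloud, arbitrary scale
normalized = normalize_points(points)
print(normalized.mean(axis=0))                   # approximately (0, 0, 0)
print(np.linalg.norm(normalized, axis=1).max())  # approximately 1.0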

Sampling Methods

Random Sampling

Randomly sample a specified number of points from the given point cloud and return the sampled points.

If num_points is greater than num_samples, take the first num_samples indices of a random permutation, i.e. sample num_samples points without replacement; otherwise, the indices are tiled (repeated) until num_samples points are reached.
【Source: GEOtransformer-dataset】
if self.twice_sample:
    # twice sample on both point clouds
    ref_points, ref_normals = random_sample_points(ref_points, self.num_points, normals=ref_normals)
    src_points, src_normals = random_sample_points(src_points, self.num_points, normals=src_normals)

def random_sample_points(points, num_samples, normals=None):
    r"""Randomly sample points."""
    num_points = points.shape[0]
    sel_indices = np.random.permutation(num_points)
    if num_points > num_samples:
        sel_indices = sel_indices[:num_samples]
    elif num_points < num_samples:
        num_iterations = num_samples // num_points
        num_paddings = num_samples % num_points
        all_sel_indices = [sel_indices for _ in range(num_iterations)]
        if num_paddings > 0:
            all_sel_indices.append(sel_indices[:num_paddings])
        sel_indices = np.concatenate(all_sel_indices, axis=0)
    points = points[sel_indices]
    if normals is not None:
        normals = normals[sel_indices]
        return points, normals
    else:
        return points
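
A minimal sketch of both branches on synthetic data (the array sizes are arbitrary): downsampling when the cloud has more points than requested, and repeat-padding when it has fewer.

import numpy as np

cloud = np.random.rand(1000, 3)
down = random_sample_points(cloud, 256)           # 1000 > 256: keep 256 indices of a random permutation
padded = random_sample_points(cloud[:100], 256)   # 100 < 256: indices are tiled up to 256
print(down.shape, padded.shape)                   # (256, 3) (256, 3)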

Farthest Point Sampling

Makes the sampled points cover the whole cloud as evenly as possible.
Source: https://github.com/yanx27/Pointnet_Pointnet2_pytorch
Sample the indices of, say, 512 out of 1024 points, then use those indices to gather the corresponding 512 points from the original cloud.

Farthest Point Sampling for a Single Sample

def farthest_point_sample(point, npoint):
    """
    Input:
        point: pointcloud data, [N, D]
        npoint: number of samples
    Return:
        point: sampled pointcloud, [npoint, D]
    """
    N, D = point.shape
    xyz = point[:,:3]
    centroids = np.zeros((npoint,))
    distance = np.ones((N,)) * 1e10
    farthest = np.random.randint(0, N)
    for i in range(npoint):
        centroids[i] = farthest
        centroid = xyz[farthest, :]
        dist = np.sum((xyz - centroid) ** 2, -1)
        mask = dist < distance
        distance[mask] = dist[mask]
        farthest = np.argmax(distance, -1)
    point = point[centroids.astype(np.int32)]
    return point
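
For example, a quick sketch on random data (the 1024/512 sizes match the description above; the extra columns stand in for normals or colors):

import numpy as np

cloud = np.random.rand(1024, 6)             # xyz plus 3 extra feature channels
sampled = farthest_point_sample(cloud, 512)
print(sampled.shape)                        # (512, 6)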

Farthest Point Sampling with a Batch Dimension

import torch

new_xyz = index_points(xyz, farthest_point_sample(xyz, S))

def farthest_point_sample(xyz, npoint):
    """
    E.g. sample 512 points out of 1024; this returns the indices of the 512 sampled points.
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    distance = torch.ones(B, N).to(device) * 1e10
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) # the first sampled point is a randomly chosen index
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(npoint):
        centroids[:, i] = farthest
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) # coordinates of the current sampled point, (B, 1, 3)
        dist = torch.sum((xyz - centroid) ** 2, -1) # squared distance from the current sampled point to every point
        mask = dist < distance # keep the smaller value, i.e. maintain each point's distance to its nearest sampled point
        distance[mask] = dist[mask]
        farthest = torch.max(distance, -1)[1] # the next farthest point is the one with the largest entry in the updated distance table
    return centroids
def index_points(points, idx):
    """
    Gather the points at the given indices from the original point cloud (e.g. the 512 sampled points).
    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S]
    Return:
        new_points: indexed points data, [B, S, C]
    """
    device = points.device
    B = points.shape[0]
    view_shape = list(idx.shape)
    view_shape[1:] = [1] * (len(view_shape) - 1)
    repeat_shape = list(idx.shape)
    repeat_shape[0] = 1
    batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
    new_points = points[batch_indices, idx, :]
    return new_points
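
A minimal usage sketch on random tensors (CPU is fine; the sizes are arbitrary and only the shapes matter):

import torch

xyz = torch.rand(8, 1024, 3)              # B = 8 clouds of 1024 points
idx = farthest_point_sample(xyz, 512)     # (8, 512) sampled indices
new_xyz = index_points(xyz, idx)          # (8, 512, 3) sampled coordinates
print(idx.shape, new_xyz.shape)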

Ball Query

Reference: https://github.com/yanx27/Pointnet_Pointnet2_pytorch

models/pointnet2_utils.py
group_idx = query_ball_point(radius, K, xyz, new_xyz) # returns indices
grouped_xyz = index_points(xyz, group_idx) # gather the actual points of each group
grouped_xyz -= new_xyz.view(B, S, 1, C) # center each group; new_xyz serves as the cluster centers

def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, 3]
        new_xyz: query points, [B, S, 3]
    Return:
        group_idx: grouped points index, [B, S, nsample]  # e.g. [B, 512, 16]
    """
    device = xyz.device
    B, N, C = xyz.shape
    _, S, _ = new_xyz.shape
    group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    sqrdists = square_distance(new_xyz, xyz) # [B, S, N]: squared distance between every query point and every source point
    group_idx[sqrdists > radius ** 2] = N  # points outside the radius get the out-of-range index N
    group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample] # sort ascending so the out-of-range indices move to the end, then keep the first nsample
    group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample]) # if fewer than nsample points fall inside the radius, pad with the first (closest) point
    mask = group_idx == N
    group_idx[mask] = group_first[mask]
    return group_idx


def square_distance(src, dst):
    """
    Calculate the squared Euclidean distance between every pair of points.

    src^T * dst = xn * xm + yn * ym + zn * zm;
    sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
    sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
    dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
         = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst

    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
    dist += torch.sum(src ** 2, -1).view(B, N, 1)
    dist += torch.sum(dst ** 2, -1).view(B, 1, M)
    return dist
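
Putting the pieces together, a self-contained sketch with random tensors (the radius 0.2 and K = 16 here are arbitrary choices for illustration):

import torch

B, N, S, K = 4, 1024, 512, 16
xyz = torch.rand(B, N, 3)                                   # full point cloud
new_xyz = index_points(xyz, farthest_point_sample(xyz, S))  # S query centers
group_idx = query_ball_point(0.2, K, xyz, new_xyz)          # (B, S, K) neighbor indices
grouped_xyz = index_points(xyz, group_idx)                  # (B, S, K, 3) grouped points
grouped_xyz = grouped_xyz - new_xyz.view(B, S, 1, 3)        # center each group on its query point
print(group_idx.shape, grouped_xyz.shape)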

Noise (Random Jitter)

# random jitter
if self.noise_magnitude is not None:
    ref_points = random_jitter_points(ref_points, scale=0.01, noise_magnitude=self.noise_magnitude)
    src_points = random_jitter_points(src_points, scale=0.01, noise_magnitude=self.noise_magnitude)

def random_jitter_points(points, scale, noise_magnitude=0.05):
    r"""Randomly jitter point cloud."""
    noises = np.clip(np.random.normal(scale=scale, size=points.shape), a_min=-noise_magnitude, a_max=noise_magnitude)
    points = points + noises
    return points
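
For example (a sketch; scale=0.01 with clipping at 0.05 follows the call above), the added noise never exceeds noise_magnitude in any coordinate:

import numpy as np

points = np.random.rand(2048, 3)
jittered = random_jitter_points(points, scale=0.01, noise_magnitude=0.05)
print(np.abs(jittered - points).max())  # <= 0.05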

Shuffle the Point Cloud

# random shuffle
ref_points, ref_normals = random_shuffle_points(ref_points, normals=ref_normals)
src_points, src_normals = random_shuffle_points(src_points, normals=src_normals)

def random_shuffle_points(points, normals=None):
    r"""Randomly permute point cloud."""
    indices = np.random.permutation(points.shape[0])
    points = points[indices]
    if normals is not None:
        normals = normals[indices]
        return points, normals
    else:
        return points