Open3D - Point Cloud

一、Visualize point cloud

import open3d as o3d
import numpy as np


print("Load a ply point cloud, print it, and render it")
ply_point_cloud = o3d.data.PLYPointCloud()
pcd = o3d.io.read_point_cloud(ply_point_cloud.path)
print(pcd)
print(np.asarray(pcd.points))
o3d.visualization.draw_geometries([pcd],
                                  zoom=0.3412,
                                  front=[0.4257, -0.2125, -0.8795],
                                  lookat=[2.6172, 2.0475, 1.532],
                                  up=[-0.0694, -0.9768, 0.2024])

o3d.io.read_point_cloud(ply_point_cloud.path)——reads the point cloud from the dataset object's local path and returns an open3d.cuda.pybind.geometry.PointCloud object

np.asarray(pcd.points)——exposes the point cloud in its usual array form; each point has three coordinate components

[[0.65234375 0.84686458 2.37890625]
 [0.65234375 0.83984375 2.38430572]
 [0.66737998 0.83984375 2.37890625]
 ...
 [2.00839925 2.39453125 1.88671875]
 [2.00390625 2.39488506 1.88671875]
 [2.00390625 2.39453125 1.88793314]]

.shape: (196133, 3)

o3d.visualization.draw_geometries([pcd])——renders the pcd (an open3d.cuda.pybind.geometry.PointCloud object); the trailing camera parameters (zoom, front, lookat, up) only set the viewpoint and rendering works without them
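
Before visualizing, a few quick checks on the loaded object can be handy; a minimal sketch (these calls exist on open3d.geometry.PointCloud):

print(len(pcd.points))    # number of points, 196133 for this dataset
print(pcd.has_colors())   # whether per-point colors were loaded
print(pcd.has_normals())  # whether per-point normals were loaded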

二、Voxel downsampling

print("Downsample the point cloud with a voxel of 0.05")
downpcd = pcd.voxel_down_sample(voxel_size=0.05)
o3d.visualization.draw_geometries([downpcd],
                                  zoom=0.3412,
                                  front=[0.4257, -0.2125, -0.8795],
                                  lookat=[2.6172, 2.0475, 1.532],
                                  up=[-0.0694, -0.9768, 0.2024])

pcd.voxel_down_sample(voxel_size=0.1)——voxel downsampling; the larger the voxel size, the larger each voxel and the fewer points remain; see the comparison sketch below
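
A small sketch to see how voxel_size affects the number of surviving points (the exact counts depend on the dataset):

for vs in (0.02, 0.05, 0.1):
    down = pcd.voxel_down_sample(voxel_size=vs)
    print(f"voxel_size={vs}: {len(down.points)} points left")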

三、Vertex normal estimation

print("Recompute the normal of the downsampled point cloud")
downpcd.estimate_normals(
    search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
o3d.visualization.draw_geometries([downpcd],
                                  zoom=0.3412,
                                  front=[0.4257, -0.2125, -0.8795],
                                  lookat=[2.6172, 2.0475, 1.532],
                                  up=[-0.0694, -0.9768, 0.2024],
                                  point_show_normal=True)
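
After estimate_normals(), the normal vectors are stored on the point cloud itself and can be read back as an array; a minimal sketch:

print(downpcd.has_normals())            # True once normals are estimated
print(np.asarray(downpcd.normals)[:5])  # first five estimated normal vectors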

五、Crop point cloud

print("Load a polygon volume and use it to crop the original point cloud")
demo_crop_data = o3d.data.DemoCropPointCloud()
pcd = o3d.io.read_point_cloud(demo_crop_data.point_cloud_path)
vol = o3d.visualization.read_selection_polygon_volume(demo_crop_data.cropped_json_path)
chair = vol.crop_point_cloud(pcd)
o3d.visualization.draw_geometries([chair],
                                  zoom=0.7,
                                  front=[0.5439, -0.2333, -0.8060],
                                  lookat=[2.4615, 2.1331, 1.338],
                                  up=[-0.1781, -0.9708, 0.1608])
demo_crop_data.point_cloud_path——path of the point cloud file
o3d.visualization.read_selection_polygon_volume(demo_crop_data.cropped_json_path)——reads the polygon selection volume that defines the crop region
vol.crop_point_cloud(pcd)——selection volume .crop_point_cloud(original cloud); keeps only the points inside the volume
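
To keep the cropped result for later use, it can be written back to disk; a minimal sketch (the output filename is just an example):

o3d.io.write_point_cloud("cropped_chair.ply", chair)  # hypothetical output path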

六、Paint point cloud

print("Paint chair")
chair.paint_uniform_color([1, 0.706, 0])
o3d.visualization.draw_geometries([chair],
                                  zoom=0.7,
                                  front=[0.5439, -0.2333, -0.8060],
                                  lookat=[2.4615, 2.1331, 1.338],
                                  up=[-0.1781, -0.9708, 0.1608])

paint_uniform_color——paints every point with the same color; the color is in RGB space, [0, 1] range
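
For contrast, the rest of the scene can be painted a neutral color and drawn together with the chair; a minimal sketch (colors chosen arbitrarily):

pcd.paint_uniform_color([0.8, 0.8, 0.8])  # paint the full cloud light gray
o3d.visualization.draw_geometries([pcd, chair])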

七、Point cloud distance

# Load data
demo_crop_data = o3d.data.DemoCropPointCloud()
pcd = o3d.io.read_point_cloud(demo_crop_data.point_cloud_path)
vol = o3d.visualization.read_selection_polygon_volume(demo_crop_data.cropped_json_path)
chair = vol.crop_point_cloud(pcd)

dists = pcd.compute_point_cloud_distance(chair)
dists = np.asarray(dists)
ind = np.where(dists > 0.01)[0]
pcd_without_chair = pcd.select_by_index(ind)
o3d.visualization.draw_geometries([pcd_without_chair],
                                  zoom=0.3412,
                                  front=[0.4257, -0.2125, -0.8795],
                                  lookat=[2.6172, 2.0475, 1.532],
                                  up=[-0.0694, -0.9768, 0.2024])

dists = pcd.compute_point_cloud_distance(chair)  #  source cloud .compute_point_cloud_distance(target cloud)
dists = np.asarray(dists)     ##  typical usage: convert to a 1-D array of distances, one per source point

ind = np.where(dists > 0.01)[0]    #  indices of the points that satisfy the condition

pcd.select_by_index(ind)——picks the points of pcd whose indices appear in ind
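
The same indices can also be inverted to keep the opposite subset, just as segment_plane's inliers are handled later; a minimal sketch:

near_chair = pcd.select_by_index(ind, invert=True)  # points within 0.01 of the chair
print(len(near_chair.points), len(pcd_without_chair.points))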

Noise-removal example: https://zhuanlan.zhihu.com/p/462325566 (Open3D's compute_point_cloud_distance computes, for each point in the source cloud, the distance to its nearest neighbor in the target cloud)

八、Bounding volumes

aabb = chair.get_axis_aligned_bounding_box()  #  axis-aligned bounding box
aabb.color = (1, 0, 0)                   #  red box
obb = chair.get_oriented_bounding_box()  #  oriented bounding box
obb.color = (0, 1, 0)                    #  green box
o3d.visualization.draw_geometries([chair, aabb, obb],
                                  zoom=0.7,
                                  front=[0.5439, -0.2333, -0.8060],
                                  lookat=[2.4615, 2.1331, 1.338],
                                  up=[-0.1781, -0.9708, 0.1608])
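
Both boxes also expose their geometry numerically; a minimal sketch (method and attribute names as in the Open3D geometry API):

print(aabb.get_min_bound(), aabb.get_max_bound())  # corners of the axis-aligned box
print(aabb.get_extent())                           # box size along x, y, z
print(obb.center, obb.extent)                      # center and size of the oriented box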

九、Convex hull

bunny = o3d.data.BunnyMesh()
mesh = o3d.io.read_triangle_mesh(bunny.path)
mesh.compute_vertex_normals()

pcl = mesh.sample_points_poisson_disk(number_of_points=2000)    # sample a point cloud from the mesh
hull, _ = pcl.compute_convex_hull()                             # convex hull as a TriangleMesh (plus hull point indices)
hull_ls = o3d.geometry.LineSet.create_from_triangle_mesh(hull)  # draw the hull as a wireframe
hull_ls.paint_uniform_color((1, 0, 0))
o3d.visualization.draw_geometries([pcl, hull_ls])
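
The hull returned by compute_convex_hull() is an ordinary TriangleMesh, so it can be inspected like any other mesh; a minimal sketch:

print(len(hull.vertices), len(hull.triangles))  # size of the hull mesh
print(hull.is_watertight())                     # a convex hull of a point set should be watertight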

十一、Plane segmentation (segmenting the plane with the largest support)

pcd_point_cloud = o3d.data.PCDPointCloud()
pcd = o3d.io.read_point_cloud(pcd_point_cloud.path)

plane_model, inliers = pcd.segment_plane(distance_threshold=0.01,
                                         ransac_n=3,
                                         num_iterations=1000)
#  distance_threshold: the maximum distance a point may have to the estimated plane to still count as an inlier
#  ransac_n: the number of points randomly sampled to estimate a plane
#  num_iterations: how often a random plane is sampled and verified

[a, b, c, d] = plane_model
print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")

inlier_cloud = pcd.select_by_index(inliers)  # pick the points listed in inliers
inlier_cloud.paint_uniform_color([1.0, 0, 0])  # paint the in-plane points red
outlier_cloud = pcd.select_by_index(inliers, invert=True)  # invert the selection to get everything off the plane
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud],
                                  zoom=0.8,
                                  front=[-0.4999, -0.1659, -0.8499],
                                  lookat=[2.1813, 2.0619, 2.0999],
                                  up=[0.1204, -0.9852, 0.1215])
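
The same call can be repeated on the remaining points to peel off further planes; a minimal sketch of one extra iteration:

rest = pcd.select_by_index(inliers, invert=True)
plane_model2, inliers2 = rest.segment_plane(distance_threshold=0.01,
                                            ransac_n=3,
                                            num_iterations=1000)
print(f"second plane keeps {len(inliers2)} of {len(rest.points)} remaining points")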

Reference: Open3D中RANSAC平面拟合 (CSDN), https://blog.youkuaiyun.com/qq_35706216/article/details/108420011. segment_plane fits a plane with RANSAC; it takes distance_threshold (the maximum point-to-plane distance for a point to count as an inlier), ransac_n (the number of points sampled to estimate a plane), and num_iterations (how many random planes are sampled and verified), and returns the plane (a, b, c, d) such that every point (x, y, z) on it satisfies ax + by + cz + d = 0.

十二、Open3D and PointNet integration

Background: Open3D is an open-source library focused on 3D data and supports operations on point clouds, meshes, and other structures. PointNet is a deep-learning model designed specifically for point-cloud analysis; it extracts geometric features for classification and segmentation tasks. Combining the two can substantially strengthen a point-cloud analysis pipeline.

Integrating PointNet with Open3D typically involves three pieces of work:

1. Point-cloud preprocessing. Use Open3D to clean and standardize the raw point cloud: remove noise, downsample, and normalize the coordinate range.
2. Feature-extraction network. Build a PointNet-style network in PyTorch or TensorFlow: apply shared MLP layers to the input point set followed by max pooling to capture a global feature.
3. Training setup. Train the model on a labeled 3D-object dataset (such as ModelNet40) until convergence, tuning hyperparameters to optimize performance.

The following Python/PyTorch example shows how to load a point cloud with Open3D and run it through a simplified, custom PointNet for inference:

```python
import numpy as np
import open3d as o3d
import torch
from torch import nn


class SimplePointNet(nn.Module):
    def __init__(self, num_classes=16):
        super(SimplePointNet, self).__init__()
        self.mlp1 = nn.Sequential(
            nn.Conv1d(3, 64, kernel_size=1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 128, kernel_size=1),
            nn.BatchNorm1d(128),
            nn.ReLU()
        )
        self.fc = nn.Linear(128, num_classes)

    def forward(self, x):
        # Input shape should be BxNx3, where N is the number of points.
        x = x.transpose(1, 2).contiguous()  # convert to BxCxN as required by the Conv1d layers
        features = self.mlp1(x)
        global_features = torch.max(features, dim=-1)[0]  # max pooling over the point dimension
        output = self.fc(global_features)
        return output


def load_and_preprocess_pcd(file_path):
    pcd = o3d.io.read_point_cloud(file_path)
    downsampled_pcd = pcd.voxel_down_sample(voxel_size=0.02)  # down-sample for efficiency
    points = np.asarray(downsampled_pcd.points)
    points = points / np.max(np.linalg.norm(points, axis=1))  # normalize the coordinate range
    tensor_input = torch.tensor(points[:1024], dtype=torch.float32).unsqueeze(0)  # keep the first 1024 points, add a batch dimension
    return tensor_input


if __name__ == "__main__":
    model = SimplePointNet(num_classes=10)
    test_data = load_and_preprocess_pcd("example.ply")  # replace with your actual file path
    predictions = model(test_data)
    print(predictions.argmax(dim=-1))  # print the predicted class index
```

This script covers the whole path from reading a PLY file to running a forward pass and reading off a prediction. Note that it is only a simplified PointNet; a real deployment would extend the architecture as needed.

Conclusion: combining Open3D's data handling with a PointNet model in this way yields a working framework that inherits the strengths of both and provides a solid basis for exploring more complex scenarios.