我们通过深度相机获取的物体点云数据十分庞大,后续的点云数据预处理需要大量的计算时间;传统的体素网格法算法简单,但不能很好地保留物体的细节部分。
我把整个程序分成了两个部分,精简的效果还是不错的,程序太长就不上图了。
一:设置体素大小,提取点云体素中心
#include<iostream>
#include<pcl/visualization/pcl_visualizer.h>
#include<pcl/visualization/cloud_viewer.h>
#include<pcl/octree/octree.h>
#include<pcl/io/pcd_io.h>
#include<vector>
using namespace std;
// NOTE(review): "AlignedPointT" is a misleading name — this alias is an Eigen
// aligned *allocator* for pcl::PointXYZ, not a point type. It is only used
// below as the allocator argument of std::vector (required because
// pcl::PointXYZ is an Eigen-aligned type). Consider renaming to AlignedAllocT.
typedef Eigen::aligned_allocator<pcl::PointXYZ> AlignedPointT;
// Shorthand for the point type used throughout this example.
typedef pcl::PointXYZ PointT;
int main()
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
pcl::io::loadPCDFile("C:\\Users\\15390\\Desktop\\horse实验结果\\horse.pcd", *cloud);
float resolution = 0.002; //体素的大小
cout << "before点云" << cloud->points.size() << endl;
pcl::octree::OctreePointCloud<pcl::PointXYZ> octree(resolution);
octree.setInputCloud(cloud);
octree.addPointsFromInputCloud();
vector<PointT, AlignedPointT> voxel_centers;
octree.getOccupiedVoxelCenters(voxel_centers);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_core(new pcl::PointCloud<pcl::PointXYZ>);
cloud_core->width = voxel_centers.size();
cloud_core->height = 1;
cloud_core->points.resize(cloud_core->height*cloud_core->width);
for (size_t i = 0; i < voxel_centers.size() - 1; i++)
{
cloud_core->points[i].x = voxel_centers[i].x;
cloud_core->points[i].y = voxel_centers[i].y;
cloud_core->points[i].z = voxel_centers[i].z;
}
pcl::PCDWriter writer;
writer.write("C:\\User