Links to the series:
[SLAM] LIO-SAM Explained — Data Preprocessing: imageProjection (2)
[SLAM] LIO-SAM Explained — Feature Extraction: featureTrack (3)
[SLAM] LIO-SAM Explained — IMU Preintegration: IMU-Preintegration (4)
[SLAM] LIO-SAM Explained — Back-End Optimization: MapOptimization (5)
[SLAM] LIO-SAM Explained — Odometry Fusion: transformFusion (6)
Key points:
How to use GTSAM to optimize the low-frequency lidar poses in the back end;
How to find, inside the global map, the local map relevant to the latest frame;
How scan2MapOptimization solves the pose by running point-to-plane (and point-to-line) ICP between the latest scan and the local map;
Why the map clouds are stored in each frame's own lidar coordinate frame.
5. Overview
mapOptimization.cpp is the largest file in LIO-SAM by code volume, and the one involving the most algorithmic content.
Main function:
laserCloudInfoHandler()
Inputs (of this node):
corner and surface point data from featureExtraction (the IMU odometry from imuPreintegration and the external IMU data are tucked away in here too, because imageProjection subscribes to those two and forwards them through featureExtraction);
GPS data;
loop-closure frame-ID pairs.
Outputs:
a lidar odometry tailored for transformFusion, which transformFusion subscribes to;
a lidar odometry tailored for imuPreintegration, which imuPreintegration subscribes to.
See the flowchart for details. As usual, let's start with the constructor:
mapOptimization()
{
// ISAM2 parameters
ISAM2Params parameters;
parameters.relinearizeThreshold = 0.1;
parameters.relinearizeSkip = 1;
isam = new ISAM2(parameters);
// publish the historical keyframe trajectory
pubKeyPoses = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/trajectory", 1);
// publish the feature cloud of the surrounding keyframe map
pubLaserCloudSurround = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/map_global", 1);
// publish the lidar odometry, shown in rviz as a coordinate axis
pubLaserOdometryGlobal = nh.advertise<nav_msgs::Odometry> ("lio_sam/mapping/odometry", 1);
// publish a lidar odometry almost identical to the one above, except roll and pitch are weight-averaged with IMU data and z is constrained
pubLaserOdometryIncremental = nh.advertise<nav_msgs::Odometry> ("lio_sam/mapping/odometry_incremental", 1);
// publish the lidar odometry path, shown in rviz as the vehicle trajectory
pubPath = nh.advertise<nav_msgs::Path>("lio_sam/mapping/path", 1);
// subscribe to the current scan's cloud info, from featureExtraction
subCloud = nh.subscribe<lio_sam::cloud_info>("lio_sam/feature/cloud_info", 1, &mapOptimization::laserCloudInfoHandler, this, ros::TransportHints().tcpNoDelay());
// subscribe to the GPS odometry
subGPS = nh.subscribe<nav_msgs::Odometry> (gpsTopic, 200, &mapOptimization::gpsHandler, this, ros::TransportHints().tcpNoDelay());
// subscribe to loop pairs from an external loop-closure detector; none ships with this project, so in practice this goes unused
subLoop = nh.subscribe<std_msgs::Float64MultiArray>("lio_loop/loop_closure_detection", 1, &mapOptimization::loopInfoHandler, this, ros::TransportHints().tcpNoDelay());
// advertise the map-saving service
srvSaveMap = nh.advertiseService("lio_sam/save_map", &mapOptimization::saveMapService, this);
// publish the local map of the loop-closure candidate keyframe
pubHistoryKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/icp_loop_closure_history_cloud", 1);
// publish the current keyframe's feature cloud after applying the loop-corrected pose
pubIcpKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/icp_loop_closure_corrected_cloud", 1);
// publish loop-closure edges, shown in rviz as lines between loop frames
pubLoopConstraintEdge = nh.advertise<visualization_msgs::MarkerArray>("/lio_sam/mapping/loop_closure_constraints", 1);
// publish the downsampled surface points of the local map
pubRecentKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/map_local", 1);
// publish the downsampled corner and surface set of the current keyframe (registered to the map)
pubRecentKeyFrame = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/cloud_registered", 1);
// publish the current frame's raw cloud after registration
pubCloudRegisteredRaw = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/cloud_registered_raw", 1);
downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize);
downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);
downSizeFilterICP.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);
downSizeFilterSurroundingKeyPoses.setLeafSize(surroundingKeyframeDensity, surroundingKeyframeDensity, surroundingKeyframeDensity); // for surrounding key poses of scan-to-map optimization
allocateMemory();
}
The constructor is a pile of plumbing, most of it for visualization; the part that really matters is laserCloudInfoHandler(), the callback for lidar frames.
5.1 Computing the lidar odometry: laserCloudInfoHandler()
The flow matches a typical SLAM system. First you need an initial estimate of the pose at the latest lidar frame (updateInitialGuess()); the code will refine the pose anyway, but the initial value can't be too far off. Then the current scan's cloud is obtained (downsampleCurrentScan()) and matched against the map to get a better pose for the current frame (scan2MapOptimization()); of course you can't match against the whole global map, only the clouds close to the current frame (extractSurroundingKeyFrames()). Unfortunately even that match may not be perfect, so a back end is still needed to fuse all the inputs and produce a truly accurate pose (saveKeyFramesAndFactor());
then all state variables are updated (correctPoses()), and finally the results are handed to the downstream modules (publishOdometry()/publishFrames()). Next we pick apart each step's implementation one by one.
void laserCloudInfoHandler(const lio_sam::cloud_infoConstPtr& msgIn)
{
// grab the timestamp of the current scan
timeLaserInfoStamp = msgIn->header.stamp;
timeLaserInfoCur = msgIn->header.stamp.toSec();
// extract the corner and surface sets of the current scan provided by featureExtraction
cloudInfo = *msgIn;
pcl::fromROSMsg(msgIn->cloud_corner, *laserCloudCornerLast);
pcl::fromROSMsg(msgIn->cloud_surface, *laserCloudSurfLast);
std::lock_guard<std::mutex> lock(mtx);
// keep the processing rate steady
static double timeLastProcessing = -1;
if (timeLaserInfoCur - timeLastProcessing >= mappingProcessInterval)
{
timeLastProcessing = timeLaserInfoCur;
// initialize the current frame's pose
updateInitialGuess();
// build the local map cloud
extractSurroundingKeyFrames();
// prepare the current scan's cloud
downsampleCurrentScan();
// scan-to-map optimization of the current frame's pose
scan2MapOptimization();
// add the current frame as a keyframe and run factor-graph optimization
saveKeyFramesAndFactor();
// update the poses of all variable nodes in the factor graph, i.e. all historical keyframes, and refresh the odometry path
correctPoses();
// publish the lidar odometry
publishOdometry();
// publish odometry, point clouds, and the path
publishFrames();
}
}
5.1.1 Initial pose estimate for the current frame: updateInitialGuess()
Recall imageProjection: it provides the lidar scan cloud, and bundled with it the IMU odometry and IMU rotation angles. Normally, the odometry it carries can simply be used as the initial pose estimate.
For the first frame, though, this is a chicken-and-egg problem. cloudInfo comes from imageProjection, whose odometry comes from IMUPreintegration; but for IMUPreintegration to produce high-frequency odometry, it first needs mapOptimization to provide low-frequency odometry, and mapOptimization is exactly where we are now, so the circle closes.
Hence the first lidar frame from imageProjection carries no odometry, and this function must distinguish the first frame from the later ones. Many write-ups describe two odometries, an "IMU odometry" and a "lidar odometry"; I find that naming ambiguous, because in the code both odometries express lidar->world. The only two differences are where they are generated and at what frequency.
The core of the function splits into three cases: first frame, second frame, and third frame onwards:
The first frame: the input carries only the IMU rotation and no odometry, so the first pose is initialized from the IMU rotation;
The second frame: the input now carries odometry, but there is no previous odometry yet, so the transform between the two frames must come from IMU data; that transform is then applied to the previous pose to get the initial value of the current pose.
Third frame onwards: the previous input already carried odometry, so the pose change between the two frames can be computed from the input odometry and applied to the previous pose to get the initial value of the current pose.
It's a little convoluted; take it slowly.
Entering the function, the code first saves the previous frame's pose into incrementalOdometryAffineFront for use elsewhere. transformTobeMapped is the container this class maintains for the live pose, a float[6] laid out as [roll, pitch, yaw, x, y, z]. A small gripe here about the author's naming conventions......
void updateInitialGuess()
{
// the previous frame's pose; note: "pose" here means the lidar pose, abbreviated as such below. Twri, because transformTobeMapped is updated in this function
incrementalOdometryAffineFront = trans2Affine3f(transformTobeMapped);
Next, a variable is defined to hold the previous frame's IMU rotation input from cloudInfo. Why? The code does not trust the absolute poses coming from cloudInfo, only its own pose transformTobeMapped; what it does trust is the change between the previous and the current cloudInfo poses. Applying that change to its own previous pose yields the initial estimate of the current pose. That is the core idea of this function.
This particular variable serves the estimation of the second frame's pose.
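In symbols, writing $T^{w}_{r_i}$ for the pose of lidar frame $i$ in the world (the Twri of the comments below) and $T^{in}_{r_i}$ for the pose reported in cloudInfo, the composition is

$$
T^{w}_{r_{i+1}} = T^{w}_{r_i}\,\underbrace{\bigl(T^{in}_{r_i}\bigr)^{-1} T^{in}_{r_{i+1}}}_{\texttt{transIncre}}
$$

Only the bracketed increment comes from the input; the absolute pose on the left is always propagated from the code's own transformTobeMapped.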
// the previous frame's initial attitude (from the IMU data in cloudInfo)
static Eigen::Affine3f lastImuTransformation;
(1) If this is the first lidar frame, there is certainly no odometry yet, but there is IMU data, so the initial rotation is recorded as the first frame's pose estimate:
// 1. For the first frame, initialize the current pose (rotation part) with the raw IMU RPY
// if the keyframe set is empty, keep initializing; 1st frame
if (cloudKeyPoses3D->points.empty())
{
// initialize the rotation part of the current pose with the RPY from the scan info (raw IMU data)
transformTobeMapped[0] = cloudInfo.imuRollInit;
transformTobeMapped[1] = cloudInfo.imuPitchInit;
transformTobeMapped[2] = cloudInfo.imuYawInit;
if (!useImuHeadingInitialization)
transformTobeMapped[2] = 0;
lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);
return;
}
(2) If this is the second lidar frame, first record the input odometry:
static bool lastImuPreTransAvailable = false;
static Eigen::Affine3f lastImuPreTransformation;
if (cloudInfo.odomAvailable == true)
{
// the current frame's initial pose estimate (from the IMU odometry), used below to compute the incremental transform Tr0ri+1
Eigen::Affine3f transBack = pcl::getTransformation(cloudInfo.initialGuessX, cloudInfo.initialGuessY, cloudInfo.initialGuessZ,
cloudInfo.initialGuessRoll, cloudInfo.initialGuessPitch, cloudInfo.initialGuessYaw);
if (lastImuPreTransAvailable == false) //2nd frame
{
// store as the previous value Tr0ri; this value is used from the 3rd frame on
lastImuPreTransformation = transBack;
lastImuPreTransAvailable = true;
}
We skip the else {} for now and look at what comes after it:
Here translation is ignored and only rotation is used. The previous IMU rotation is known, the current IMU rotation is known, so the rotation change is known; apply it to the previous pose and you get the current pose estimate.
if (cloudInfo.imuAvailable == true)
{
// the current frame's attitude (from raw IMU data) Twri+1
Eigen::Affine3f transBack = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);
// attitude change of the current frame relative to the previous one: Triri+1 = Twri.inv * Twri+1 (here is the only use of lastImuTransformation)
Eigen::Affine3f transIncre = lastImuTransformation.inverse() * transBack;
// the previous frame's pose Twri
Eigen::Affine3f transTobe = trans2Affine3f(transformTobeMapped);
// the current frame's pose Twri+1 = Twri * Triri+1
Eigen::Affine3f transFinal = transTobe * transIncre;
// update transformTobeMapped with the current pose Twri+1
pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],
transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);
//this value is used only on 2nd frame (above), so I think it is no use to save
lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return;
return;
}
}
(3) From the third frame onwards, the change in the input odometry can be applied to the previous pose to estimate the current pose. Back to the else branch skipped above:
else { //3rd frame
// pose change of the current frame relative to the previous one, computed from the IMU odometry: Triri+1 = Tr0ri.inv * Tr0ri+1
Eigen::Affine3f transIncre = lastImuPreTransformation.inverse() * transBack;
// the previous frame's pose Twri
Eigen::Affine3f transTobe = trans2Affine3f(transformTobeMapped);
// the current frame's pose Twri+1 = Twri * Triri+1
Eigen::Affine3f transFinal = transTobe * transIncre;
// update transformTobeMapped with the current pose Twri+1
pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],
transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);
// store as the previous value Tr0ri; so transBack is the current ImuPreTransformation
lastImuPreTransformation = transBack;
//this value is used only on 2nd frame (below), so I think it is no use to save
lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return;
return;
}
5.1.2 Extracting the local map: extractSurroundingKeyFrames()
Entering the function, we find it jumps to extractNearby(). You'll notice this resembles pose-based loop detection: first gather the map frames close in time to the latest frame, which are bound to be nearby; then also pull in the frames that are distant in time but close in space. Since this is a lidar, you don't need to consider heading, only distance.
(1) Finding which keyframes make up the local map
Collect the keyframes spatially close to the current one:
// kd-tree input: the global keyframe position set (all historical keyframes)
kdtreeSurroundingKeyPoses->setInputCloud(cloudKeyPoses3D);
// radius search around the most recent keyframe for spatially adjacent keyframes
kdtreeSurroundingKeyPoses->radiusSearch(cloudKeyPoses3D->back(), (double)surroundingKeyframeSearchRadius, pointSearchInd, pointSearchSqDis);
// iterate over the search results; pointSearchInd holds indices into cloudKeyPoses3D
for (int i = 0; i < (int)pointSearchInd.size(); ++i)
{
int id = pointSearchInd[i];
// add to the set of neighboring keyframe poses
surroundingKeyPoses->push_back(cloudKeyPoses3D->points[id]);
}
// downsample
downSizeFilterSurroundingKeyPoses.setInputCloud(surroundingKeyPoses);
downSizeFilterSurroundingKeyPoses.filter(*surroundingKeyPosesDS);
Collect the keyframes close in time to the current one:
// also add some keyframes adjacent in time, e.g. when the vehicle spins in place these are reasonable to include
int numPoses = cloudKeyPoses3D->size();
for (int i = numPoses-1; i >= 0; --i)
{
if (timeLaserInfoCur - cloudKeyPoses6D->points[i].time < 10.0)
surroundingKeyPosesDS->push_back(cloudKeyPoses3D->points[i]);
else
break;
}
(2) Gathering those keyframes' points as the local map
// add the corner and surface points of the neighboring keyframes into the local map used for scan-to-map matching
extractCloud(surroundingKeyPosesDS);
Looking inside, the first step filters by distance yet again. At first I wondered why not simply include only the nearby frames from the start; the reason is that the time-adjacent frames appended above were never distance-checked, so they may lie outside the search radius:
void extractCloud(pcl::PointCloud<PointType>::Ptr cloudToExtract)
{
// the corner and surface points of the neighboring keyframes go into the local map; note: this "local map" is exactly the map used by the scan-to-map matching below
laserCloudCornerFromMap->clear();
laserCloudSurfFromMap->clear();
// iterate over the keyframes adjacent to the current frame in space and time (in fact, the neighbors of the most recent keyframe)
for (int i = 0; i < (int)cloudToExtract->size(); ++i)
{
// drop frames whose distance exceeds the threshold
if (pointDistance(cloudToExtract->points[i], cloudKeyPoses3D->back()) > surroundingKeyframeSearchRadius)
continue;
Then the points belonging to those keyframes are gathered into the local map, with corner points and surface points stored separately:
// index of the neighboring keyframe
int thisKeyInd = (int)cloudToExtract->points[i].intensity;
if (laserCloudMapContainer.find(thisKeyInd) != laserCloudMapContainer.end())
{
*laserCloudCornerFromMap += laserCloudMapContainer[thisKeyInd].first;
*laserCloudSurfFromMap += laserCloudMapContainer[thisKeyInd].second;
} else {
// the keyframe's corner and surface clouds, transformed to the world frame via its 6D pose
pcl::PointCloud<PointType> laserCloudCornerTemp = *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
pcl::PointCloud<PointType> laserCloudSurfTemp = *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
// add into the local map
*laserCloudCornerFromMap += laserCloudCornerTemp;
*laserCloudSurfFromMap += laserCloudSurfTemp;
laserCloudMapContainer[thisKeyInd] = make_pair(laserCloudCornerTemp, laserCloudSurfTemp);
}
}
// downsample the local corner map
downSizeFilterCorner.setInputCloud(laserCloudCornerFromMap);
downSizeFilterCorner.filter(*laserCloudCornerFromMapDS);
laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size();
// downsample the local surface map
downSizeFilterSurf.setInputCloud(laserCloudSurfFromMap);
downSizeFilterSurf.filter(*laserCloudSurfFromMapDS);
laserCloudSurfFromMapDSNum = laserCloudSurfFromMapDS->size();
// the cache has grown too large; clear it to free memory
if (laserCloudMapContainer.size() > 1000)
laserCloudMapContainer.clear();
}
5.1.3 Getting and downsampling the current scan: downsampleCurrentScan()
To see where the current scan comes from, look back in laserCloudInfoHandler():
// extract the corner and surface sets of the current scan
cloudInfo = *msgIn;
pcl::fromROSMsg(msgIn->cloud_corner, *laserCloudCornerLast);
pcl::fromROSMsg(msgIn->cloud_surface, *laserCloudSurfLast);
Only afterwards is the downsampling done, here:
void downsampleCurrentScan()
{
// downsample the current scan's corner set
laserCloudCornerLastDS->clear();
downSizeFilterCorner.setInputCloud(laserCloudCornerLast);
downSizeFilterCorner.filter(*laserCloudCornerLastDS);
laserCloudCornerLastDSNum = laserCloudCornerLastDS->size();
// downsample the current scan's surface set
laserCloudSurfLastDS->clear();
downSizeFilterSurf.setInputCloud(laserCloudSurfLast);
downSizeFilterSurf.filter(*laserCloudSurfLastDS);
laserCloudSurfLastDSNum = laserCloudSurfLastDS->size();
}
5.1.4 Matching the current scan against the local map to update the pose: scan2MapOptimization()
This part is the main course! It would be wrong to cover it under this little sub-heading; please jump ahead to Section 5.2.
5.1.5 Back-end optimization: saveKeyFramesAndFactor()
It doesn't matter if you can't derive the formulas; know what a factor graph is and GTSAM handles the rest. First the lidar odometry factor is added. For the first frame, the variances on yaw and on the translation components are set very large; a large variance means a weak constraint, so the prior barely pins those components down and leaves them essentially free.
For subsequent frames the variances are much smaller. What goes into the factor graph are the indices of the previous and current frames plus the pose transform between them; meanwhile the current pose is inserted into initialEstimate.
void addOdomFactor()
{
if (cloudKeyPoses3D->points.empty())
{
// prior factor to initialize the first frame
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-2, 1e-2, M_PI*M_PI, 1e8, 1e8, 1e8).finished()); // rad*rad, meter*meter
gtSAMgraph.add(PriorFactor<Pose3>(0, trans2gtsamPose(transformTobeMapped), priorNoise));
// set the variable node's initial value
initialEstimate.insert(0, trans2gtsamPose(transformTobeMapped));
}else{
// add the lidar odometry factor
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
gtsam::Pose3 poseFrom = pclPointTogtsamPose3(cloudKeyPoses6D->points.back());
gtsam::Pose3 poseTo = trans2gtsamPose(transformTobeMapped);
// arguments: previous frame id, current frame id, pose transform between them (the measurement), noise covariance
gtSAMgraph.add(BetweenFactor<Pose3>(cloudKeyPoses3D->size()-1, cloudKeyPoses3D->size(), poseFrom.between(poseTo), odometryNoise));
// set the variable node's initial value
initialEstimate.insert(cloudKeyPoses3D->size(), poseTo);
}
}
Adding the GPS factor:
void addGPSFactor()
{
if (gpsQueue.empty()) return;
// if there are no keyframes yet, or the first and last keyframes are less than 5 m apart, don't add GPS factors
if (cloudKeyPoses3D->points.empty()) return;
else
{
if (pointDistance(cloudKeyPoses3D->front(), cloudKeyPoses3D->back()) < 5.0) return;
}
// the pose covariance is small; no GPS correction needed
if (poseCovariance(3,3) < poseCovThreshold && poseCovariance(4,4) < poseCovThreshold) return;
static PointType lastGPSPoint;
while (!gpsQueue.empty())
{
// discard GPS messages more than 0.2 s older than the current scan
if (gpsQueue.front().header.stamp.toSec() < timeLaserInfoCur - 0.2) gpsQueue.pop_front();
// stop once messages are more than 0.2 s newer than the current scan
else if (gpsQueue.front().header.stamp.toSec() > timeLaserInfoCur + 0.2) break;
else
{
nav_msgs::Odometry thisGPS = gpsQueue.front();
gpsQueue.pop_front();
// the GPS noise covariance is too large to be usable
float noise_x = thisGPS.pose.covariance[0];
float noise_y = thisGPS.pose.covariance[7];
float noise_z = thisGPS.pose.covariance[14];
if (noise_x > gpsCovThreshold || noise_y > gpsCovThreshold) continue;
// GPS odometry position
float gps_x = thisGPS.pose.pose.position.x;
float gps_y = thisGPS.pose.pose.position.y;
float gps_z = thisGPS.pose.pose.position.z;
if (!useGpsElevation)
{
gps_z = transformTobeMapped[5];
noise_z = 0.01;
}
// (0,0,0) is invalid data
if (abs(gps_x) < 1e-6 && abs(gps_y) < 1e-6) continue;
// add one GPS factor every 5 m
PointType curGPSPoint;
curGPSPoint.x = gps_x;
curGPSPoint.y = gps_y;
curGPSPoint.z = gps_z;
if (pointDistance(curGPSPoint, lastGPSPoint) < 5.0) continue;
else lastGPSPoint = curGPSPoint;
// add the GPS factor
gtsam::Vector Vector3(3);
Vector3 <<
max(noise_x, 1.0f), max(noise_y, 1.0f), max(noise_z, 1.0f);
noiseModel::Diagonal::shared_ptr gps_noise = noiseModel::Diagonal::Variances(Vector3);
gtsam::GPSFactor gps_factor(cloudKeyPoses3D->size(), gtsam::Point3(gps_x, gps_y, gps_z), gps_noise);
gtSAMgraph.add(gps_factor);
aLoopIsClosed = true;
break;
}
}
}
Adding loop-closure factors:
void addLoopFactor()
{
if (loopIndexQueue.empty())
return;
// loop queue
for (int i = 0; i < (int)loopIndexQueue.size(); ++i)
{
// indices of the two frames on the loop edge
int indexFrom = loopIndexQueue[i].first;
int indexTo = loopIndexQueue[i].second;
// pose transform of the loop edge
gtsam::Pose3 poseBetween = loopPoseQueue[i];
gtsam::noiseModel::Diagonal::shared_ptr noiseBetween = loopNoiseQueue[i];
gtSAMgraph.add(BetweenFactor<Pose3>(indexFrom, indexTo, poseBetween, noiseBetween));
}
loopIndexQueue.clear();
loopPoseQueue.clear();
loopNoiseQueue.clear();
aLoopIsClosed = true;
}
Notice that what gets added to GTSAM is always a set of indices, a relative pose transform, and a noise model, and the author often hard-codes the noise as constants. After the factors are added, one round of graph optimization runs; when GPS or loop-closure input is present, several more rounds follow. One wonders whether a dedicated thread for this wouldn't be better.
// run the optimization
isam->update(gtSAMgraph, initialEstimate);
isam->update();
if (aLoopIsClosed == true)
{
isam->update();
isam->update();
isam->update();
isam->update();
isam->update();
}
Clear the factor graph:
gtSAMgraph.resize(0);
initialEstimate.clear();
Obtain the optimized pose and the accumulated covariance:
// optimization result
isamCurrentEstimate = isam->calculateEstimate();
// the current frame's resulting pose
latestEstimate = isamCurrentEstimate.at<Pose3>(isamCurrentEstimate.size()-1);
// append the current pose to cloudKeyPoses3D
thisPose3D.x = latestEstimate.translation().x();
thisPose3D.y = latestEstimate.translation().y();
thisPose3D.z = latestEstimate.translation().z();
// index
thisPose3D.intensity = cloudKeyPoses3D->size();
cloudKeyPoses3D->push_back(thisPose3D);
// append the current pose to cloudKeyPoses6D
thisPose6D.x = thisPose3D.x;
thisPose6D.y = thisPose3D.y;
thisPose6D.z = thisPose3D.z;
thisPose6D.intensity = thisPose3D.intensity ;
thisPose6D.roll = latestEstimate.rotation().roll();
thisPose6D.pitch = latestEstimate.rotation().pitch();
thisPose6D.yaw = latestEstimate.rotation().yaw();
thisPose6D.time = timeLaserInfoCur;
cloudKeyPoses6D->push_back(thisPose6D);
// pose covariance
poseCovariance = isam->marginalCovariance(isamCurrentEstimate.size()-1);
// update transformTobeMapped with the current pose
transformTobeMapped[0] = latestEstimate.rotation().roll();
transformTobeMapped[1] = latestEstimate.rotation().pitch();
transformTobeMapped[2] = latestEstimate.rotation().yaw();
transformTobeMapped[3] = latestEstimate.translation().x();
transformTobeMapped[4] = latestEstimate.translation().y();
transformTobeMapped[5] = latestEstimate.translation().z();
The current scan is then added to the global map. Interestingly, the cloud is kept in the lidar frame of its own timestamp; compared with storing it in the world frame, the benefit is that when poses are optimized later, the point clouds never need their coordinates adjusted:
// downsampled corner and surface sets of the current scan
pcl::PointCloud<PointType>::Ptr thisCornerKeyFrame(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr thisSurfKeyFrame(new pcl::PointCloud<PointType>());
pcl::copyPointCloud(*laserCloudCornerLastDS, *thisCornerKeyFrame);
pcl::copyPointCloud(*laserCloudSurfLastDS, *thisSurfKeyFrame);
// save the downsampled feature sets
cornerCloudKeyFrames.push_back(thisCornerKeyFrame);
surfCloudKeyFrames.push_back(thisSurfKeyFrame);
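A minimal sketch of why this storage choice pays off (buildGlobalMap is a hypothetical helper; transformPointCloud, cornerCloudKeyFrames, surfCloudKeyFrames and cloudKeyPoses6D are names from this file): after any pose update, the world-frame map is re-rendered simply by re-applying the latest poses, since the stored clouds themselves never change.

// hypothetical helper, mirroring what saveMapService() does later in this file
pcl::PointCloud<PointType>::Ptr buildGlobalMap()
{
    pcl::PointCloud<PointType>::Ptr map(new pcl::PointCloud<PointType>());
    for (int i = 0; i < (int)cloudKeyPoses6D->size(); ++i) {
        // cloudKeyPoses6D->points[i] is the latest (possibly re-optimized) pose of keyframe i
        *map += *transformPointCloud(cornerCloudKeyFrames[i], &cloudKeyPoses6D->points[i]);
        *map += *transformPointCloud(surfCloudKeyFrames[i],   &cloudKeyPoses6D->points[i]);
    }
    return map;
}

Had the clouds been baked into the world frame instead, every loop closure would force rewriting every stored point.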
Finally, the pose is appended to the path:
void updatePath(const PointTypePose& pose_in)
{
geometry_msgs::PoseStamped pose_stamped;
pose_stamped.header.stamp = ros::Time().fromSec(pose_in.time);
pose_stamped.header.frame_id = odometryFrame;
pose_stamped.pose.position.x = pose_in.x;
pose_stamped.pose.position.y = pose_in.y;
pose_stamped.pose.position.z = pose_in.z;
tf::Quaternion q = tf::createQuaternionFromRPY(pose_in.roll, pose_in.pitch, pose_in.yaw);
pose_stamped.pose.orientation.x = q.x();
pose_stamped.pose.orientation.y = q.y();
pose_stamped.pose.orientation.z = q.z();
pose_stamped.pose.orientation.w = q.w();
globalPath.poses.push_back(pose_stamped);
}
5.1.6 Updating the state: correctPoses()
After one round of graph optimization, it's not only the current frame's pose that changes; the other nodes' poses may shift too, so update them all:
void correctPoses()
{
if (cloudKeyPoses3D->points.empty())
return;
if (aLoopIsClosed == true)
{
// clear the local map cache
laserCloudMapContainer.clear();
// clear the odometry path
globalPath.poses.clear();
// update the poses of all variable nodes in the factor graph, i.e. all historical keyframes
int numPoses = isamCurrentEstimate.size();
for (int i = 0; i < numPoses; ++i)
{
cloudKeyPoses3D->points[i].x = isamCurrentEstimate.at<Pose3>(i).translation().x();
cloudKeyPoses3D->points[i].y = isamCurrentEstimate.at<Pose3>(i).translation().y();
cloudKeyPoses3D->points[i].z = isamCurrentEstimate.at<Pose3>(i).translation().z();
cloudKeyPoses6D->points[i].x = cloudKeyPoses3D->points[i].x;
cloudKeyPoses6D->points[i].y = cloudKeyPoses3D->points[i].y;
cloudKeyPoses6D->points[i].z = cloudKeyPoses3D->points[i].z;
cloudKeyPoses6D->points[i].roll = isamCurrentEstimate.at<Pose3>(i).rotation().roll();
cloudKeyPoses6D->points[i].pitch = isamCurrentEstimate.at<Pose3>(i).rotation().pitch();
cloudKeyPoses6D->points[i].yaw = isamCurrentEstimate.at<Pose3>(i).rotation().yaw();
// refresh the odometry path
updatePath(cloudKeyPoses6D->points[i]);
}
aLoopIsClosed = false;
}
}
5.1.7 Publishing the data: publishOdometry() and publishFrames()
In publishOdometry(), note that pubLaserOdometryIncremental publishes on the topic "lio_sam/mapping/odometry_incremental"; the laserOdomIncremental pose it carries has been weight-fused with IMU data, and it is subscribed by IMUPreintegration to produce the high-frequency IMU odometry. The pose without IMU fusion, optimized purely by mapOptimization through GTSAM, is published by pubLaserOdometryGlobal on "lio_sam/mapping/odometry" and is subscribed by transformFusion, which fuses it with the IMU odometry. The two poses may differ little numerically, but their uses differ greatly. Also note the degeneracy check attached to laserOdomIncremental: the isDegenerate flag is computed in LMOptimization of scan2map while building the J matrix for the match. If degeneracy occurs, covariance[0] is set to 1, and the graph optimization in IMUPreintegration then assigns a large variance to the current pose.
if (isDegenerate)
laserOdomIncremental.pose.covariance[0] = 1;
else
laserOdomIncremental.pose.covariance[0] = 0;
publishFrames() publishes the path and point clouds, which in the original code are consumed by rviz for visualization.
That completes laserCloudInfoHandler(), the core of mapOptimization. Notice it follows a classic pipeline: get an initial pose, get the two point-cloud sets for matching, correct the pose by scan2map matching, refine it with graph optimization, and publish. As for the graph optimization, note that GTSAM's graph here contains only the low-frequency lidar poses; neither the high-frequency IMU pose nodes (those are handled in IMUPreintegration) nor map-point nodes are added, which also spares many connecting edges.
This differs considerably from VINS. What's really interesting is that the graph optimization is spread across several classes, and the optimized high- and low-frequency poses depend on each other: the high-frequency IMU odometry builds on the low-frequency lidar odometry (IMUPreintegration subscribes to mapOptimization), while the initial value of the next low-frequency lidar odometry comes from the high-frequency IMU odometry (imageProjection subscribes to IMUPreintegration). One important benefit of this design is that the pose estimate evolves very smoothly.
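Roughly, the odometry loop between the three nodes looks like this (the first topic name appears in this file; the labels on the other arrows are descriptive, not exact topic names):

mapOptimization --("lio_sam/mapping/odometry_incremental")--> IMUPreintegration
IMUPreintegration --(high-rate IMU odometry)--> imageProjection
imageProjection --(cloud_info carrying the initial-guess pose)--> mapOptimization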
5.2 Scan-to-Map Optimization
This part is must-know material for laser SLAM / point-cloud registration. The core idea: match two point-cloud sets, solve for the relative pose transform, and apply it to the current pose as a correction. The simplest route would be PCL's ICP, and indeed the loop-closure part of this code uses exactly that. So why does the author implement his own here?
Traditional ICP matches point to point; in my tests it is fairly demanding on the initial pose (the error can't be too big) and on cloud quality (completeness/noise). Here feature matching is used instead: point-to-plane and point-to-line, which brings extra robustness; see the PCL sketch right below for the contrast.
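A minimal, self-contained PCL sketch of the two flavors; this is not LIO-SAM code, and it assumes the normal-equipped clouds have had their normals estimated beforehand:

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

void icpFlavors(pcl::PointCloud<pcl::PointXYZ>::Ptr src,
                pcl::PointCloud<pcl::PointXYZ>::Ptr tgt,
                pcl::PointCloud<pcl::PointNormal>::Ptr srcWithNormals,
                pcl::PointCloud<pcl::PointNormal>::Ptr tgtWithNormals)
{
    // point-to-point: matches raw points; sensitive to the initial guess and to noise
    pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
    icp.setInputSource(src);
    icp.setInputTarget(tgt);
    pcl::PointCloud<pcl::PointXYZ> alignedP2P;
    icp.align(alignedP2P);

    // point-to-plane: minimizes distance to the local tangent plane (needs normals),
    // closer in spirit to the surf matching implemented by hand in this file
    pcl::IterativeClosestPointWithNormals<pcl::PointNormal, pcl::PointNormal> icpP2Pl;
    icpP2Pl.setInputSource(srcWithNormals);
    icpP2Pl.setInputTarget(tgtWithNormals);
    pcl::PointCloud<pcl::PointNormal> alignedP2Pl;
    icpP2Pl.align(alignedP2Pl);
}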
From an algorithmic standpoint, laser SLAM and RGB-D SLAM could share this code. Although a depth camera needs no motion-distortion removal on its input, a lidar holds one huge advantage a depth camera lacks: a wide field of view!
Scan2map matching is quite demanding on the two point clouds: they must cover a wide enough span of environment features and overlap enough to match. With a relatively small-FOV depth camera, if the robot happens to rotate sharply, the camera sees mostly new content, the current cloud barely overlaps the historical map, and matching is likely to fail.
From an engineering standpoint, you may never fully master the math inside, and that's fine, but you must know what each function does and how its parameters affect the result.
void scan2MapOptimization()
{
// keyframes are required
if (cloudKeyPoses3D->points.empty())
return;
// the current scan needs enough corner and surface points
if (laserCloudCornerLastDSNum > edgeFeatureMinValidNum && laserCloudSurfLastDSNum > surfFeatureMinValidNum)
{
// kd-tree input is the local map cloud
kdtreeCornerFromMap->setInputCloud(laserCloudCornerFromMapDS);
kdtreeSurfFromMap->setInputCloud(laserCloudSurfFromMapDS);
// iterate up to 30 times
for (int iterCount = 0; iterCount < 30; iterCount++)
{
// clear the feature sets each iteration
laserCloudOri->clear();
coeffSel->clear();
// corner features: match the current scan's corners against the local map
cornerOptimization();
// surface features: match the current scan's surface points against the local map
surfOptimization();
// gather the corners and surface points that matched the local map into one set
combineOptimizationCoeffs();
// scan-to-map optimization
if (LMOptimization(iterCount) == true)
break;
}
// update the current frame's pose
transformUpdate();
} else {
ROS_WARN("Not enough features! Only %d edge and %d planar features available.", laserCloudCornerLastDSNum, laserCloudSurfLastDSNum);
}
}
5.2.1 Corner feature matching: cornerOptimization()
This part amounts to point-to-line ICP. The procedure: iterate over the current scan's corner points, transform each into the world frame, and find the 5 points nearest to it in the local map:
// iterate over the current scan's corner set
#pragma omp parallel for num_threads(numberOfCores)
for (int i = 0; i < laserCloudCornerLastDSNum; i++)
{
PointType pointOri, pointSel, coeff;
std::vector<int> pointSearchInd;
std::vector<float> pointSearchSqDis;
// the corner point (still in lidar coordinates)
pointOri = laserCloudCornerLastDS->points[i];
// transform to the world (map) frame using the current pose
pointAssociateToMap(&pointOri, &pointSel);
// find the 5 nearest corners in the local corner map
kdtreeCornerFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);
Define containers for the covariance matrix, its eigenvalues, and its eigenvectors:
cv::Mat matA1(3, 3, CV_32F, cv::Scalar::all(0));
cv::Mat matD1(1, 3, CV_32F, cv::Scalar::all(0));
cv::Mat matV1(3, 3, CV_32F, cv::Scalar::all(0));
Compute the centroid of the 5 local-map points nearest to the current point:
// require all 5 distances to be under 1 m
if (pointSearchSqDis[4] < 1.0) {
// average the 5 points to get the centroid
float cx = 0, cy = 0, cz = 0;
for (int j = 0; j < 5; j++) {
cx += laserCloudCornerFromMapDS->points[pointSearchInd[j]].x;
cy += laserCloudCornerFromMapDS->points[pointSearchInd[j]].y;
cz += laserCloudCornerFromMapDS->points[pointSearchInd[j]].z;
}
cx /= 5; cy /= 5; cz /= 5;
Build the covariance matrix and decompose it:
// compute the covariance
float a11 = 0, a12 = 0, a13 = 0, a22 = 0, a23 = 0, a33 = 0;
for (int j = 0; j < 5; j++) {
// offsets of each point from the centroid
float ax = laserCloudCornerFromMapDS->points[pointSearchInd[j]].x - cx;
float ay = laserCloudCornerFromMapDS->points[pointSearchInd[j]].y - cy;
float az = laserCloudCornerFromMapDS->points[pointSearchInd[j]].z - cz;
a11 += ax * ax; a12 += ax * ay; a13 += ax * az;
a22 += ay * ay; a23 += ay * az;
a33 += az * az;
}
a11 /= 5; a12 /= 5; a13 /= 5; a22 /= 5; a23 /= 5; a33 /= 5;
// assemble the covariance matrix
matA1.at<float>(0, 0) = a11; matA1.at<float>(0, 1) = a12; matA1.at<float>(0, 2) = a13;
matA1.at<float>(1, 0) = a12; matA1.at<float>(1, 1) = a22; matA1.at<float>(1, 2) = a23;
matA1.at<float>(2, 0) = a13; matA1.at<float>(2, 1) = a23; matA1.at<float>(2, 2) = a33;
// eigen decomposition
cv::eigen(matA1, matD1, matV1);
Check whether these 5 local-map points form a line:
// if the largest eigenvalue is much bigger than the second largest, the points form a line and this corner is valid
if (matD1.at<float>(0, 0) > 3 * matD1.at<float>(0, 1)) {
// current corner point (in the map frame)
float x0 = pointSel.x;
float y0 = pointSel.y;
float z0 = pointSel.z;
// take one point on each side of the local-map centroid along the principal eigenvector (the line direction)
float x1 = cx + 0.1 * matV1.at<float>(0, 0);
float y1 = cy + 0.1 * matV1.at<float>(0, 1);
float z1 = cz + 0.1 * matV1.at<float>(0, 2);
float x2 = cx - 0.1 * matV1.at<float>(0, 0);
float y2 = cy - 0.1 * matV1.at<float>(0, 1);
float z2 = cz - 0.1 * matV1.at<float>(0, 2);
Compute the distance from the current scan point to this line:
// area_012: twice the area of the triangle formed by the three points; cross-product magnitude |a×b| = |a||b|sin(theta)
float a012 = sqrt(((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) * ((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
+ ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1)) * ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))
+ ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1)) * ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1)));
// line_12: length of the base
float l12 = sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
// two cross products yield the unit vector of the perpendicular from the point to the line; this is its x component, the rest analogous
float la = ((y1 - y2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
+ (z1 - z2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))) / a012 / l12;
float lb = -((x1 - x2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1))
- (z1 - z2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12;
float lc = -((x1 - x2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))
+ (y1 - y2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12;
// height of the triangle, i.e. the point-to-line distance
float ld2 = a012 / l12;
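In formula form, with $p_0$ the current point and $p_1$, $p_2$ the two points taken along the line direction, this is the classic cross-product distance:

$$
d = \frac{\bigl| (p_0 - p_1) \times (p_0 - p_2) \bigr|}{\lvert p_1 - p_2 \rvert}
$$

i.e. ld2 = a012 / l12: twice the triangle's area divided by the base.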
Compute the point's weight in the optimization, which depends on its distance to the line (with the s > 0.1 acceptance check below, points farther than about 1 m are effectively dropped):
// the larger the distance, the smaller s: a distance penalty factor (weight)
float s = 1 - 0.9 * fabs(ld2);
Multiply that weight into the normal vector and the distance, and store them for the optimization coming up:
// unit vector of the perpendicular from the point to the line
coeff.x = s * la;
coeff.y = s * lb;
coeff.z = s * lc;
// point-to-line distance
coeff.intensity = s * ld2;
if (s > 0.1) {
// add the current scan's corner point to the match set
laserCloudOriCornerVec[i] = pointOri;
// the corner's coefficients
coeffSelCornerVec[i] = coeff;
laserCloudOriCornerFlag[i] = true;
}
}
}
}
}
5.2.2 Surface feature matching: surfOptimization()
This part amounts to point-to-plane ICP. The procedure: iterate over the current scan's surface points, transform each into the world frame, and find the 5 points nearest to it in the local map:
// iterate over the current scan's surface set
#pragma omp parallel for num_threads(numberOfCores)
for (int i = 0; i < laserCloudSurfLastDSNum; i++)
{
PointType pointOri, pointSel, coeff;
std::vector<int> pointSearchInd;
std::vector<float> pointSearchSqDis;
// the surface point (still in lidar coordinates)
pointOri = laserCloudSurfLastDS->points[i];
// transform to the world (map) frame using the current pose
pointAssociateToMap(&pointOri, &pointSel);
// find the 5 nearest surface points in the local surface map
kdtreeSurfFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);
Check whether these 5 local-map points form a plane:
Eigen::Matrix<float, 5, 3> matA0;
Eigen::Matrix<float, 5, 1> matB0;
Eigen::Vector3f matX0;
matA0.setZero();
matB0.fill(-1);
matX0.setZero();
// require all 5 distances to be under 1 m
if (pointSearchSqDis[4] < 1.0) {
for (int j = 0; j < 5; j++) {
matA0(j, 0) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].x;
matA0(j, 1) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].y;
matA0(j, 2) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].z;
}
// assume a plane equation ax+by+cz+1=0; solve for the coefficients a, b, c, with d=1
matX0 = matA0.colPivHouseholderQr().solve(matB0);
// the plane coefficients, which are also the components of its normal
float pa = matX0(0, 0);
float pb = matX0(1, 0);
float pc = matX0(2, 0);
float pd = 1;
// unit normal
float ps = sqrt(pa * pa + pb * pb + pc * pc);
pa /= ps; pb /= ps; pc /= ps; pd /= ps;
// check the plane quality: if any of the 5 points lies more than 0.2 m from the plane, the points are too scattered to form a plane
bool planeValid = true;
for (int j = 0; j < 5; j++) {
if (fabs(pa * laserCloudSurfFromMapDS->points[pointSearchInd[j]].x +
pb * laserCloudSurfFromMapDS->points[pointSearchInd[j]].y +
pc * laserCloudSurfFromMapDS->points[pointSearchInd[j]].z + pd) > 0.2) {
planeValid = false;
break;
}
}
Compute the current scan point's distance to the plane and the normal, multiply in a weight negatively correlated with the distance, and store the data for the later optimization:
// the plane is valid
if (planeValid) {
// distance from the current scan point to the plane
float pd2 = pa * pointSel.x + pb * pointSel.y + pc * pointSel.z + pd;
float s = 1 - 0.9 * fabs(pd2) / sqrt(sqrt(pointSel.x * pointSel.x
+ pointSel.y * pointSel.y + pointSel.z * pointSel.z));
// unit normal of the perpendicular from the point to the plane (effectively the plane normal itself)
coeff.x = s * pa;
coeff.y = s * pb;
coeff.z = s * pc;
// point-to-plane distance
coeff.intensity = s * pd2;
if (s > 0.1) {
// add the current scan's surface point to the match set
laserCloudOriSurfVec[i] = pointOri;
// coefficients
coeffSelSurfVec[i] = coeff;
laserCloudOriSurfFlag[i] = true;
}
}
}
}
}
My own experiments suggest point-to-plane ICP beats point-to-line ICP indoors; urban environments probably lead to the same conclusion, though out in the wild it's hard to say which wins.
As for the parameters above: if your initial pose is fairly accurate you can set them strict; otherwise keep them loose, or the number of matched pairs may fall short. Or don't hard-code them at all: keep them loose in the first iterations and tighten them later, as sketched below.
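A purely hypothetical sketch of that suggestion (LIO-SAM itself hard-codes the 1.0 and 0.2 constants); the gates start loose while the pose guess is rough and tighten as the iterations converge:

// hypothetical iteration-dependent gates, replacing the fixed
// pointSearchSqDis[4] < 1.0 and 0.2 m plane-fit checks
float neighborSqDistGate(int iterCount)
{
    return iterCount < 10 ? 2.0f : 1.0f;  // squared distance, example values
}
float planeResidualGate(int iterCount)
{
    return iterCount < 10 ? 0.4f : 0.2f;  // meters, example values
}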
5.2.3 Preprocessing before the optimization: combineOptimizationCoeffs()
It simply moves the weighted distances and normals just produced by the point-to-plane and point-to-line ICP from their separate containers into one unified container:
void combineOptimizationCoeffs()
{
// iterate over the current scan's corners and extract the ones that matched the local map
for (int i = 0; i < laserCloudCornerLastDSNum; ++i){
if (laserCloudOriCornerFlag[i] == true){
laserCloudOri->push_back(laserCloudOriCornerVec[i]);
coeffSel->push_back(coeffSelCornerVec[i]);
}
}
// iterate over the current scan's surface points and extract the ones that matched the local map
for (int i = 0; i < laserCloudSurfLastDSNum; ++i){
if (laserCloudOriSurfFlag[i] == true){
laserCloudOri->push_back(laserCloudOriSurfVec[i]);
coeffSel->push_back(coeffSelSurfVec[i]);
}
}
// clear the flags
std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false);
std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false);
}
5.2.4 The optimization! LMOptimization()
First, for historical reasons, a round of coordinate remapping is needed:
bool LMOptimization(int iterCount)
{
// This optimization is from the original loam_velodyne by Ji Zhang, need to cope with coordinate transformation
// lidar <- camera --- camera <- lidar
// x = z --- x = y
// y = x --- y = z
// z = y --- z = x
// roll = yaw --- roll = pitch
// pitch = roll --- pitch = yaw
// yaw = pitch --- yaw = roll
// lidar -> camera
float srx = sin(transformTobeMapped[1]); //pitch
float crx = cos(transformTobeMapped[1]);
float sry = sin(transformTobeMapped[2]); //yaw
float cry = cos(transformTobeMapped[2]);
float srz = sin(transformTobeMapped[0]); //roll
float crz = cos(transformTobeMapped[0]);
The total number of point pairs matched by the point-to-line and point-to-plane ICP must exceed a minimum; the more pairs, the more accurate:
// too few matched feature points in the current frame
int laserCloudSelNum = laserCloudOri->size();
if (laserCloudSelNum < 50) {
return false;
}
Next, the Gauss-Newton normal equations are solved for the pose increment. I haven't re-derived the formulas here, but note that matA has one row per matched point pair and one column per pose dimension, and matAt is obviously its transpose; doesn't matA look exactly like the J matrix? matB holds the residuals, and matX is the pose increment being solved for.
cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR) corresponds to the Gauss-Newton equation J^T·J·delta_x = -J^T·f.
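For reference, the one-line derivation: linearize the stacked weighted residuals $f(x)$ around the current pose, $f(x+\Delta x) \approx f(x) + J\,\Delta x$, and minimize the squared norm:

$$
\min_{\Delta x} \left\| f(x) + J\,\Delta x \right\|^2 \;\Longrightarrow\; J^{T}J\,\Delta x = -J^{T}f(x)
$$

In the code below, matA is $J$ and matB is $-f$, so matAtB $= -J^{T}f$, and cv::solve returns $\Delta x$ as matX.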
cv::Mat matA(laserCloudSelNum, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matAt(6, laserCloudSelNum, CV_32F, cv::Scalar::all(0));
cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matB(laserCloudSelNum, 1, CV_32F, cv::Scalar::all(0));
cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0));
cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0));
PointType pointOri, coeff;
// iterate over the matched feature points and build the Jacobian
for (int i = 0; i < laserCloudSelNum; i++) {
// lidar -> camera todo
pointOri.x = laserCloudOri->points[i].y;
pointOri.y = laserCloudOri->points[i].z;
pointOri.z = laserCloudOri->points[i].x;
// lidar -> camera
coeff.x = coeffSel->points[i].y;
coeff.y = coeffSel->points[i].z;
coeff.z = coeffSel->points[i].x;
coeff.intensity = coeffSel->points[i].intensity;
// in camera
float arx = (crx*sry*srz*pointOri.x + crx*crz*sry*pointOri.y - srx*sry*pointOri.z) * coeff.x
+ (-srx*srz*pointOri.x - crz*srx*pointOri.y - crx*pointOri.z) * coeff.y
+ (crx*cry*srz*pointOri.x + crx*cry*crz*pointOri.y - cry*srx*pointOri.z) * coeff.z;
float ary = ((cry*srx*srz - crz*sry)*pointOri.x
+ (sry*srz + cry*crz*srx)*pointOri.y + crx*cry*pointOri.z) * coeff.x
+ ((-cry*crz - srx*sry*srz)*pointOri.x
+ (cry*srz - crz*srx*sry)*pointOri.y - crx*sry*pointOri.z) * coeff.z;
float arz = ((crz*srx*sry - cry*srz)*pointOri.x + (-cry*crz-srx*sry*srz)*pointOri.y)*coeff.x
+ (crx*crz*pointOri.x - crx*srz*pointOri.y) * coeff.y
+ ((sry*srz + cry*crz*srx)*pointOri.x + (crz*sry-cry*srx*srz)*pointOri.y)*coeff.z;
// lidar -> camera
matA.at<float>(i, 0) = arz;
matA.at<float>(i, 1) = arx;
matA.at<float>(i, 2) = ary;
matA.at<float>(i, 3) = coeff.z;
matA.at<float>(i, 4) = coeff.x;
matA.at<float>(i, 5) = coeff.y;
// point-to-line and point-to-plane distances serve as the observations
matB.at<float>(i, 0) = -coeff.intensity;
}
cv::transpose(matA, matAt);
matAtA = matAt * matA;
matAtB = matAt * matB;
// Gauss-Newton: J^T·J·delta_x = -J^T·f
cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR);
Next comes the degeneracy check on the J^T·J matrix. In my tests, the six numbers in the first row of matE decrease monotonically; when matching is good they are all large, often well above 100, and as matching deteriorates later in a run they shrink and degeneracy shows up frequently.
// on the first iteration, check whether the approximate Hessian (J^T·J) is degenerate, i.e. (near-)singular with determinant close to 0 todo
if (iterCount == 0) {
cv::Mat matE(1, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matV(6, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matV2(6, 6, CV_32F, cv::Scalar::all(0));
cv::eigen(matAtA, matE, matV);
matV.copyTo(matV2);
isDegenerate = false;
float eignThre[6] = {100, 100, 100, 100, 100, 100};
for (int i = 5; i >= 0; i--) {
if (matE.at<float>(0, i) < eignThre[i]) {
for (int j = 0; j < 6; j++) {
matV2.at<float>(i, j) = 0;
}
isDegenerate = true;
} else {
break;
}
}
matP = matV.inv() * matV2;
}
if (isDegenerate)
{
cv::Mat matX2(6, 1, CV_32F, cv::Scalar::all(0));
matX.copyTo(matX2);
matX = matP * matX2;
}
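What the matP remapping does, in formula form: eigen-decompose $J^{T}J$, stack its eigenvectors into $V$ (as rows, the way OpenCV's cv::eigen returns them), and let $V'$ be $V$ with the rows belonging to small-eigenvalue (degenerate) directions zeroed out; the update is then remapped as

$$
\Delta x' = V^{-1} V'\,\Delta x
$$

so the component of the step along the unconstrained directions is simply discarded. This is the solution-remapping trick from Ji Zhang's degeneracy analysis, inherited here from the original LOAM code.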
Apply the solved pose increment to the current pose, correcting it in this iteration:
// update the current pose: x = x + delta_x
transformTobeMapped[0] += matX.at<float>(0, 0);
transformTobeMapped[1] += matX.at<float>(1, 0);
transformTobeMapped[2] += matX.at<float>(2, 0);
transformTobeMapped[3] += matX.at<float>(3, 0);
transformTobeMapped[4] += matX.at<float>(4, 0);
transformTobeMapped[5] += matX.at<float>(5, 0);
Compute two magnitudes and test the stopping criterion (deltaR converts the rotation increment to degrees, deltaT scales the translation to centimeters; both must drop below 0.05):
float deltaR = sqrt(
pow(pcl::rad2deg(matX.at<float>(0, 0)), 2) +
pow(pcl::rad2deg(matX.at<float>(1, 0)), 2) +
pow(pcl::rad2deg(matX.at<float>(2, 0)), 2));
float deltaT = sqrt(
pow(matX.at<float>(3, 0) * 100, 2) +
pow(matX.at<float>(4, 0) * 100, 2) +
pow(matX.at<float>(5, 0) * 100, 2));
// delta_x is tiny; consider it converged
if (deltaR < 0.05 && deltaT < 0.05) {
return true;
}
return false;
}
5.2.5 Adjusting the pose with the IMU: transformUpdate()
After the iterative scan2map pose optimization, the author adds a step that fuses this pose with IMU data, perhaps because his IMU is fairly good. This part acts as a constraint on the result of the previous step.
void transformUpdate()
{
if (cloudInfo.imuAvailable == true)
{
// pitch angle below 1.4 rad
if (std::abs(cloudInfo.imuPitchInit) < 1.4)
{
double imuWeight = imuRPYWeight;
tf::Quaternion imuQuaternion;
tf::Quaternion transformQuaternion;
double rollMid, pitchMid, yawMid;
// weighted average of roll: slerp between the scan-to-map optimized pose and the raw IMU RPY
transformQuaternion.setRPY(transformTobeMapped[0], 0, 0);
imuQuaternion.setRPY(cloudInfo.imuRollInit, 0, 0);
tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
transformTobeMapped[0] = rollMid;
// weighted average of pitch: slerp between the scan-to-map optimized pose and the raw IMU RPY
transformQuaternion.setRPY(0, transformTobeMapped[1], 0);
imuQuaternion.setRPY(0, cloudInfo.imuPitchInit, 0);
tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
transformTobeMapped[1] = pitchMid;
}
}
// constrain the current pose's roll, pitch, and z: on a ground vehicle roll and pitch stay fairly stable, so the IMU can be trusted to a degree, and z is bounded as a height constraint
transformTobeMapped[0] = constraintTransformation(transformTobeMapped[0], rotation_tollerance);
transformTobeMapped[1] = constraintTransformation(transformTobeMapped[1], rotation_tollerance);
transformTobeMapped[5] = constraintTransformation(transformTobeMapped[5], z_tollerance);
// the current frame's pose
incrementalOdometryAffineBack = trans2Affine3f(transformTobeMapped);
}
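The helper constraintTransformation() isn't shown in this walkthrough; in the LIO-SAM source it is just a symmetric clamp, to this effect:

float constraintTransformation(float value, float limit)
{
    // clamp value into [-limit, limit]
    if (value < -limit) value = -limit;
    if (value >  limit) value =  limit;
    return value;
}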
5.3 Loop closure: loopClosureThread()
main() also spawns a loop-closure thread. Unlike V-SLAM, which can compute features and descriptors to build a bag of words, lidar SLAM here can only judge loop closures from positional relations:
void loopClosureThread()
{
if (loopClosureEnableFlag == false)
return;
ros::Rate rate(loopClosureFrequency);
while (ros::ok())
{
rate.sleep();
// loop closure: scan-to-map, refine the pose with icp
performLoopClosure();
// show the loop edges in rviz
visualizeLoopClosure();
}
}
5.3.1 Loop detection and correction: performLoopClosure()
Step one: use the pose relations to find candidate loop frames. First they must be close in position; with a 360° lidar there's no need to check heading, whereas a depth camera would require one. Second, the earlier the loop frame the better, since earlier frames carry smaller pose error and the loop correction achieves more.
if (cloudKeyPoses3D->points.empty() == true)
return;
mtx.lock();
*copy_cloudKeyPoses3D = *cloudKeyPoses3D;
*copy_cloudKeyPoses6D = *cloudKeyPoses6D;
mtx.unlock();
// index of the current keyframe, and of the candidate loop keyframe
int loopKeyCur;
int loopKeyPre;
// not-used
if (detectLoopClosureExternal(&loopKeyCur, &loopKeyPre) == false)
// search the historical keyframes for those closest to the current one, and pick one far enough away in time as the loop candidate
if (detectLoopClosureDistance(&loopKeyCur, &loopKeyPre) == false)
return;
detectLoopClosureExternal() handles loop frame-ID pairs subscribed from an external source.
detectLoopClosureDistance() is the built-in detection, based on positional proximity and time difference:
bool detectLoopClosureDistance(int *latestID, int *closestID)
{
// the current keyframe
int loopKeyCur = copy_cloudKeyPoses3D->size() - 1;
int loopKeyPre = -1;
// if this frame already has a loop pairing, don't add another
auto it = loopIndexContainer.find(loopKeyCur);
if (it != loopIndexContainer.end())
return false;
// search the historical keyframes for those closest to the current one; historyKeyframeSearchRadius makes sure they aren't far
std::vector<int> pointSearchIndLoop;
std::vector<float> pointSearchSqDisLoop;
kdtreeHistoryKeyPoses->setInputCloud(copy_cloudKeyPoses3D);
kdtreeHistoryKeyPoses->radiusSearch(copy_cloudKeyPoses3D->back(), historyKeyframeSearchRadius, pointSearchIndLoop, pointSearchSqDisLoop, 0);
// among the candidates, pick a frame far enough away in time as the match candidate
for (int i = 0; i < (int)pointSearchIndLoop.size(); ++i)
{
int id = pointSearchIndLoop[i];
if (abs(copy_cloudKeyPoses6D->points[id].time - timeLaserInfoCur) > historyKeyframeSearchTimeDiff)
{
loopKeyPre = id;
break;
}
}
if (loopKeyPre == -1 || loopKeyCur == loopKeyPre)
return false;
*latestID = loopKeyCur;
*closestID = loopKeyPre;
return true;
}
Step two: once the ids of the two loop frames are found, their point clouds can be extracted:
// extraction
pcl::PointCloud<PointType>::Ptr cureKeyframeCloud(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr prevKeyframeCloud(new pcl::PointCloud<PointType>());
{
// extract and downsample the current keyframe's feature set
loopFindNearKeyframes(cureKeyframeCloud, loopKeyCur, 0);
// extract and downsample the feature sets of the loop keyframe plus several neighbors on either side
loopFindNearKeyframes(prevKeyframeCloud, loopKeyPre, historyKeyframeSearchNum);
// bail out if there are too few feature points
if (cureKeyframeCloud->size() < 300 || prevKeyframeCloud->size() < 1000)
return;
// publish the loop candidate's local map
if (pubHistoryKeyFrames.getNumSubscribers() != 0)
publishCloud(&pubHistoryKeyFrames, prevKeyframeCloud, timeLaserInfoStamp, odometryFrame);
}
Of course, you can't take just a single frame's cloud on each side: the viewpoint may be too narrow and the point count too small, so the contained features aren't rich enough, shared features are scarce, and the match could fail. So several neighboring frames' clouds are merged in to enrich each side, implemented in loopFindNearKeyframes():
void loopFindNearKeyframes(pcl::PointCloud<PointType>::Ptr& nearKeyframes, const int& key, const int& searchNum)
{
// collect the feature sets of the keyframes neighboring index key
nearKeyframes->clear();
int cloudSize = copy_cloudKeyPoses6D->size();
for (int i = -searchNum; i <= searchNum; ++i)
{
int keyNear = key + i;
if (keyNear < 0 || keyNear >= cloudSize )
continue;
*nearKeyframes += *transformPointCloud(cornerCloudKeyFrames[keyNear], &copy_cloudKeyPoses6D->points[keyNear]);
*nearKeyframes += *transformPointCloud(surfCloudKeyFrames[keyNear], &copy_cloudKeyPoses6D->points[keyNear]);
}
if (nearKeyframes->empty())
return;
// downsample
pcl::PointCloud<PointType>::Ptr cloud_temp(new pcl::PointCloud<PointType>());
downSizeFilterICP.setInputCloud(nearKeyframes);
downSizeFilterICP.filter(*cloud_temp);
*nearKeyframes = *cloud_temp;
}
Step three: with both loop frames' clouds extracted, the ICP match can run. Here the author matches differently from scan2map: he directly uses PCL's native point-to-point ICP:
// ICP settings
static pcl::IterativeClosestPoint<PointType, PointType> icp;
icp.setMaxCorrespondenceDistance(historyKeyframeSearchRadius*2);
icp.setMaximumIterations(100);
icp.setTransformationEpsilon(1e-6);
icp.setEuclideanFitnessEpsilon(1e-6);
icp.setRANSACIterations(0);
// scan-to-map: run the icp match
icp.setInputSource(cureKeyframeCloud);
icp.setInputTarget(prevKeyframeCloud);
pcl::PointCloud<PointType>::Ptr unused_result(new pcl::PointCloud<PointType>());
icp.align(*unused_result);
// not converged, or the match is not good enough
if (icp.hasConverged() == false || icp.getFitnessScore() > historyKeyframeFitnessScore)
return;
// publish the current keyframe's feature cloud transformed by the loop-corrected pose
if (pubIcpKeyFrames.getNumSubscribers() != 0)
{
pcl::PointCloud<PointType>::Ptr closed_cloud(new pcl::PointCloud<PointType>());
pcl::transformPointCloud(*cureKeyframeCloud, *closed_cloud, icp.getFinalTransformation());
publishCloud(&pubIcpKeyFrames, closed_cloud, timeLaserInfoStamp, odometryFrame);
}
Step four: if the ICP match succeeded, update the pose and record the error:
// pose transform between the current keyframe and the loop keyframe, obtained from the loop optimization
float x, y, z, roll, pitch, yaw;
Eigen::Affine3f correctionLidarFrame;
correctionLidarFrame = icp.getFinalTransformation();
// the current frame's pose before loop correction
Eigen::Affine3f tWrong = pclPointToAffine3f(copy_cloudKeyPoses6D->points[loopKeyCur]);
// the current frame's pose after loop correction
Eigen::Affine3f tCorrect = correctionLidarFrame * tWrong;
pcl::getTranslationAndEulerAngles (tCorrect, x, y, z, roll, pitch, yaw);
gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z));
// the loop keyframe's pose
gtsam::Pose3 poseTo = pclPointTogtsamPose3(copy_cloudKeyPoses6D->points[loopKeyPre]);
gtsam::Vector Vector6(6);
float noiseScore = icp.getFitnessScore();
Vector6 <<
noiseScore, noiseScore, noiseScore, noiseScore, noiseScore, noiseScore;
noiseModel::Diagonal::shared_ptr constraintNoise = noiseModel::Diagonal::Variances(Vector6);
Step five: push the loop frame ids and the pose into buffers to be added in the next round of graph optimization, and record the pair in the dictionary:
// data needed to add the loop factor
mtx.lock();
loopIndexQueue.push_back(make_pair(loopKeyCur, loopKeyPre));
loopPoseQueue.push_back(poseFrom.between(poseTo));
loopNoiseQueue.push_back(constraintNoise);
mtx.unlock();
loopIndexContainer[loopKeyCur] = loopKeyPre;
5.3.2 Visualizing loop relations: visualizeLoopClosure()
Skipped. It just draws big dots at the loop-frame poses and connects loop pairs with lines for rviz.
5.4 Visualization: visualizeGlobalMapThread()
It does two things: publish the point cloud on a topic, and save the point-cloud map to files.
5.4.1 Publishing the map: publishGlobalMap()
Step one: collect the point clouds near the latest frame:
// kd-tree search for the keyframes adjacent to the most recent one
std::vector<int> pointSearchIndGlobalMap;
std::vector<float> pointSearchSqDisGlobalMap;
mtx.lock();
kdtreeGlobalMap->setInputCloud(cloudKeyPoses3D);
kdtreeGlobalMap->radiusSearch(cloudKeyPoses3D->back(), globalMapVisualizationSearchRadius, pointSearchIndGlobalMap, pointSearchSqDisGlobalMap, 0);
mtx.unlock();
for (int i = 0; i < (int)pointSearchIndGlobalMap.size(); ++i)
globalMapKeyPoses->push_back(cloudKeyPoses3D->points[pointSearchIndGlobalMap[i]]);
// downsample
pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyPoses;
downSizeFilterGlobalMapKeyPoses.setLeafSize(globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity); // for global map visualization
downSizeFilterGlobalMapKeyPoses.setInputCloud(globalMapKeyPoses);
downSizeFilterGlobalMapKeyPoses.filter(*globalMapKeyPosesDS);
Step two: since these clouds are stored in their own lidar frames, they must be transformed into the world frame using their respective poses:
// extract the feature clouds of the nearby keyframes
for (int i = 0; i < (int)globalMapKeyPosesDS->size(); ++i){
// too far away
if (pointDistance(globalMapKeyPosesDS->points[i], cloudKeyPoses3D->back()) > globalMapVisualizationSearchRadius)
continue;
int thisKeyInd = (int)globalMapKeyPosesDS->points[i].intensity;
*globalMapKeyFrames += *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
*globalMapKeyFrames += *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);
}
Step three: publish:
// downsample, then publish
pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyFrames; // for global map visualization
downSizeFilterGlobalMapKeyFrames.setLeafSize(globalMapVisualizationLeafSize, globalMapVisualizationLeafSize, globalMapVisualizationLeafSize); // for global map visualization
downSizeFilterGlobalMapKeyFrames.setInputCloud(globalMapKeyFrames);
downSizeFilterGlobalMapKeyFrames.filter(*globalMapKeyFramesDS);
publishCloud(&pubLaserCloudSurround, globalMapKeyFramesDS, timeLaserInfoStamp, odometryFrame);
}
5.4.2 Saving the map: saveMapService()
Step one: create the directory:
if(req.destination.empty()) saveMapDirectory = std::getenv("HOME") + savePCDDirectory;
else saveMapDirectory = std::getenv("HOME") + req.destination;
int unused = system((std::string("exec rm -r ") + saveMapDirectory).c_str());
unused = system((std::string("mkdir -p ") + saveMapDirectory).c_str());
Step two: save the trajectory:
// save the historical keyframe poses
pcl::io::savePCDFileBinary(saveMapDirectory + "/trajectory.pcd", *cloudKeyPoses3D);
pcl::io::savePCDFileBinary(saveMapDirectory + "/transformations.pcd", *cloudKeyPoses6D);
Step three: transform the global map from the individual lidar frames to the world frame:
// collect the corner and surface sets of all historical keyframes
pcl::PointCloud<PointType>::Ptr globalCornerCloud(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr globalCornerCloudDS(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr globalSurfCloud(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr globalSurfCloudDS(new pcl::PointCloud<PointType>());
pcl::PointCloud<PointType>::Ptr globalMapCloud(new pcl::PointCloud<PointType>());
for (int i = 0; i < (int)cloudKeyPoses3D->size(); i++) {
*globalCornerCloud += *transformPointCloud(cornerCloudKeyFrames[i], &cloudKeyPoses6D->points[i]);
*globalSurfCloud += *transformPointCloud(surfCloudKeyFrames[i], &cloudKeyPoses6D->points[i]);
}
Step four: save the corner-point global map and the surface-point global map separately:
if(req.resolution != 0)
{
// downsample
downSizeFilterCorner.setInputCloud(globalCornerCloud);
downSizeFilterCorner.setLeafSize(req.resolution, req.resolution, req.resolution);
downSizeFilterCorner.filter(*globalCornerCloudDS);
pcl::io::savePCDFileBinary(saveMapDirectory + "/CornerMap.pcd", *globalCornerCloudDS);
// downsample
downSizeFilterSurf.setInputCloud(globalSurfCloud);
downSizeFilterSurf.setLeafSize(req.resolution, req.resolution, req.resolution);
downSizeFilterSurf.filter(*globalSurfCloudDS);
pcl::io::savePCDFileBinary(saveMapDirectory + "/SurfMap.pcd", *globalSurfCloudDS);
}
else
{
pcl::io::savePCDFileBinary(saveMapDirectory + "/CornerMap.pcd", *globalCornerCloud);
pcl::io::savePCDFileBinary(saveMapDirectory + "/SurfMap.pcd", *globalSurfCloud);
}
Step five: save the combined global map:
// merge into one: the feature set of all global keyframes
*globalMapCloud += *globalCornerCloud;
*globalMapCloud += *globalSurfCloud;
int ret = pcl::io::savePCDFileBinary(saveMapDirectory + "/GlobalMap.pcd", *globalMapCloud);
res.success = ret == 0;
downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize);
downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);
return true;
}
The end.