The previous step found the two initial frames and their feature matches; the next step is to compute the corresponding homography matrix and fundamental matrix from those matches. Concretely, 8 matched point pairs are drawn at random for each hypothesis, and two threads are launched to compute the homography and the fundamental matrix in parallel. I will not walk through both estimations step by step here; the code below carries fairly detailed comments.
Computing the homography matrix
cv::Mat Initializer::calcHFromMatches(const std::vector<cv::Point2f> &points_ref, const std::vector<cv::Point2f> &points_cur)
{
    /**
     * For a correspondence x2 = H * x1, the cross product of x2 with H * x1 is zero:
     *
     *   |  0  -1   v2 |   | a b c |   | u1 |   | 0 |
     *   |  1   0  -u2 | * | d e f | * | v1 | = | 0 |
     *   | -v2  u2   0 |   | g h 1 |   | 1  |   | 0 |
     *
     * Expanding the product gives:
     *
     *   (-d + v2*g)*u1    + (-e + v2*h)*v1    + (-f + v2)       = 0
     *   ( a - u2*g)*u1    + ( b - u2*h)*v1    + ( c - u2)       = 0
     *   (-v2*a + u2*d)*u1 + (-v2*b + u2*e)*v1 + (-v2*c + u2*f)  = 0
     *
     * Written as linear equations in the entries of H (the third equation is a
     * linear combination of the first two, so only two rows are used per match):
     *
     *    0*a + 0*b + 0*c - u1*d - v1*e - 1*f + v2*u1*g + v2*v1*h + v2 = 0
     *   u1*a + v1*b + 1*c + 0*d + 0*e + 0*f - u2*u1*g - u2*v1*h - u2 = 0
     */
    const int N = points_ref.size();
    cv::Mat A(2 * N, 9, CV_32F);
    for (int i = 0; i < N; i++)
    {
        const float u1 = points_ref[i].x;
        const float v1 = points_ref[i].y;
        const float u2 = points_cur[i].x;
        const float v2 = points_cur[i].y;
        // first equation of the pair
        A.at<float>(2 * i, 0) = 0.0;
        A.at<float>(2 * i, 1) = 0.0;
        A.at<float>(2 * i, 2) = 0.0;
        A.at<float>(2 * i, 3) = -u1;
        A.at<float>(2 * i, 4) = -v1;
        A.at<float>(2 * i, 5) = -1;
        A.at<float>(2 * i, 6) = v2*u1;
        A.at<float>(2 * i, 7) = v2*v1;
        A.at<float>(2 * i, 8) = v2;
        // second equation of the pair
        A.at<float>(2 * i + 1, 0) = u1;
        A.at<float>(2 * i + 1, 1) = v1;
        A.at<float>(2 * i + 1, 2) = 1;
        A.at<float>(2 * i + 1, 3) = 0.0;
        A.at<float>(2 * i + 1, 4) = 0.0;
        A.at<float>(2 * i + 1, 5) = 0.0;
        A.at<float>(2 * i + 1, 6) = -u2*u1;
        A.at<float>(2 * i + 1, 7) = -u2*v1;
        A.at<float>(2 * i + 1, 8) = -u2;
    }
    cv::Mat u, w, vt;
    // least-squares solution of A*h = 0 via SVD: the singular vector of the
    // smallest singular value, i.e. the last row of vt
    cv::SVDecomp(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
    return vt.row(8).reshape(0, 3);
}
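One detail worth noting: the DLT above works directly on pixel coordinates. It is numerically better conditioned when the points are normalized first (Hartley normalization), which is what ORB-SLAM does before its DLT and eight-point steps. Below is a minimal sketch of such a normalization; the helper name normalizePoints is my own and is not part of the code above.

#include <cmath>
#include <vector>
#include <opencv2/core/core.hpp>

// Hypothetical helper (not part of the Initializer shown above): shift the points
// to zero mean and scale them so that the mean absolute deviation is 1.
// T is the 3x3 similarity transform that performs this normalization.
static void normalizePoints(const std::vector<cv::Point2f> &points,
                            std::vector<cv::Point2f> &normalized, cv::Mat &T)
{
    const int N = points.size();
    normalized.resize(N);
    float mean_x = 0.f, mean_y = 0.f;
    for (int i = 0; i < N; i++)
    {
        mean_x += points[i].x;
        mean_y += points[i].y;
    }
    mean_x /= N;
    mean_y /= N;
    float mean_dev_x = 0.f, mean_dev_y = 0.f;
    for (int i = 0; i < N; i++)
    {
        normalized[i].x = points[i].x - mean_x;
        normalized[i].y = points[i].y - mean_y;
        mean_dev_x += std::fabs(normalized[i].x);
        mean_dev_y += std::fabs(normalized[i].y);
    }
    const float sx = 1.f / (mean_dev_x / N);
    const float sy = 1.f / (mean_dev_y / N);
    for (int i = 0; i < N; i++)
    {
        normalized[i].x *= sx;
        normalized[i].y *= sy;
    }
    // T maps original pixel coordinates to the normalized coordinates
    T = cv::Mat::eye(3, 3, CV_32F);
    T.at<float>(0, 0) = sx;
    T.at<float>(1, 1) = sy;
    T.at<float>(0, 2) = -mean_x * sx;
    T.at<float>(1, 2) = -mean_y * sy;
}

If normalization is used, the matrix estimated on the normalized points has to be mapped back to pixel coordinates: H = T2.inv() * Hn * T1 for the homography, and F = T2.t() * Fn * T1 for the fundamental matrix of the next section (T1, T2 being the transforms for the reference and current points).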
Computing the fundamental matrix
cv::Mat Initializer::calcFFromMatches(const std::vector<cv::Point2f> &points_ref, const std::vector<cv::Point2f> &points_cur)
{
    /**
     * Build the constraint equations for the fundamental matrix. Given a
     * correspondence m = (u1, v1, 1)^T and m' = (u2, v2, 1)^T, the epipolar
     * constraint m'^T * F * m = 0 with F = (f_ij) expands to:
     *   u2*u1*f_11 + u2*v1*f_12 + u2*f_13 + v2*u1*f_21 + v2*v1*f_22 + v2*f_23 + u1*f_31 + v1*f_32 + f_33 = 0
     * With f = (f_11, f_12, f_13, f_21, f_22, f_23, f_31, f_32, f_33)^T this becomes
     *   (u2*u1, u2*v1, u2, v2*u1, v2*v1, v2, u1, v1, 1) * f = 0
     * N correspondences therefore give a homogeneous linear system A*f = 0,
     * where A is an N x 9 matrix. Since F is non-zero, f is a non-zero vector,
     * so we look for a non-trivial solution. In addition, F must have rank 2,
     * an important constraint that is enforced afterwards.
     */
    const int N = points_ref.size();
    cv::Mat A(N, 9, CV_32F);
    for (int i = 0; i < N; i++)
    {
        const float u1 = points_ref[i].x;
        const float v1 = points_ref[i].y;
        const float u2 = points_cur[i].x;
        const float v2 = points_cur[i].y;
        A.at<float>(i, 0) = u2*u1;
        A.at<float>(i, 1) = u2*v1;
        A.at<float>(i, 2) = u2;
        A.at<float>(i, 3) = v2*u1;
        A.at<float>(i, 4) = v2*v1;
        A.at<float>(i, 5) = v2;
        A.at<float>(i, 6) = u1;
        A.at<float>(i, 7) = v1;
        A.at<float>(i, 8) = 1;
    }
    cv::Mat u, w, vt;
    // least-squares solution of A*f = 0 via SVD
    cv::SVDecomp(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
    cv::Mat Fpre = vt.row(8).reshape(0, 3);
    // enforce the rank-2 constraint: zero out the smallest singular value
    cv::SVDecomp(Fpre, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
    w.at<float>(2) = 0;
    return u*cv::Mat::diag(w)*vt;
}
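As a quick sanity check, the rank-2 enforcement means det(F) should be numerically zero, and for correct correspondences the epipolar constraint x2^T * F * x1 should also be close to zero. A small sketch, assuming F is the matrix returned by calcFFromMatches and points_ref / points_cur are the matches it was computed from; the helper name is my own.

#include <algorithm>
#include <cmath>
#include <vector>
#include <opencv2/core/core.hpp>

// Illustrative check on an estimated fundamental matrix F (3x3, CV_32F):
// returns the largest |x2^T * F * x1| residual over all matches.
// A rank-2 check can additionally be done with cv::determinant(F), which
// should be close to zero after the smallest singular value was zeroed.
static float checkEpipolarResiduals(const cv::Mat &F,
                                    const std::vector<cv::Point2f> &points_ref,
                                    const std::vector<cv::Point2f> &points_cur)
{
    float max_residual = 0.f;
    for (size_t i = 0; i < points_ref.size(); i++)
    {
        cv::Mat x1 = (cv::Mat_<float>(3, 1) << points_ref[i].x, points_ref[i].y, 1.f);
        cv::Mat x2 = (cv::Mat_<float>(3, 1) << points_cur[i].x, points_cur[i].y, 1.f);
        const float residual = cv::Mat(x2.t() * F * x1).at<float>(0, 0);
        // small for inlier correspondences, large for outliers
        max_residual = std::max(max_residual, std::fabs(residual));
    }
    return max_residual;
}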
The next question is which model to choose, i.e. when to use the fundamental matrix and when to use the homography. The author re-evaluates the estimated homography and fundamental matrix by recomputing transfer (reprojection) errors over all matches and turning them into scores SH and SF. I was initially unsure about the thresholds chosen here; the relevant part of the ORB-SLAM paper boils down to this: each score is a symmetric transfer error truncated with chi-square thresholds (5.99 for the homography, 3.84 for the fundamental matrix), and the homography is selected when RH = SH / (SH + SF) > 0.45, while the code below uses a slightly more permissive 0.40.
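The sketch below spells that scoring out for the homography case. It follows the paper's description rather than the author's code; the function name and the sigma parameter are my own.

#include <vector>
#include <opencv2/core/core.hpp>

// Illustrative score of a homography H (and its inverse Hinv) over all matches,
// following the symmetric transfer error described in the ORB-SLAM paper.
static float scoreHomography(const cv::Mat &H, const cv::Mat &Hinv,
                             const std::vector<cv::Point2f> &points_ref,
                             const std::vector<cv::Point2f> &points_cur,
                             float sigma = 1.0f)
{
    const float th = 5.991f;                       // chi-square threshold for 2 DOF
    const float inv_sigma2 = 1.0f / (sigma * sigma);
    float score = 0.0f;
    for (size_t i = 0; i < points_ref.size(); i++)
    {
        const float u1 = points_ref[i].x, v1 = points_ref[i].y;
        const float u2 = points_cur[i].x, v2 = points_cur[i].y;

        // project the reference point into the current image with H
        const float w12 = 1.0f / (H.at<float>(2, 0) * u1 + H.at<float>(2, 1) * v1 + H.at<float>(2, 2));
        const float u12 = (H.at<float>(0, 0) * u1 + H.at<float>(0, 1) * v1 + H.at<float>(0, 2)) * w12;
        const float v12 = (H.at<float>(1, 0) * u1 + H.at<float>(1, 1) * v1 + H.at<float>(1, 2)) * w12;
        const float d2_cur = (u2 - u12) * (u2 - u12) + (v2 - v12) * (v2 - v12);

        // project the current point into the reference image with H^-1
        const float w21 = 1.0f / (Hinv.at<float>(2, 0) * u2 + Hinv.at<float>(2, 1) * v2 + Hinv.at<float>(2, 2));
        const float u21 = (Hinv.at<float>(0, 0) * u2 + Hinv.at<float>(0, 1) * v2 + Hinv.at<float>(0, 2)) * w21;
        const float v21 = (Hinv.at<float>(1, 0) * u2 + Hinv.at<float>(1, 1) * v2 + Hinv.at<float>(1, 2)) * w21;
        const float d2_ref = (u1 - u21) * (u1 - u21) + (v1 - v21) * (v1 - v21);

        // truncated quadratic: outliers add nothing, inliers add th - d^2/sigma^2
        if (d2_cur * inv_sigma2 < th) score += th - d2_cur * inv_sigma2;
        if (d2_ref * inv_sigma2 < th) score += th - d2_ref * inv_sigma2;
    }
    return score;
}

The fundamental-matrix score is built the same way from point-to-epipolar-line distances (3.84 for the inlier decision, but the same 5.99 inside the score so the two totals stay comparable), and the two scores are then compared through the ratio RH in the code below.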
With that, the basic pose initialization is complete. The code that adds the second frame is as follows:
bool Initializer::addSecondFrame(FramePtr cur_frame)
{
    cur_features_ = cur_frame->features_;
    if (cur_features_.size() < 100)
    {
        OPENSLAM_WARN << "Second image has less than 100 features. Retry in more textured environment." << std::endl;
        return false;
    }
    // find the correspondences with the reference frame
    ORBmatcher matcher(0.9, true);
    int nmatches = matcher.searchForInitialization(ref_frame_, cur_frame, prev_matched_, init_matchex_, 100);
    // check whether there are enough matches
    if (nmatches < 100)
    {
        OPENSLAM_WARN << "Matching number has less than 100. " << std::endl;
        return false;
    }
    cur_features_ = cur_frame->features_;
    // store the matches between the current frame and the reference frame
    matches_ref_cur_.clear();
    matches_ref_cur_.reserve(cur_features_.size());
    is_matched_ref_.resize(ref_features_.size());
    for (size_t i = 0, iend = init_matchex_.size(); i < iend; i++)
    {
        if (init_matchex_[i] >= 0)
        {
            matches_ref_cur_.push_back(std::make_pair(i, init_matchex_[i]));
            is_matched_ref_[i] = true;
        }
        else
        {
            is_matched_ref_[i] = false;
        }
    }
    const int N = matches_ref_cur_.size();
    // full index list, used for randomly drawing sets of 8 matches
    std::vector<size_t> all_indices;
    all_indices.reserve(N);
    std::vector<size_t> available_indices;
    for (int i = 0; i < N; i++)
    {
        all_indices.push_back(i);
    }
    // generate the sets of 8 matches used by each RANSAC iteration
    ransac_sets_ = std::vector< std::vector<size_t> >(max_iter_num_, std::vector<size_t>(8, 0));
    // seed the random number generator with time(0)
    srand(time(0));
    for (int it = 0; it < max_iter_num_; it++)
    {
        available_indices = all_indices;
        // draw 8 matches without replacement from the remaining indices
        for (size_t j = 0; j < 8; j++)
        {
            int d = available_indices.size();
            int randi = int(((double)rand() / ((double)RAND_MAX + 1.0)) * d);
            int idx = available_indices[randi];
            ransac_sets_[it][j] = idx;
            available_indices[randi] = available_indices.back();
            available_indices.pop_back();
        }
    }
    // launch two threads to compute the fundamental matrix and the homography in parallel
    std::vector<bool> matches_inliers_H, matches_inliers_F;
    float SH, SF;
    cv::Mat H, F;
    std::thread threadH(&Initializer::findHomography, this, std::ref(matches_inliers_H), std::ref(SH), std::ref(H));
    std::thread threadF(&Initializer::findFundamental, this, std::ref(matches_inliers_F), std::ref(SF), std::ref(F));
    // wait until both threads have finished
    threadH.join();
    threadF.join();
    // compute the ratio of the two scores
    float RH = SH / (SH + SF);
    cv::Mat R_cur_ref; // rotation of the current frame relative to the reference frame, whose pose is the identity
    cv::Mat t_cur_ref; // translation of the current frame
    std::vector<cv::Point3f> init_3d_points;
    std::vector<bool> is_triangulated; // whether triangulation succeeded for each initial match
    cv::Mat K = cur_frame->cam_->cvK();
    // whether the initial structure is recovered from the homography or the
    // fundamental matrix depends on this ratio (threshold 0.40-0.45)
    if (RH > 0.40)
    {
        if (!reconstructH(matches_inliers_H, H, K, R_cur_ref, t_cur_ref, init_3d_points, is_triangulated, 1.0, 50))
            return false;
    }
    else
    {
        if (!reconstructF(matches_inliers_F, F, K, R_cur_ref, t_cur_ref, init_3d_points, is_triangulated, 1.0, 50))
            return false;
    }
    // discard matches that could not be triangulated
    for (size_t i = 0, iend = init_matchex_.size(); i < iend; i++)
    {
        if (init_matchex_[i] >= 0 && !is_triangulated[i])
        {
            init_matchex_[i] = -1;
            nmatches--;
        }
    }
    // set the pose of the current frame
    cv::Mat Tcw = cv::Mat::eye(4, 4, CV_32F);
    R_cur_ref.copyTo(Tcw.rowRange(0, 3).colRange(0, 3));
    t_cur_ref.copyTo(Tcw.rowRange(0, 3).col(3));
    cur_frame->T_f_w_ = Tcw;
    return true;
}
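Note that Tcw stores the world-to-camera transform of the current frame (the reference frame being the identity), so if the camera position in world coordinates is needed it has to be recovered as C = -R^T * t. A short fragment, assuming the same cv::Mat layout as above:

// recover the camera center in world coordinates from the 4x4 world-to-camera pose Tcw
cv::Mat R = Tcw.rowRange(0, 3).colRange(0, 3);
cv::Mat t = Tcw.rowRange(0, 3).col(3);
cv::Mat camera_center = -R.t() * t;   // C = -R^T * t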
This post has described a two-frame initialization method for visual odometry based on image matching: how the homography and fundamental matrices are computed from the matched points, and how the appropriate model is selected to initialize the pose.