前文分析了如何进行特征检测匹配,接下来分析如何求解摄像机矩阵。
I:求解基础矩阵
首先介绍两个代码段,关于关键点和Point2f相互转化的函数
/**
 * Convert OpenCV keypoints to their bare 2D image coordinates.
 * @param kps  input keypoints
 * @param ps   output: cleared, then filled with kps[i].pt in the same order
 */
void KeyPointsToPoints(const vector<KeyPoint>& kps, vector<Point2f>& ps) {
ps.clear();
ps.reserve(kps.size()); // one allocation instead of repeated growth
for (unsigned int i=0; i<kps.size(); i++) ps.push_back(kps[i].pt);
}
/**
 * Wrap bare 2D coordinates back into OpenCV keypoints.
 * Each point becomes a KeyPoint with a fixed size of 1.0 (the other
 * KeyPoint attributes — angle, response, octave — take their defaults).
 * @param ps   input 2D points
 * @param kps  output: cleared, then filled with KeyPoint(ps[i], 1.0f) in order
 */
void PointsToKeyPoints(const vector<Point2f>& ps, vector<KeyPoint>& kps) {
kps.clear();
kps.reserve(ps.size()); // one allocation instead of repeated growth
for (unsigned int i=0; i<ps.size(); i++) kps.push_back(KeyPoint(ps[i],1.0f));
}
这两个函数在求解基础矩阵的时候用的特别多。这里提前放着。
下面是求解每两幅图像的基础矩阵
/**
 * Prune the matches between every pair of views using the fundamental
 * matrix: for each pair (i, j) with i < j, GetFundamentalMat() estimates F
 * with RANSAC and rewrites the match list to keep only inliers; the flipped
 * pair (j, i) is then kept in sync via FlipMatches().
 *
 * NOTE: (int)imgs.size() is snapshotted into a signed local first — the
 * original `imgs.size() - 1` is unsigned, so an empty image list would
 * underflow to SIZE_MAX and the loop would run out of bounds.
 */
void MultiCameraPnP::PruneMatchesBasedOnF() {
//prune the match between <_i> and all views using the Fundamental matrix
//#pragma omp parallel for
const int num_views = (int)imgs.size();
for (int _i = 0; _i < num_views - 1; _i++)
{
for (int _j = _i + 1; _j < num_views; _j++) {
int older_view = _i, working_view = _j;
GetFundamentalMat( imgpts[older_view],
imgpts[working_view],
imgpts_good[older_view],
imgpts_good[working_view],
matches_matrix[std::make_pair(older_view,working_view)]
#ifdef __SFM__DEBUG__
,imgs_orig[older_view],imgs_orig[working_view]
#endif
);
//update flip matches as well (critical section in case the parallel for above is re-enabled)
#pragma omp critical
matches_matrix[std::make_pair(working_view,older_view)] = FlipMatches(matches_matrix[std::make_pair(older_view,working_view)]);
}
}
}
核心函数GetFundamentalMat的实现:
计算出两幅图像的基础矩阵,并优化匹配组,基本去除错误匹配
/**
 * Estimate the fundamental matrix between two views and use the RANSAC
 * inlier mask to filter the matches (an approximate way to remove outlier
 * correspondences — see the in-code caveat below).
 *
 * @param imgpts1,imgpts2            all keypoints of view 1 / view 2
 * @param imgpts1_good,imgpts2_good  output: cleared here, presumably filled
 *                                   with surviving inlier keypoints further
 *                                   below (rest of function not shown)
 * @param matches  in: candidate matches (may be empty if the two keypoint
 *                 vectors are already index-aligned); filtered in place
 * @return the 3x3 fundamental matrix F
 */
Mat GetFundamentalMat(const vector<KeyPoint>& imgpts1,
const vector<KeyPoint>& imgpts2,
vector<KeyPoint>& imgpts1_good,
vector<KeyPoint>& imgpts2_good,
vector<DMatch>& matches
#ifdef __SFM__DEBUG__
,const Mat& img_1,
const Mat& img_2
#endif
)
{
//Try to eliminate keypoints based on the fundamental matrix
//(although this is not the proper way to do this)
vector<uchar> status(imgpts1.size());
#ifdef __SFM__DEBUG__
std::vector< DMatch > good_matches_;
std::vector<KeyPoint> keypoints_1, keypoints_2;
#endif
// undistortPoints(imgpts1, imgpts1, cam_matrix, distortion_coeff);
// undistortPoints(imgpts2, imgpts2, cam_matrix, distortion_coeff);
//
imgpts1_good.clear(); imgpts2_good.clear();
vector<KeyPoint> imgpts1_tmp;
vector<KeyPoint> imgpts2_tmp;
if (matches.size() <= 0) {
//points already aligned...
imgpts1_tmp = imgpts1;
imgpts2_tmp = imgpts2;
} else {
GetAlignedPointsFromMatch(imgpts1, imgpts2, matches, imgpts1_tmp, imgpts2_tmp);// keep only the keypoints that take part in a match, aligned by match index
}
Mat F;
{
vector<Point2f> pts1,pts2;
KeyPointsToPoints(imgpts1_tmp, pts1);// convert keypoints to bare Point2f coordinates
KeyPointsToPoints(imgpts2_tmp, pts2);
#ifdef __SFM__DEBUG__
cout << "pts1 " << pts1.size() << " (orig pts " << imgpts1_tmp.size() << ")" << endl;
cout << "pts2 " << pts2.size() << " (orig pts " << imgpts2_tmp.size() << ")" << endl;
#endif
// maxVal is the largest coordinate value in pts1; it scales the RANSAC
// reprojection threshold so the tolerance is relative to image size
double minVal,maxVal;
cv::minMaxIdx(pts1,&minVal,&maxVal);
F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.006 * maxVal, 0.99, status); //threshold from [Snavely07 4.1]// core call: computes F and fills the per-match inlier mask `status`
}
vector<DMatch> new_matches;
cout << "F keeping " << countNonZero(status) <<