// Reference: https://www.3dgep.com/understanding-quaternions/
// SIFTCalibration.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include<iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
using namespace std;
using namespace cv;
void main() {
cv::Mat source_1 = cv::imread("D:\\python_opencv\\source_image\\1123\\1\\1.jpg");
cv::Mat source_2 = cv::imread("D:\\python_opencv\\source_image\\1123\\1\\2.jpg");
cv::Mat source_1_part(source_1, Rect(100, 100, 150, 150));
cv::Mat source_2_part(source_2, Rect(100, 100, 150, 150));
cout << "source_1_part size:" << source_1.size() << endl;
//cv::imshow("source_1", source_1);
//cv::imshow("source_2", source_2);
cout << source_1.size() << endl;
vector<KeyPoint> key_points_1, key_points_2;
cv::Mat descriptors_1, descriptors_2,descriptors_3,descriptors_4;
cv::Mat imgShow1, imgShow2;
cv::Ptr<Feature2D> sift = xfeatures2d::SIFT::create(0, 3, 0.04, 10);
double start = double(getTickCount());
sift->detectAndCompute(source_1_part, noArray(), key_points_1, descriptors_1);
double duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();//计时
std::cout << "It took " << duration_ms << " ms to detect features in pic1 using SIFT." << std::endl;
std::cout << key_points_1.size() << " keypoints are detected in pic1." << std::endl;
start = double(getTickCount());
sift->detectAndCompute(source_2_part, noArray(), key_points_2, descriptors_2);
duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();//计时
std::cout << "It took " << duration_ms << " ms to detect features in pic2 using SIFT." << std::endl;
cv::drawKeypoints(source_1, key_points_1, imgShow1, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
cv::drawKeypoints(source_2, key_points_2, imgShow2, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
std::cout << key_points_2.size() << " keypoints are detected in pic2." << std::endl;
std::cout << "key_points_1 size:" << key_points_1.size() << endl;
std::cout << "key_points_2 size:" << key_points_2.size() << endl;
std::cout << "descriptors_1 type" << descriptors_1.type() << endl;
std::cout << "descriptors1 size:" << descriptors_1.size() << endl;
std::cout << "descriptors2 size:" << descriptors_2.size() << endl;
std::cout << "descriptors_2 type:" << descriptors_2.type() << endl;
//cv::resize(imgShow2, imgShow2, cv::Size(imgShow2.cols, imgShow2.rows));
//cv::resize(imgShow1, imgShow1, cv::Size(imgShow1.cols, imgShow1.rows));
cv::imshow("SIFTKeypoints1", imgShow1);
cv::imshow("SIFTKeypoints2", imgShow2);
// knn_matcher
//在这里采用BruteForce_L2
cv::Ptr<cv::DescriptorMatcher> knn_matcher = cv::DescriptorMatcher::create("BruteForce");
//std::vector<DMatch> knn_matches;
std::vector< std::vector<cv::DMatch> > knn_matches;
//用Matcher匹配特征 :
BFMatcher bfMatcher;
std::vector< DMatch > bf_matches;
//knn match start
//采用knn匹配法来观察合成效果
sift->detectAndCompute(source_1_part, noArray(), key_points_1, descriptors_3);
sift->detectAndCompute(source_1_part, noArray(), key_points_1, descriptors_4);
knn_matcher->knnMatch(descriptors_3, descriptors_4, knn_matches, 2);
//去掉不可靠的L2 - KNN匹配
vector< cv::DMatch > goodMatches;
cout << "knn matches size" << knn_matches.size()<<endl;
for (size_t i = 0; i < knn_matches.size(); i++)
{
if (knn_matches[i][0].distance < 0.8 *
knn_matches[i][1].distance)
goodMatches.push_back(knn_matches[i][0]);
}
cout << "goodMatches size" << goodMatches.size() << endl;
vector< DMatch > inliers_knn;
cout << "Computing homography (RANSAC)" << endl;
vector<Point2f> points3(goodMatches.size());
vector<Point2f> points4(goodMatches.size());
for (size_t i = 0; i < goodMatches.size(); i++)
{
points3[i] = key_points_1[goodMatches[i].queryIdx].pt;//queryIdx 指代的是匹配上的点在待匹配图像的Id
points4[i] = key_points_2[goodMatches[i].trainIdx].pt;//trainIdx 指代的是匹配上的点在匹配图像的Id
}
cout << "size of knn match points size" << endl;
cout << points3.size() << endl;
cout << points4.size() << endl;
std::cout << "descriptors_3 size:" << descriptors_3.size() << endl;
std::cout << "descriptors_4 type:" << descriptors_4.type() << endl;
cv::Mat source_2_aftertransaction_knn;
vector<uchar> flag1_knn(points3.size(), 0);
Mat Hknn = findHomography(points4, points3, CV_RANSAC, 3, flag1_knn);
warpPerspective(source_2, source_2_aftertransaction_knn, Hknn, source_2.size());
//duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();//计时
std::cout << "It took " << duration_ms << " ms to generate the relationship matrix." << std::endl;
cv::Mat source_1_32F_knn, source_2_aftertransaction_32F_knn;
source_1.convertTo(source_1_32F_knn, CV_32F, 1.0 / 255.0);
source_2_aftertransaction_knn.convertTo(source_2_aftertransaction_32F_knn, CV_32F, 1.0 / 255.0);
//source_2.convertTo(source_2_32F, CV_32F, 1.0 / 255.0);
cv::Mat imresult_knn;
//imresult_2 = (source_1_32F + source_2_32F) / 2;
imresult_knn = (source_1_32F_knn + source_2_aftertransaction_32F_knn) / 2;
//cv::resize(imresult, imresult, cv::Size(imgshowcanny.cols / 16, imgshowcanny.rows / 16));
cv::imshow("knnresult", imresult_knn);
//cv::imshow("result_2", imresult_
//cout << H << endl << endl;
std::vector<DMatch> inliners_knn;
for (int i = 0; i < goodMatches.size(); i++)
{
if (flag1_knn[i])
{
inliners_knn.push_back(goodMatches[i]);
}
}
cout << "Good KNN matches inliers size = " << inliners_knn.size() << endl;
cv::Mat KNNMatchImage;
cv::drawMatches(source_1, key_points_1, source_2, key_points_2, inliners_knn, KNNMatchImage);
cv::imshow("KNNMatchImage", KNNMatchImage);
//knn match end
start = double(getTickCount());
bfMatcher.match(descriptors_1, descriptors_2, bf_matches);
duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();//计时
std::cout << "It took " << duration_ms << " ms to do bf-match on SIFT features." << std::endl;
cout << "bf_matches size::" << bf_matches.size() << endl;
//用RANSAC求取单应矩阵的方式去掉不可靠的BF匹配
vector< DMatch > inliers;
cout << "Computing homography (RANSAC)" << endl;
vector<Point2f> points1(bf_matches.size());
vector<Point2f> points2(bf_matches.size());
for (size_t i = 0; i < bf_matches.size(); i++)
{
points1[i] = key_points_1[bf_matches[i].queryIdx].pt;//queryIdx 指代的是匹配上的点在待匹配图像的Id
points2[i] = key_points_2[bf_matches[i].trainIdx].pt;//trainIdx 指代的是匹配上的点在匹配图像的Id
}
//计算单应矩阵并找出inliers
vector<uchar> flag1(points1.size(), 0);
Mat source_2_aftertransaction, imresult, source_1_32F, source_2_aftertransaction_32F, source_2_32F, imresult_2;
start = double(getTickCount());
//0.2 (Threshold)
Mat H = findHomography(points2, points1, CV_RANSAC, 3, flag1);
warpPerspective(source_2, source_2_aftertransaction, H, source_2.size());
duration_ms = (double(getTickCount()) - start) * 1000 / getTickFrequency();//计时
std::cout << "It took " << duration_ms << " ms to generate the relationship matrix." << std::endl;
source_1.convertTo(source_1_32F, CV_32F, 1.0 / 255.0);
source_2_aftertransaction.convertTo(source_2_aftertransaction_32F, CV_32F, 1.0 / 255.0);
source_2.convertTo(source_2_32F, CV_32F, 1.0 / 255.0);
imresult_2 = (source_1_32F + source_2_32F) / 2;
imresult = (source_1_32F + source_2_aftertransaction_32F) / 2;
//cv::resize(imresult, imresult, cv::Size(imgshowcanny.cols / 16, imgshowcanny.rows / 16));
cv::imshow("result", imresult);
cv::imshow("result_2", imresult_2);
imresult.convertTo(imresult, CV_16U, 255.0);
//cv::imwrite("result_1.jpg", imresult);
std::cout << "transaction matrix:" << H << std::endl;
//cout << H << endl << endl;
for (int i = 0; i < bf_matches.size(); i++)
{
if (flag1[i])
{
inliers.push_back(bf_matches[i]);
}
}
cout << "AKAZE BF matches inliers size = " << inliers.size() << endl;
cv::Mat BFMatchImage;
cv::drawMatches(source_1, key_points_1, source_2, key_points_2, inliers, BFMatchImage);
cv::imshow("BFMatchImage", BFMatchImage);
cv::waitKey(0);
}