#include <algorithm>   // std::nth_element
#include <cassert>     // assert
#include <cstddef>     // std::size_t
#include <iostream>    // std::cout, std::cerr

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/legacy/legacy.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "a.jpg",CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "b.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BruteForceMatcher< L2<float> > matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
std::nth_element(matches.begin(),matches.begin()+24,matches.end());
matches.erase(matches.begin()+25,matches.end());
//show matches
for( size_t m = 0; m < matches.size(); m++ )
{
int i1 = matches[m].queryIdx;
int i2 = matches[m].trainIdx;
std::cout<<"1......x:"<<matches[m].distance<<std::endl;
assert(i1 >= 0 && i1 < static_cast<int>(keypoints_1.size()));
assert(i2 >= 0 && i2 < static_cast<int>(keypoints_2.size()));
std::cout<<"1......x:"<<keypoints_1[i1].pt.x<<" y: "<<keypoints_1[i1].pt.y<<std::endl;
std::cout<<"2......x:"<<keypoints_2[i2].pt.x<<" y: "<<keypoints_2[i2].pt.y<<std::endl;
}
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
waitKey(0);
// draw matches
return 0;
}
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "a.jpg",CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "b.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BruteForceMatcher< L2<float> > matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
std::nth_element(matches.begin(),matches.begin()+24,matches.end());
matches.erase(matches.begin()+25,matches.end());
//show matches
for( size_t m = 0; m < matches.size(); m++ )
{
int i1 = matches[m].queryIdx;
int i2 = matches[m].trainIdx;
std::cout<<"1......x:"<<matches[m].distance<<std::endl;
assert(i1 >= 0 && i1 < static_cast<int>(keypoints_1.size()));
assert(i2 >= 0 && i2 < static_cast<int>(keypoints_2.size()));
std::cout<<"1......x:"<<keypoints_1[i1].pt.x<<" y: "<<keypoints_1[i1].pt.y<<std::endl;
std::cout<<"2......x:"<<keypoints_2[i2].pt.x<<" y: "<<keypoints_2[i2].pt.y<<std::endl;
}
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
waitKey(0);
// draw matches
return 0;
}
// NOTE(review): the lines below are residue from the web page this code was
// scraped from (a view counter and comment-fold widget text), commented out
// so they no longer break compilation:
// 27万+                    (view count: 270k+)
// 被折叠的 条评论          (folded comments)
// 为什么被折叠?            (why were they folded?)