- This source code was ported to OpenCV 4.2 from an OpenCV 2 example written by an experienced developer found online; quite a bit of the syntax had to be adjusted, most notably the definition of the SURF detector:
The old code was:
SurfFeatureDetector featureDetector(80);
SurfDescriptorExtractor featureExtractor;
In the new OpenCV 4.2 it becomes (a complete minimal sketch of the new API follows this item):
Ptr<cv::xfeatures2d::SURF> detector = cv::xfeatures2d::SURF::create(minHessian);
Ptr <cv::xfeatures2d::SURF> extractor = cv::xfeatures2d::SURF::create(minHessian);
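- For reference, the following is a minimal, self-contained sketch of the new API. It assumes OpenCV 4.x was built with the opencv_contrib xfeatures2d module and OPENCV_ENABLE_NONFREE=ON; the image file name is just a placeholder.

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <cstdio>
#include <vector>

int main(void)
{
    //Placeholder file name; replace with any test image
    cv::Mat img = cv::imread("sample.jpg", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return -1;
    //Create the SURF detector/extractor exactly as in the OpenCV 4.2 snippet above
    int minHessian = 400;
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(minHessian);
    //Detection and descriptor computation can also be done in a single call
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    surf->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    printf("Detected %d SURF keypoints\n", (int)keypoints.size());
    return 0;
}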
- To run this code you need a platform with OpenCV 4.2 plus opencv_contrib 4.2.0 (64-bit). A prebuilt package of these dependencies can be downloaded from the address below (a quick check that your OpenCV build actually includes the contrib/non-free SURF code is sketched after this item):
https://download.youkuaiyun.com/download/dcx_dcx/12237707
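- If you are not sure whether your OpenCV build contains the contrib modules and the patented (non-free) SURF implementation, the small check below can help. This is only a sketch: it assumes the xfeatures2d headers are on the include path and simply tries to create a SURF object, which fails with a cv::Exception when the library was built without OPENCV_ENABLE_NONFREE.

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>

int main(void)
{
    //Print the build configuration; look for "xfeatures2d" in the module list and for the NONFREE setting
    std::cout << cv::getBuildInformation() << std::endl;
    try
    {
        //Throws a cv::Exception if the non-free algorithms were excluded from the build
        cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(400);
        std::cout << "SURF is available." << std::endl;
    }
    catch (const cv::Exception& e)
    {
        std::cout << "SURF is not available: " << e.what() << std::endl;
    }
    return 0;
}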
- The code was compiled into a 64-bit binary with Visual C++ 2019 (VC2019).
- The demo source code can be downloaded from: https://download.youkuaiyun.com/download/dcx_dcx/12192309
- The main code is shown below; a minimal driver that loads the two images and calls it is sketched after the listing:
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <cstdio>
#include <cmath>
using namespace cv;
using namespace std;

//The object (template) image and the scene image; the original demo defines and loads them elsewhere,
//they are declared here only so that the listing is self-contained
Mat srcImage1, srcImage2;

void SURFDemo1(void)
{
    //Detect keypoints with the SURF operator
    int minHessian = 400;   //Hessian threshold of the SURF algorithm
    Ptr<cv::xfeatures2d::SURF> detector = cv::xfeatures2d::SURF::create(minHessian);
    vector<KeyPoint> keypoints_object, keypoints_scene;   //vector is a template class for dynamic arrays of any element type
    Ptr<cv::xfeatures2d::SURF> extractor = cv::xfeatures2d::SURF::create(minHessian);
    Mat descriptors_object, descriptors_scene;

    //Call detect to find the SURF keypoints and store them in the vector containers
    detector->detect(srcImage1, keypoints_object);
    detector->detect(srcImage2, keypoints_scene);
    Mat matshow1, matshow2, kp1, kp2;
    //Draw the SURF keypoints (optional)
    //drawKeypoints(srcImage1, keypoints_object, kp1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    //drawKeypoints(srcImage2, keypoints_scene, kp2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

    //Compute the SURF descriptors
    extractor->compute(srcImage1, keypoints_object, descriptors_object);
    extractor->compute(srcImage2, keypoints_scene, descriptors_scene);
    //Alternatively, detectAndCompute performs detection and description in a single call
    //detector->detectAndCompute(srcImage1, Mat(), keypoints_object, descriptors_object);
    //detector->detectAndCompute(srcImage2, Mat(), keypoints_scene, descriptors_scene);

    vector<DMatch> matches;
    //FLANN-based matcher
    FlannBasedMatcher matcher;   // (new flann::LshIndexParams(20, 10, 2));
    //Brute-force matcher
    BFMatcher matcher1;
    //Pick one of the two matchers; with if (0) the brute-force branch is taken
    if (0)
    {
        matcher.match(descriptors_object, descriptors_scene, matches);
    }
    else
    {
        matcher1.match(descriptors_object, descriptors_scene, matches);
    }

    double max_dist = 0; double min_dist = 100;   //maximum and minimum match distances
    //[6] Compute the maximum and minimum distances between matched keypoints
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("> Max dist : %f \n", max_dist);
    printf("> Min dist : %f \n", min_dist);

    //[7] Keep only the match pairs whose distance is less than 2.5*min_dist
    std::vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        if (matches[i].distance < 2.5 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    //Draw the matched keypoints
    Mat img_matches;
    drawMatches(srcImage1, keypoints_object, srcImage2, keypoints_scene,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //Two local containers for the matched point coordinates
    vector<Point2f> obj;
    vector<Point2f> scene;
    //Collect the keypoint coordinates from the good matches
    for (unsigned int i = 0; i < good_matches.size(); i++)
    //for (unsigned int i = 0; i < 10; i++)
    {
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }
    Mat H = findHomography(obj, scene, RANSAC);   //estimate the perspective (homography) transform

    //Take the four corners of the object image
    vector<Point2f> obj_corners(4);
    obj_corners[0] = cv::Point(0, 0);
    obj_corners[1] = Point(srcImage1.cols, 0);
    obj_corners[2] = Point(srcImage1.cols, srcImage1.rows);
    obj_corners[3] = Point(0, srcImage1.rows);
    vector<Point2f> scene_corners(4);
    //Apply the perspective transform to map the corners into the scene
    perspectiveTransform(obj_corners, scene_corners, H);

    //Compute the rotation angle of the top edge of the detected region (computed but not used further in this demo)
    cv::Point2f p1, p2, p;
    p1 = scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0);
    p2 = scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0);
    p = p1 - p2;
    float fk, ang;
    fk = p.x / p.y;
    ang = atan(fk);
    ang = 180.00001 * ang / 3.1415926;   //radians to degrees

    //Outline the matched position of the small image inside the large scene image
    line(img_matches, scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(0, 0, 255), 8);
    line(img_matches, scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(0, 0, 255), 8);
    line(img_matches, scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(0, 0, 255), 8);
    line(img_matches, scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(0, 0, 255), 8);

    //Show the final result
    imshow("Result", img_matches);
}
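- The original demo loads srcImage1 and srcImage2 elsewhere, so for completeness here is a hypothetical driver that reuses the includes and globals from the listing above; the file names and the main() function are assumptions for illustration, not part of the downloadable source.

int main(void)
{
    //Placeholder file names: the object (template) image and the scene image
    srcImage1 = imread("object.jpg", IMREAD_COLOR);
    srcImage2 = imread("scene.jpg", IMREAD_COLOR);
    if (srcImage1.empty() || srcImage2.empty())
    {
        printf("Could not load the input images.\n");
        return -1;
    }
    SURFDemo1();
    waitKey(0);   //keep the result window open until a key is pressed
    return 0;
}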
- The result of running the demo is shown below: