【OpenCV】Sample asift.cpp: affine feature matching between two images


#include <opencv2/core.hpp>       // OpenCV core functionality
#include <opencv2/imgproc.hpp>    // image processing
#include <opencv2/features2d.hpp> // feature detection and description
#include <opencv2/highgui.hpp>    // GUI functions such as window display
#include <opencv2/calib3d.hpp>    // camera calibration and 3D reconstruction (findHomography)
#include <iostream>               // standard input/output streams
#include <iomanip>                // stream formatting (setprecision)


using namespace std; // use the standard namespace
using namespace cv;  // use the OpenCV namespace


// Help function that prints how to use this program
static void help(char** argv)
{
    cout
    << "This is a sample usage of AffineFeature detector/extractor.\n"
    << "And this is a C++ version of samples/python/asift.py\n"
    << "Usage: " << argv[0] << "\n"
    // The program's command-line options are described below
    << "     [ --feature=<sift|orb|brisk> ]         # Feature to use.\n"
    << "     [ --flann ]                            # use Flann-based matcher instead of bruteforce.\n"
    << "     [ --maxlines=<number(50 as default)> ] # The maximum number of lines in visualizing the matching result.\n"
    << "     [ --image1=<image1(aero1.jpg as default)> ]\n"
    << "     [ --image2=<image2(aero3.jpg as default)> ] # Path to images to compare."
    << endl;
}


// Timer function, used to measure how long operations take (in seconds)
static double timer()
{
    return getTickCount() / getTickFrequency();
}


// Main entry point; argc is the argument count, argv the argument list
int main(int argc, char** argv)
{
    vector<String> fileName; // vector of input file paths
    // Parse the command line with OpenCV's CommandLineParser
    cv::CommandLineParser parser(argc, argv,
        "{help h ||}"
        "{feature|brisk|}"
        "{flann||}"
        "{maxlines|50|}"
        "{image1|aero1.jpg|}{image2|aero3.jpg|}");
    // If the user asked for help, print usage and exit
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    // Read the options from the parser
    string feature = parser.get<string>("feature");
    bool useFlann = parser.has("flann");
    int maxlines = parser.get<int>("maxlines");
    // Locate and store the input image paths
    fileName.push_back(samples::findFile(parser.get<string>("image1")));
    fileName.push_back(samples::findFile(parser.get<string>("image2")));
    // Check the arguments for errors
    if (!parser.check())
    {
        parser.printErrors();
        cout << "See --help (or missing '=' between argument name and value?)" << endl;
        return 1;
    }


    // Load both images directly as grayscale
    Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE);
    Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE);
    // Make sure the images loaded successfully
    if (img1.empty())
    {
        cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl;
        return 1;
    }
    if (img2.empty())
    {
        cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl;
        return 1;
    }


    // Pointers to the feature detector/extractor and the descriptor matcher
    Ptr<Feature2D> backend;
    Ptr<DescriptorMatcher> matcher;


    // Initialize the detector and matcher according to the user's choice.
    // ORB and BRISK produce binary descriptors, so their brute-force matcher
    // uses Hamming distance and their FLANN matcher needs an LSH index.
    if (feature == "sift")
    {
        backend = SIFT::create();
        if (useFlann)
            matcher = DescriptorMatcher::create("FlannBased");
        else
            matcher = DescriptorMatcher::create("BruteForce");
    }
    else if (feature == "orb")
    {
        backend = ORB::create();
        if (useFlann)
            matcher = makePtr<FlannBasedMatcher>(makePtr<flann::LshIndexParams>(6, 12, 1));
        else
            matcher = DescriptorMatcher::create("BruteForce-Hamming");
    }
    else if (feature == "brisk")
    {
        backend = BRISK::create();
        if (useFlann)
            matcher = makePtr<FlannBasedMatcher>(makePtr<flann::LshIndexParams>(6, 12, 1));
        else
            matcher = DescriptorMatcher::create("BruteForce-Hamming");
    }
    else
    {
        cerr << feature << " is not supported. See --help" << endl;
        return 1;
    }


    // Detect keypoints, compute descriptors, then match them
    cout << "extracting with " << feature << "..." << endl;
    Ptr<AffineFeature> ext = AffineFeature::create(backend);
    vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;


    ext->detectAndCompute(img1, Mat(), kp1, desc1);
    ext->detectAndCompute(img2, Mat(), kp2, desc2);
    cout << "img1 - " << kp1.size() << " features, "
         << "img2 - " << kp2.size() << " features"
         << endl;


    cout << "matching with " << (useFlann ? "flann" : "bruteforce") << "..." << endl;
    double start = timer(); // start timing
    // Match the descriptors and keep only the good matches
    vector< vector<DMatch> > rawMatches;
    vector<Point2f> p1, p2;
    vector<float> distances;
    matcher->knnMatch(desc1, desc2, rawMatches, 2);
    // Keep matches that pass Lowe's ratio test (best distance < 0.75 * second-best)
    for (size_t i = 0; i < rawMatches.size(); i++)
    {
        const vector<DMatch>& m = rawMatches[i];
        if (m.size() == 2 && m[0].distance < m[1].distance * 0.75)
        {
            p1.push_back(kp1[m[0].queryIdx].pt);
            p2.push_back(kp2[m[0].trainIdx].pt);
            distances.push_back(m[0].distance);
        }
    }
    // Estimate a homography and classify each correspondence as inlier/outlier
    vector<uchar> status; // per-correspondence inlier mask
    vector< pair<Point2f, Point2f> > pointPairs; // matched point pairs that survive RANSAC
    Mat H = findHomography(p1, p2, status, RANSAC); // homography from image 1 to image 2, estimated with RANSAC
    int inliers = 0; // inlier counter
    // Walk the status mask, counting inliers and compacting their point pairs and distances
    for (size_t i = 0; i < status.size(); i++)
    {
        // A nonzero status entry means this correspondence is an inlier
        if (status[i])
        {
            pointPairs.push_back(make_pair(p1[i], p2[i])); // keep the inlier pair
            distances[inliers] = distances[i]; // compact the matching distance in place
            // CV_Assert(inliers <= (int)i); // sanity check: the compaction never overtakes index i
            inliers++;
        }
    }
    distances.resize(inliers); // shrink distances to the number of inliers
    
    // Report the execution time
    cout << "execution time: " << fixed << setprecision(2) << (timer()-start)*1000 << " ms" << endl;
    // Report the inlier / matched-pair ratio
    cout << inliers << " / " << status.size() << " inliers/matched" << endl;

    // Prepare the match visualization
    cout << "visualizing..." << endl;
    vector<int> indices(inliers); // indices of the inliers, to be sorted by distance
    // Sort the inlier indices by matching distance, ascending (the vector is treated as a 1xN matrix)
    cv::sortIdx(distances, indices, SORT_EVERY_ROW+SORT_ASCENDING);
    // Build the side-by-side canvas and draw the matched keypoints
    int h1 = img1.size().height;
    int w1 = img1.size().width;
    int h2 = img2.size().height;
    int w2 = img2.size().width;
    Mat vis = Mat::zeros(max(h1, h2), w1+w2, CV_8U);
    img1.copyTo(Mat(vis, Rect(0, 0, w1, h1)));
    img2.copyTo(Mat(vis, Rect(w1, 0, w2, h2)));
    cvtColor(vis, vis, COLOR_GRAY2BGR);


    vector<Point2f> corners(4);
    corners[0] = Point2f(0, 0);
    corners[1] = Point2f((float)w1, 0);
    corners[2] = Point2f((float)w1, (float)h1);
    corners[3] = Point2f(0, (float)h1);
    vector<Point2i> icorners;
    perspectiveTransform(corners, corners, H); // map image 1's corners through the homography
    transform(corners, corners, Matx23f(1,0,(float)w1,0,1,0)); // shift the projected corners onto image 2's half of the canvas
    Mat(corners).convertTo(icorners, CV_32S); // convert the corner points to integer coordinates
    polylines(vis, icorners, true, Scalar(255,255,255)); // outline image 1's projected border


    // Draw up to maxlines of the best (smallest-distance) inlier matches
    for (int i = 0; i < min(inliers, maxlines); i++)
    {
        int idx = indices[i];
        const Point2f& pi1 = pointPairs[idx].first;
        const Point2f& pi2 = pointPairs[idx].second;
        circle(vis, pi1, 2, Scalar(0,255,0), -1); // dot on image 1
        circle(vis, pi2 + Point2f((float)w1,0), 2, Scalar(0,255,0), -1); // corresponding dot on image 2's half
        line(vis, pi1, pi2 + Point2f((float)w1,0), Scalar(0,255,0)); // line connecting the pair
    }
    if (inliers > maxlines)
        cout << "only " << maxlines << " inliers are visualized" << endl;
    imshow("affine find_obj", vis); // 显示最终的可视化结果窗口


    // Optional (disabled): warp image 1 by H and show it next to image 2 as a visual check
    // Mat vis2 = Mat::zeros(max(h1, h2), w1+w2, CV_8U); // blank canvas for the second visualization
    // Mat warp1; // image 1 after the perspective warp
    // warpPerspective(img1, warp1, H, Size(w1, h1)); // apply the homography to image 1
    // warp1.copyTo(Mat(vis2, Rect(0, 0, w1, h1))); // warped image 1 on the left half
    // img2.copyTo(Mat(vis2, Rect(w1, 0, w2, h2))); // image 2 on the right half
    // imshow("warped", vis2); // show the warped-vs-image-2 window


    waitKey(); // wait for a key press
    cout << "done" << endl; // completion message
    return 0;
}

This C++ program loads two images, detects and matches local features between them with OpenCV, estimates the homography relating the two views with RANSAC, and visualizes the resulting matches. The user can select the feature backend (SIFT, ORB, or BRISK) and opt into FLANN-based approximate nearest-neighbor search instead of brute-force matching. When it finishes, the program reports the matching time and the inlier/match counts.
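For reference, an invocation might look like the lines below. The binary name example_cpp_asift is an assumption (it is what OpenCV's own build tree produces for this sample; a standalone build may name it differently); the flags are exactly those declared in the parser above.

./example_cpp_asift --feature=sift --flann --maxlines=80 --image1=aero1.jpg --image2=aero3.jpg
./example_cpp_asift     # defaults: brisk, brute-force matching, aero1.jpg vs aero3.jpg

Both aero1.jpg and aero3.jpg ship in OpenCV's samples/data directory, which is why samples::findFile can locate them without a full path.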

Terminal output:

img1 - 39607 features, img2 - 24674 features
matching with bruteforce...
execution time: 35513.65 ms
41 / 105 inliers/matched
visualizing...

Two calls do the heavy lifting:

Ptr<AffineFeature> ext = AffineFeature::create(backend);

AffineFeature is an adapter around the chosen backend detector/extractor. Following the ASIFT approach, it synthesizes a set of affine views of the input image (combinations of tilts and in-plane rotations), runs the backend on every view, and maps the resulting keypoints back into the original image, making the backend's features robust to large viewpoint changes.

Mat H = findHomography(p1, p2, status, RANSAC);

findHomography estimates the 3x3 homography that maps the points p1 in image 1 onto p2 in image 2 using RANSAC, and fills status with a per-correspondence mask marking which pairs are inliers of the estimated model.
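As a side note, AffineFeature::create also exposes the view-simulation parameters. Below is a minimal sketch using what the OpenCV documentation lists as the defaults; treat the exact values and the parameter semantics in the comments as assumptions to verify against your OpenCV version.

#include <opencv2/features2d.hpp>

using namespace cv;

// Build an ASIFT-style extractor with the view-simulation knobs spelled out.
// Tilts are sampled as tiltStep^i for i in [minTilt, maxTilt]; larger maxTilt
// means more simulated views, hence more robustness but slower extraction.
static Ptr<AffineFeature> makeAsift()
{
    Ptr<Feature2D> backend = SIFT::create();
    return AffineFeature::create(backend,
                                 /*maxTilt=*/5,   // highest tilt exponent to simulate
                                 /*minTilt=*/0,   // 0 keeps the original, unwarped view
                                 /*tiltStep=*/1.4142135623730951f, // sqrt(2), as in the ASIFT paper
                                 /*rotateStepBase=*/72.f); // base rotation sampling step, in degrees
}

Lowering maxTilt (to 2 or 3, say) is the first thing to try when extraction or matching is too slow, such as the 35-second brute-force run in the output above, at the cost of tolerance to extreme viewpoint changes.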

Where is affine feature matching useful?

Because the simulated views cover large changes in camera angle, affine-invariant matching such as ASIFT helps wherever two photos of the same scene are taken from very different viewpoints: wide-baseline image registration and stitching, object recognition and retrieval under oblique views, visual localization, and establishing correspondences for 3D reconstruction.

Reference: http://www.ipol.im/pub/art/2011/my-asift/ (ASIFT at IPOL: paper, online demo, and source code).