全景拼接代码

本文展示了如何利用OpenCV库进行全景图像的拼接。通过读取多张图片,利用SURF特征匹配、Homography计算以及透视变换,将多张图片组合成一张全景图。代码中涉及的关键步骤包括图像预处理、特征检测与匹配、Homography矩阵计算以及图像融合。最终实现了6张图像的无缝拼接,并展示了拼接过程和结果。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

/// 全景拼接.cpp : 定义控制台应用程序的入口点。
//*下面注释掉网上的库函数例程代码*/
//bool try_use_gpu = false;
//vector<Mat> imgs;
//using namespace std; 
//using namespace cv;
//string IMAGE_PATH_PREFIX = "data1/";
//Mat img = imread(IMAGE_PATH_PREFIX+"1.jpg");
// imgs.push_back(img);
// img = imread(IMAGE_PATH_PREFIX + "2.jpg");
// imgs.push_back(img);
// img = imread(IMAGE_PATH_PREFIX + "3.jpg");
// imgs.push_back(img);
// img = imread(IMAGE_PATH_PREFIX + "4.jpg");
// imgs.push_back(img);
// img = imread(IMAGE_PATH_PREFIX + "5.jpg");
// imgs.push_back(img);
// img = imread(IMAGE_PATH_PREFIX + "6.jpg");
// imgs.push_back(img);
// cout << "finish "<<imgs.size()<<endl;
// Mat pano;//拼接结果图片
// //Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
// Stitcher stitcher = Stitcher::createDefault(true);
// Stitcher::Status status = stitcher.stitch(imgs, pano);
// if (status != Stitcher::OK)
// {
//  cout << "Can't stitch images, error code = " << int(status) << endl;
//  return -1;
// }
// imwrite(result_name, pano);
// imwrite(result_name, pano);
// waitKey(0);
// return 0;
//void CalcCorners(const Mat& H, const Mat& src)
//{
// double v2[] = { 0, 0, 1 };//左上角
// double v1[3];//变换后的坐标值
// Mat V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
// Mat V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
//
// V1 = H * V2;
// //左上角(0,0,1)
// corners.left_top.x = v1[0] / v1[2];
// corners.left_top.y = v1[1] / v1[2];
//
// //左下角(0,src.rows,1)
// v2[0] = 0;
// v2[1] = src.rows;
// v2[2] = 1;
// V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
// V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
// V1 = H * V2;
// corners.left_bottom.x = v1[0] / v1[2];
// corners.left_bottom.y = v1[1] / v1[2];
//
// //右上角(src.cols,0,1)
// v2[0] = src.cols;
// v2[1] = 0;
// v2[2] = 1;
// V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
// V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
// V1 = H * V2;
// corners.right_top.x = v1[0] / v1[2];
// corners.right_top.y = v1[1] / v1[2];
//
// //右下角(src.cols,src.rows,1)
// v2[0] = src.cols;
// v2[1] = src.rows;
// v2[2] = 1;
// V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
// V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
// V1 = H * V2;
// corners.right_bottom.x = v1[0] / v1[2];
// corners.right_bottom.y = v1[1] / v1[2];
//
//}
//typedef struct
//{
// Point2f left_top;
// Point2f left_bottom;
// Point2f right_top;
// Point2f right_bottom;
//}four_corners_t;
//four_corners_t corners;
#include "stdafx.h"

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/stitching/stitcher.hpp>
using namespace cv;
using namespace std;
// Directory that holds the six input photos "1.jpg" .. "6.jpg".
string IMAGE_PATH_PREFIX = "data1/";
// Stitch two overlapping images via SURF matching + homography; returns the combined canvas.
cv::Mat Stitcher2pic(const cv::Mat &srcimage1, const cv::Mat &srcimage2);
// Add srcimage1 to an existing two-image panorama (image1_2).
// NOTE: replaces srcimage2 in place with a shifted, enlarged copy.
cv::Mat Stitcher3pic(cv::Mat &srcimage1, cv::Mat &srcimage2, cv::Mat& image1_2);
// Half-size input images, filled by main().
std::vector<cv::Mat> imgs;
// Translate src by (x, y) pixels on a same-size black canvas.
cv::Mat imagedisplacement(cv::Mat &src, int x, int y);
// Add a fourth image to a three-image panorama (definition currently commented out below).
cv::Mat Stitcher4pic(cv::Mat &srcimage1, cv::Mat &srcimage2, cv::Mat& image123);
// Final merge of the two stitched halves into the full panorama; writes "result.jpg".
cv::Mat Stitcher6pic(cv::Mat &srcimage1, cv::Mat &srcimage2);
int main()
 {
 Mat img = imread(IMAGE_PATH_PREFIX + "1.jpg");
 resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  img = imread(IMAGE_PATH_PREFIX + "2.jpg"); 
  resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  img = imread(IMAGE_PATH_PREFIX + "3.jpg"); 
  resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  img = imread(IMAGE_PATH_PREFIX + "4.jpg"); 
  resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  img = imread(IMAGE_PATH_PREFIX + "5.jpg"); 
  resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  img = imread(IMAGE_PATH_PREFIX + "6.jpg"); 
  resize(img, img, cv::Size(), 0.5, 0.5);
  imgs.push_back(img);
  cout << "finish " << imgs.size() << endl;
  //imgs[1] = imagedisplacement(imgs[1], 100, 100);
  cv::Mat k1 = Stitcher2pic(imgs[1], imgs[2]);
  cv::Mat k2= Stitcher3pic(imgs[5], imgs[2], k1);
 // imshow("k2", k2);//pic236
  cv::Mat k3 = Stitcher2pic(imgs[0], imgs[3]);//pic145
  cv::Mat k4 = Stitcher3pic(imgs[4], imgs[3], k3);
  k4 = imagedisplacement(k4 ,-380,0);
 // imshow("k4", k4);
  cv::Mat k5 = Stitcher6pic(k2,k4);
//  imshow("k5", k5);
 
  waitKey(0);
  return 0;
 }
//int main()
//{
//
// cv::Mat img = cv::imread(IMAGE_PATH_PREFIX + "1.jpg");
// resize(img, img, cv::Size(), 0.5, 0.5);
//  imgs.push_back(img); 
//    img = cv::imread(IMAGE_PATH_PREFIX + "2.jpg");
// resize(img, img, cv::Size(), 0.5, 0.5);
// imgs.push_back(img);
// img = cv::imread(IMAGE_PATH_PREFIX + "3.jpg");
// resize(img, img, cv::Size(), 0.5, 0.5);
// imgs.push_back(img);
// img = cv::imread(IMAGE_PATH_PREFIX + "4.jpg");
// resize(img, img, cv::Size(), 0.5, 0.5);
// imgs.push_back(img);
// cv::Mat k1=Stitcher2pic(imgs[0], imgs[1]);
// k1=imagedisplacement(k1, 500, 300);
// k1 = Stitcher3pic(imgs[2], imgs[1], k1);
// Stitcher4pic(imgs[3], imgs[1], k1);
// cv::waitKey(0);
//
//}
/// Translate an image by (x, y) pixels (positive = right/down).
/// The output canvas keeps the source size, so content shifted past the
/// border is cropped and the vacated area is filled with black.
cv::Mat imagedisplacement(cv::Mat &src, int x, int y)
{
    cv::Mat dst;
    cv::Size dst_sz = src.size();
    // 2x3 affine matrix for a pure translation: [1 0 x; 0 1 y]
    cv::Mat t_mat = cv::Mat::zeros(2, 3, CV_32FC1);
    t_mat.at<float>(0, 0) = 1;
    t_mat.at<float>(0, 2) = (float)x;
    t_mat.at<float>(1, 1) = 1;
    t_mat.at<float>(1, 2) = (float)y;
    cv::warpAffine(src, dst, t_mat, dst_sz);
    //cv::imshow("result", dst);
    return dst;  // removed the unreachable cv::waitKey(0) that followed this return
}
/// Stitch two overlapping images: estimate the homography mapping
/// srcimage1 into srcimage2's frame from SURF matches, warp srcimage1
/// onto an enlarged canvas, then paste srcimage2 on top at the origin.
/// Returns the combined canvas (unblended overlay).
cv::Mat Stitcher2pic(const cv::Mat &srcimage1, const cv::Mat &srcimage2)
{
    // --- SURF keypoint detection ---
    int minHessian = 300;
    cv::SURF detector(minHessian);
    std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
    detector.detect(srcimage1, keypoints_1);
    detector.detect(srcimage2, keypoints_2);
    // --- SURF descriptors (feature vectors from keypoints + image) ---
    cv::SURF extractor;
    cv::Mat descriptors_1, descriptors_2;
    extractor.compute(srcimage1, keypoints_1, descriptors_1);
    extractor.compute(srcimage2, keypoints_2, descriptors_2);
    // --- FLANN matching ---
    cv::FlannBasedMatcher matcher;
    std::vector<cv::DMatch> matches, goodmatches;
    matcher.match(descriptors_1, descriptors_2, matches);
    std::vector<cv::Point2f> list1, list2;
    double min_dist = 100;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < min_dist) min_dist = matches[i].distance;
    }
    // Keep only matches whose distance is close to the best one.
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < 4 * min_dist)
        {
            goodmatches.push_back(matches[i]);
        }
    }
    for (int i = 0; i < (int)goodmatches.size(); i++)
    {
        list1.push_back(keypoints_1[goodmatches[i].queryIdx].pt);
        list2.push_back(keypoints_2[goodmatches[i].trainIdx].pt);
    }
    // findHomography needs at least 4 correspondences; fail loudly
    // instead of letting OpenCV throw.
    if (list1.size() < 4)
    {
        cout << "Stitcher2pic: not enough matches (" << list1.size() << ")" << endl;
        return srcimage2.clone();
    }
    cv::Mat h = cv::findHomography(list1, list2, CV_RANSAC, 3);
    // --- registration: warp image1 into image2's coordinate frame ---
    cv::Mat imageTransform1;
    warpPerspective(srcimage1, imageTransform1, h, cv::Size(srcimage2.cols * 2, srcimage2.rows * 2));
    //imshow("warped by perspective transform", imageTransform1);
    cv::Mat dst(srcimage2.rows + srcimage1.rows, srcimage2.cols + srcimage1.cols, CV_8UC3);
    dst.setTo(0);
    imageTransform1.copyTo(dst(cv::Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
    // BUGFIX: the destination ROI must match srcimage2's size. The original
    // used srcimage1's size, which makes copyTo() assert at runtime whenever
    // the two inputs differ in size.
    srcimage2.copyTo(dst(cv::Rect(0, 0, srcimage2.cols, srcimage2.rows)));
    //cv::imshow("b_dst1", dst);
    return dst;
}
// ////------------------------------------------------------------------------
// //CalcCorners(h, srcimage1);
// //int start = MAX(corners.left_top.x, corners.left_bottom.x);
// //double processWidth = srcimage2.cols - start;//重叠区域的宽度 
// //std::cout << "processWidth" << processWidth<<endl;
// //std::cout << "Width" << dst.cols << endl;
// //int rows = min(srcimage2.rows, imageTransform1.rows);
// //for (int i = 0; i <rows; i++)
// //{
// // for (int j = 0; j < processWidth; j++)
// // {
// //  if (!imageTransform1.at<Vec3b>(i, j + start)[1] )
// //   continue;
// //  dst.at<Vec3b>(i, j + start) = srcimage2.at<Vec3b>(i, j + start)*(double)((processWidth - j) / processWidth) + imageTransform1.at<Vec3b>(i, j + start)*(double)(j / processWidth);
// // }
// //}
// //double processWidth = srcimage2.cols/8;
// ////int rows = min(srcimage2.rows, imageTransform1.rows);
// ////for (int i = 0; i < rows; i++)
// //// {
// ////  for (int j = 0; j < processWidth*2; j++)
// ////  {
// ////   dst.at<Vec3b>(i, j + srcimage2.cols - processWidth) = srcimage2.at<Vec3b>(i, j + srcimage2.cols - processWidth)*0.5 + imageTransform1.at<Vec3b>(i, j + srcimage2.cols - processWidth)*0.5;
// ////   //dst.at<Vec3b>(i, j + start + 10) = srcimage2.at<Vec3b>(i, j + start + 10)*0.5 + imageTransform1.at<Vec3b>(i, j + start + 10)*0.5;
// ////    //cout << dst.at<Vec3b>(i, j + start) << endl;
// ////  }
// //// }
// //cv::imshow("dst", dst);
//}
//// Stitcher3pic改变了标准图2的位置了,因此Stitcher4pic直接算homo不需要先平移标准视角图后再算
/// Add srcimage1 to an existing two-image panorama.
///   srcimage1 : new image to register.
///   srcimage2 : the reference-view image. NOTE: it is REPLACED IN PLACE
///               with a shifted (400,200), enlarged copy so that later
///               calls (see main) work in the same shifted frame.
///   image1_2  : panorama produced by Stitcher2pic; shifted by (400,200)
///               here to match the new reference frame.
/// Returns the enlarged canvas with srcimage1 warped in and the previous
/// panorama pasted on top wherever its blue channel is non-zero.
cv::Mat Stitcher3pic(cv::Mat &srcimage1,  cv::Mat &srcimage2,cv::Mat& image1_2)
{
 image1_2 = imagedisplacement(image1_2, 400, 200);
    cv::Mat temp(srcimage2.rows + srcimage1.rows, srcimage2.cols + srcimage1.cols, CV_8UC3);
 srcimage2.copyTo(temp(cv::Rect(0, 0, srcimage2.cols, srcimage2.rows)));
   srcimage2 = imagedisplacement(temp, 400, 200);
 // SURF detector: find keypoints
  int minHessian = 300;
 cv::SURF detector(minHessian);
 std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
 detector.detect(srcimage1, keypoints_1);
 detector.detect(srcimage2, keypoints_2);
 //compute SURF descriptors (feature vectors built from keypoints + image)
 cv::SURF extractor;
 cv::Mat descriptors_1, descriptors_2;
 extractor.compute(srcimage1, keypoints_1, descriptors_1);
 extractor.compute(srcimage2, keypoints_2, descriptors_2);
 //match descriptors with FLANN
 cv::FlannBasedMatcher matcher;
 std::vector<cv::DMatch> matches, goodmatches;
 matcher.match(descriptors_1, descriptors_2, matches);
 std::vector<cv::Point2f>list1, list2;
 double min_dist = 100;
 for (int i = 0; i < descriptors_1.rows; i++)
 {
  if (matches[i].distance < min_dist) min_dist = matches[i].distance;
 }
 for (int i = 0; i < descriptors_1.rows; i++)//keep only good matches (close to the best distance)
 {
  if (matches[i].distance < 4 * min_dist)
  {
   goodmatches.push_back(matches[i]);
  }
 }
 for (int i = 0; i < (int)goodmatches.size(); i++)
 {
  list1.push_back(keypoints_1[goodmatches[i].queryIdx].pt);
  list2.push_back(keypoints_2[goodmatches[i].trainIdx].pt);
 }
 //compute the homography (throws if fewer than 4 matches survive filtering)
 cv::Mat h = cv::findHomography(list1, list2, CV_RANSAC, 3);
 list1.clear();
 list2.clear();
 /*cv::Mat AdjustMat = (cv::Mat_<double>(3, 3) << 1.0, 0,200, 0, 1.0, 100, 0, 0, 1.0);
 cv::Mat adhomo=AdjustMat*h;*/
 //image registration: warp srcimage1 into the (shifted) reference frame
 cv::Mat imageTransform1;
 warpPerspective(srcimage1, imageTransform1, h, cv::Size(srcimage1.cols*2.2, srcimage1.rows *2.2));
 //imshow("直接经过透视矩阵变换", imageTransform1);
 cv::Mat dst(srcimage2.rows + srcimage1.rows, srcimage2.cols + srcimage1.cols, CV_8UC3);
 
 imageTransform1.copyTo(dst(cv::Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
 // Paste the earlier panorama over the warped image, but only where its
 // blue channel is non-zero (black pixels are treated as empty canvas).
 for (int i = 0; i < image1_2.rows; i++)
  for (int j = 0; j < image1_2.cols; j++)
  {
  if (!image1_2.at<Vec3b>(i, j)[0])
   continue;
  dst.at<Vec3b>(i, j) = image1_2.at<Vec3b>(i, j);
  }
 //cv::imshow("333", dst);
 return dst;
 
}
//cv::Mat Stitcher4pic(cv::Mat &srcimage1, cv::Mat &srcimage2, cv::Mat& image123)
//{
// cv::Mat temp(image123.rows, image123.cols , CV_8UC3);
// srcimage2.copyTo(temp(cv::Rect(0, 0, srcimage2.cols , srcimage2.rows)));
// /*srcimage2 = imagedisplacement(srcimage2, 100, 100);
// image123 = imagedisplacement(image123, 100, 100);*/
// //cv::imshow("b", image123);
// // SURF检测器检测特征点
// int minHessian = 300;
// cv::SURF detector(minHessian);
// std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
// detector.detect(srcimage1, keypoints_1);
// detector.detect(srcimage2, keypoints_2);
// //计算SURF描述子(由特征点和原图构成的特征向量)
// cv::SURF extractor;
// cv::Mat descriptors_1, descriptors_2;
// extractor.compute(srcimage1, keypoints_1, descriptors_1);
// extractor.compute(srcimage2, keypoints_2, descriptors_2);
// //采用FLANN匹配
// cv::FlannBasedMatcher matcher;
// std::vector<cv::DMatch> matches, goodmatches;
// matcher.match(descriptors_1, descriptors_2, matches);
// std::vector<cv::Point2f>list1, list2;
// double min_dist = 100;
// for (int i = 0; i < descriptors_1.rows; i++)
// {
//  if (matches[i].distance < min_dist) min_dist = matches[i].distance;
// }
// for (int i = 0; i < descriptors_1.rows; i++)//筛选好的匹配点
// {
//  if (matches[i].distance < 6 * min_dist)
//  {
//   goodmatches.push_back(matches[i]);
//  }
// }
//
// for (int i = 0; i < (int)goodmatches.size(); i++)
// {
//  list1.push_back(keypoints_1[goodmatches[i].queryIdx].pt);
//  list2.push_back(keypoints_2[goodmatches[i].trainIdx].pt);
// }
// //计算homography
// cv::Mat h = cv::findHomography(list1, list2, CV_RANSAC, 3);
// list1.clear();
// list2.clear();
// cv::Mat AdjustMat = (cv::Mat_<double>(3, 3) << 1.0, 0,41, 0, 1.0, 6, 0, 0, 1.0);//这个是调整位置(还没有解决BC两张图关于A的homography之后不能完全重叠的问题,按学长所说似乎本来就可能误差,只能这样抵消误差)
// cv::Mat adhomo=AdjustMat*h;
// //图像配准 
// cv::Mat imageTransform1;
// warpPerspective(srcimage1, imageTransform1, adhomo, cv::Size(srcimage1.cols*2.4, srcimage1.rows* 2.4));
// //imshow("直接经过透视矩阵变换", imageTransform1);
// cv::Mat dst(image123.rows, image123.cols, CV_8UC3);
// imageTransform1.copyTo(dst(cv::Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
// for (int i = 0; i < image123.rows; i++)
//  for (int j = 0; j < image123.cols; j++)
//  {
//  if (!image123.at<Vec3b>(i, j)[0])
//   continue;
//  dst.at<Vec3b>(i, j) = image123.at<Vec3b>(i, j);
//  }
// /*image123.copyTo(dst(cv::Rect(0, 0, image123.cols, image123.rows)));
// for (int i = 0; i <imageTransform1.rows; i++)
//  for (int j = 0; j <imageTransform1.cols-2; j++)
// {
// if (!imageTransform1.at<Vec3b>(i, j)[0])
//   continue;
//  dst.at<Vec3b>(i, j) = imageTransform1.at<Vec3b>(i, j);
//  }*/
// //移动图像在幕布上的位置和调整大小,便于观察结果
// dst = imagedisplacement(dst, 300, 50);
// cv::Mat result(dst.rows - 200, dst.cols - 200, CV_8UC3);
// for (int i = 100; i <dst.rows - 100; i++)
//  for (int j = 100; j <dst.cols - 100; j++)
//  {
//  result.at<Vec3b>(i - 100, j - 100) = dst.at<Vec3b>(i, j);
//  }
// resize(result, result, cv::Size(), 0.5, 0.5);
// cv::imwrite("dst.jpg", result);
// cv::imshow("bst", result);
// /*cv::Mat result(image123.rows-200, image123.cols-200, CV_8UC3);
// for (int i = 100; i < dst.rows - 100; i++)
//  for (int j = 100; j < dst.cols - 100; j++)
//   result.at<Vec3b>(i-100, j-100) = dst.at<Vec3b>(i, j);
//   cv::imshow("b_dst", result);*/
//   return dst;
//}
cv::Mat Stitcher6pic(cv::Mat &srcimage1, cv::Mat &srcimage2)
{
 imshow("src1", srcimage1);
 imshow("src2", srcimage2);
 //SURF检测器检测特征点
 int minHessian = 300;
 cv::SURF detector(minHessian);
 std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
 detector.detect(srcimage1, keypoints_1);
 detector.detect(srcimage2, keypoints_2);
 //计算SURF描述子(由特征点和原图构成的特征向量)
 cv::SURF extractor;
 cv::Mat descriptors_1, descriptors_2;
 extractor.compute(srcimage1, keypoints_1, descriptors_1);
 extractor.compute(srcimage2, keypoints_2, descriptors_2);
 //采用FLANN匹配
 cv::FlannBasedMatcher matcher;
 std::vector<cv::DMatch> matches, goodmatches;
 matcher.match(descriptors_1, descriptors_2, matches);
 std::vector<cv::Point2f>list1, list2;
 double min_dist = 100;
 for (int i = 0; i < descriptors_1.rows; i++)
 {
  if (matches[i].distance < min_dist) min_dist = matches[i].distance;
 }
 for (int i = 0; i < descriptors_1.rows; i++)//筛选好的匹配点
 {
  if (matches[i].distance < 4* min_dist)
  {
   goodmatches.push_back(matches[i]);
  }
 }
 cout << "find3" << endl;
 for (int i = 0; i < (int)goodmatches.size(); i++)
 {
  list1.push_back(keypoints_1[goodmatches[i].queryIdx].pt);
  list2.push_back(keypoints_2[goodmatches[i].trainIdx].pt);
 }
 //计算homography
 
 cout << "f" << list1.size() << list2.size() << endl;
 cv::Mat h = cv::findHomography(list1, list2, CV_RANSAC, 3);
 list1.clear();
 list2.clear();
 cv::Mat AdjustMat = (cv::Mat_<double>(3, 3) << 1.0, 0,-2, 0, 1.0, -1, 0, 0, 1.0);
 cv::Mat adhomo=AdjustMat*h;
 //图像配准 
 cv::Mat imageTransform1;
 warpPerspective(srcimage1, imageTransform1, adhomo, cv::Size(srcimage2.cols * 2, srcimage2.rows * 2));
 imshow("直接经过透视矩阵变换", imageTransform1);
 cv::Mat dst(srcimage2.rows + srcimage1.rows, srcimage2.cols + srcimage1.cols, CV_8UC3);
 dst.setTo(0);
 srcimage2.copyTo(dst(cv::Rect(0, 0, srcimage1.cols, srcimage1.rows)));
 for (int i = 0; i < imageTransform1.rows; i++)
  for (int j = 0; j < imageTransform1.cols; j++)
  {
  if (!imageTransform1.at<Vec3b>(i, j)[0])
   continue;
  dst.at<Vec3b>(i, j) = imageTransform1.at<Vec3b>(i, j);
  }
 
 cv::Mat result(dst.rows - 1300, dst.cols-2100, CV_8UC3);
 for (int i = 0; i <result.rows; i++)
  for (int j = 0; j <result.cols; j++)
  {
  
  result.at<Vec3b>(i, j) = dst.at<Vec3b>(i, j);
  }
 cv::imshow("b_dst1", result);
 imwrite("result.jpg", result);
 return dst;
}
作者 张星迪,有问题私信,需要转载联系。
压缩包中包含的具体内容: 对给定数据中的6个不同场景图像,进行全景拼接操作,具体要求如下: (1) 寻找关键点,获取关键点的位置和尺度信息(DoG检测子已由KeypointDetect文件夹中的detect_features_DoG.m文件实现;请参照该算子,自行编写程序实现Harris-Laplacian检测子)。 (2) 在每一幅图像中,对每个关键点提取待拼接图像的SIFT描述子(编辑SIFTDescriptor.m文件实现该操作,运行EvaluateSIFTDescriptor.m文件检查实现结果)。 (3) 比较来自两幅不同图像的SIFT描述子,寻找匹配关键点(编辑SIFTSimpleMatcher.m文件计算两幅图像SIFT描述子间的Euclidean距离,实现该操作,运行EvaluateSIFTMatcher.m文件检查实现结果)。 (4) 基于图像中的匹配关键点,对两幅图像进行配准。请分别采用最小二乘方法(编辑ComputeAffineMatrix.m文件实现该操作,运行EvaluateAffineMatrix.m文件检查实现结果)和RANSAC方法估计两幅图像间的变换矩阵(编辑RANSACFit.m 文件中的ComputeError()函数实现该操作,运行TransformationTester.m文件检查实现结果)。 (5) 基于变换矩阵,对其中一幅图像进行变换处理,将其与另一幅图像进行拼接。 (6) 对同一场景的多幅图像进行上述操作,实现场景的全景拼接(编辑MultipleStitch.m文件中的makeTransformToReferenceFrame函数实现该操作)。可以运行StitchTester.m查看拼接结果。 (7) 请比较DoG检测子和Harris-Laplacian检测子的实验结果。图像拼接的效果对实验数据中的几个场景效果不同,请分析原因。 已经实现这些功能,并且编译运行均不报错!
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值