OpenCV code
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/stitching/stitcher.hpp>

using namespace std;
using namespace cv;

bool try_use_gpu = false;
vector<Mat> imgs;
string result_name = "dst1.jpg";

int main(int argc, char *argv[])
{
    Mat img1 = imread("34.jpg");
    Mat img2 = imread("35.jpg");
    // Check the inputs before trying to display them
    if (img1.empty() || img2.empty())
    {
        cout << "Can't read image" << endl;
        return -1;
    }
    imshow("p1", img1);
    imshow("p2", img2);

    imgs.push_back(img1);
    imgs.push_back(img2);

    Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
    // Stitch with the stitch() function
    Mat pano;
    Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != Stitcher::OK)
    {
        cout << "Can't stitch images, error code = " << int(status) << endl;
        return -1;
    }
    imwrite(result_name, pano);
    Mat pano2 = pano.clone();
    // Show the resulting panorama
    imshow("Panorama", pano);
    waitKey();
    return 0;
}
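Note: Stitcher::createDefault and the opencv2/stitching/stitcher.hpp header belong to the older 2.4 / early-3.x API. On OpenCV 3.2+ and 4.x the factory is Stitcher::create, which returns a Ptr<Stitcher>; a minimal sketch of the equivalent call, assuming OpenCV 4.x (the rest of the program stays the same):

#include <opencv2/stitching.hpp>   // umbrella header in newer versions

Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
Mat pano;
Stitcher::Status status = stitcher->stitch(imgs, pano);
if (status != Stitcher::OK)
    cout << "Can't stitch images, error code = " << int(status) << endl;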
Run log
Finding features...
Features in image #1: 0
Features in image #2: 0
Finding features, time: 0.0100239 sec
Pairwise matching
1->2 matches: 70
1->2 & 2->1 matches: 109
.Pairwise matching, time: 0.0231749 sec
Estimating rotations...
Estimating rotations, time: 1.7138e-05 sec
Wave correcting...
Wave correcting, time: 3.2384e-05 sec
Warping images (auxiliary)...
Warping images, time: 0.00900702 sec
Exposure compensation...
Exposure compensation, time: 0.00382646 sec
Finding seams...
Finding seams, time: 0.105626 sec
Compositing...
Compositing image #1
after resize time: 0.000229414 sec
warp the current image: 0.0324935 sec
warp the current image mask: 0.0291552 sec
compensate exposure: 0.00421866 sec
other: 0.00200899 sec
other2: 0.00791113 sec
feed...
Add border to the source image, time: 0.00454921 sec
Create the source image Laplacian pyramid, time: 0.0116412 sec
Create the weight map Gaussian pyramid, time: 0.00511779 sec
Add weighted layer of the source image to the final Laplacian pyramid layer, time: 0.00628485 sec
feed time: 0.028174 sec
Compositing ## time: 0.104273 sec
Compositing image #2
after resize time: 6.87e-07 sec
warp the current image: 0.024671 sec
warp the current image mask: 0.0233189 sec
compensate exposure: 0.00380685 sec
other: 0.00153454 sec
other2: 1.39e-07 sec
feed...
Add border to the source image, time: 0.00124179 sec
Create the source image Laplacian pyramid, time: 0.00769192 sec
Create the weight map Gaussian pyramid, time: 0.00267358 sec
Add weighted layer of the source image to the final Laplacian pyramid layer, time: 0.00590453 sec
feed time: 0.0188611 sec
Compositing ## time: 0.0722708 sec
blend time: 0.0210398 sec
Compositing, time: 0.197634 sec
stitching completed successfully
result.jpg saved!
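Two things are worth noting in this log. First, "Features in image #1: 0" does not mean that no features were detected: in the matchImages code below, the per-image LOGLN is printed before the feature finder is actually invoked (feature finding runs in parallel after the loop), so the keypoint count is still zero at that point; the 70 / 109 pairwise matches show that features were in fact found. Second, every "time:" entry comes from a getTickCount()/getTickFrequency() pair wrapped around the stage; the same pattern can time the whole call from user code:

int64 t = getTickCount();
Stitcher::Status status = stitcher.stitch(imgs, pano);
cout << "stitch, time: " << (getTickCount() - t) / getTickFrequency() << " sec" << endl;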
createDefault
Stitcher Stitcher::createDefault(bool try_use_gpu)
{
    Stitcher stitcher;
    stitcher.setRegistrationResol(0.6);     // registr_resol_ = 0.6
    stitcher.setSeamEstimationResol(0.1);   // seam_est_resol_ = 0.1
    stitcher.setCompositingResol(ORIG_RESOL);
    stitcher.setPanoConfidenceThresh(1);
    stitcher.setWaveCorrection(true);
    stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
    stitcher.setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(try_use_gpu));
    stitcher.setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
#ifdef HAVE_OPENCV_CUDALEGACY
    if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0)
    {
        stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
        stitcher.setWarper(makePtr<SphericalWarperGpu>());
        stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinderGpu>());
    }
    else
#endif
    {
        stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
        stitcher.setWarper(makePtr<SphericalWarper>());
        stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
    }
    stitcher.setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
    stitcher.setBlender(makePtr<detail::MultiBandBlender>(try_use_gpu));

    stitcher.work_scale_ = 1;
    stitcher.seam_scale_ = 1;
    stitcher.seam_work_aspect_ = 1;
    stitcher.warped_image_scale_ = 1;

    return stitcher;
}
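Since createDefault is just a bundle of setter calls, any stage of the pipeline can be swapped out before stitching. A small sketch using the same setters as above (PlaneWarper and the particular values are illustrative choices, not the defaults):

Stitcher stitcher = Stitcher::createDefault(false);
stitcher.setWarper(makePtr<PlaneWarper>());        // planar instead of spherical projection
stitcher.setWaveCorrection(false);                 // skip wave correction
stitcher.setPanoConfidenceThresh(0.8);             // lower conf_thresh_ than the default of 1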
stitch
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
    CV_INSTRUMENT_REGION();

    Status status = estimateTransform(images);
    if (status != OK)
        return status;
    return composePanorama(pano);
}
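Because stitch() is simply estimateTransform() followed by composePanorama(), the two steps can also be called separately from user code, for example to register once and compose afterwards:

Stitcher::Status status = stitcher.estimateTransform(imgs);
if (status == Stitcher::OK)
    status = stitcher.composePanorama(pano);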
estimateTransform
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images)
{
    CV_INSTRUMENT_REGION();

    return estimateTransform(images, std::vector<std::vector<Rect> >());
}

Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois)
{
    CV_INSTRUMENT_REGION();

    images.getUMatVector(imgs_);
    rois_ = rois;

    Status status;
    if ((status = matchImages()) != OK)
        return status;
    if ((status = estimateCameraParams()) != OK)
        return status;

    return OK;
}
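The rois overload limits feature detection to user-supplied rectangles, given in full-resolution image coordinates (matchImages rescales them by work_scale_ below). A sketch for the two-image case, assuming the overlap lies roughly in the facing halves of the inputs:

std::vector<std::vector<Rect> > rois(2);
rois[0].push_back(Rect(img1.cols / 2, 0, img1.cols - img1.cols / 2, img1.rows)); // right half of img1
rois[1].push_back(Rect(0, 0, img2.cols / 2, img2.rows));                          // left half of img2
Stitcher::Status status = stitcher.estimateTransform(imgs, rois);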
matchImages
Stitcher::Status Stitcher::matchImages()
{
    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS;
    }

    work_scale_ = 1;
    seam_work_aspect_ = 1;
    seam_scale_ = 1;
    bool is_work_scale_set = false;
    bool is_seam_scale_set = false;
    UMat full_img, img;
    features_.resize(imgs_.size());
    seam_est_imgs_.resize(imgs_.size());
    full_img_sizes_.resize(imgs_.size());

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    std::vector<UMat> feature_find_imgs(imgs_.size());
    std::vector<std::vector<Rect> > feature_find_rois(rois_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        full_img = imgs_[i];
        full_img_sizes_[i] = full_img.size();

        if (registr_resol_ < 0)   // registr_resol_ = 0.6 by default, so this branch is not taken
        {
            img = full_img;
            work_scale_ = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)   // true for the first image
            {
                work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
        } // work_scale_ shrinks the image to roughly registr_resol_ (0.6) megapixels

        if (!is_seam_scale_set)   // true for the first image
        {
            seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));
            seam_work_aspect_ = seam_scale_ / work_scale_;
            is_seam_scale_set = true;
        }

        if (rois_.empty())
            feature_find_imgs[i] = img;
        else
        {
            feature_find_rois[i].resize(rois_[i].size());
            for (size_t j = 0; j < rois_[i].size(); ++j)
            {
                Point tl(cvRound(rois_[i][j].x * work_scale_), cvRound(rois_[i][j].y * work_scale_));
                Point br(cvRound(rois_[i][j].br().x * work_scale_), cvRound(rois_[i][j].br().y * work_scale_));
                feature_find_rois[i][j] = Rect(tl, br);
            }
            feature_find_imgs[i] = img;
        }
        features_[i].img_idx = (int)i;
        LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
        seam_est_imgs_[i] = img.clone();
    }
    // find features possibly in parallel
    if (rois_.empty())
        (*features_finder_)(feature_find_imgs, features_);
    else
        (*features_finder_)(feature_find_imgs, features_, feature_find_rois);

    // Do it to save memory
    features_finder_->collectGarbage();
    full_img.release();
    img.release();
    feature_find_imgs.clear();
    feature_find_rois.clear();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    (*features_matcher_)(features_, pairwise_matches_, matching_mask_);
    features_matcher_->collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Leave only images we are sure are from the same panorama
    indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
    std::vector<UMat> seam_est_imgs_subset;
    std::vector<UMat> imgs_subset;
    std::vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices_.size(); ++i)
    {
        imgs_subset.push_back(imgs_[indices_[i]]);
        seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
    }
    seam_est_imgs_ = seam_est_imgs_subset;
    imgs_ = imgs_subset;
    full_img_sizes_ = full_img_sizes_subset;

    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS;
    }

    return OK;
}
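To put concrete numbers on the two scales computed above, take a hypothetical 4000x3000 (12 Mpx) input and the default resolutions from createDefault (registr_resol_ = 0.6, seam_est_resol_ = 0.1 megapixels):

double work_scale = std::min(1.0, std::sqrt(0.6 * 1e6 / (4000.0 * 3000.0))); // ~0.224, i.e. ~894x671 for registration
double seam_scale = std::min(1.0, std::sqrt(0.1 * 1e6 / (4000.0 * 3000.0))); // ~0.091, i.e. ~365x274 for seam estimation
double seam_work_aspect = seam_scale / work_scale;                           // ~0.408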