OpenCV 双光融合开发全攻略(C++ 版)
一、环境搭建与项目配置
1. 依赖安装(Ubuntu 20.04)
# 安装OpenCV官方库
sudo apt install libopencv-dev python3-opencv
# 验证安装
pkg-config --modversion opencv4 # 应输出4.5.5+(OpenCV 4.x 的 pkg-config 模块名为 opencv4)
2. CMake 项目结构
# CMakeLists.txt
cmake_minimum_required(VERSION 3.10)
project(opencv_fusion)
# The sources rely on C++11 features (emplace_back, brace init);
# state the standard explicitly instead of trusting compiler defaults.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc calib3d)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(fusion_main
src/calibration.cpp
src/stereo_disparity.cpp
src/image_fusion.cpp
)
target_link_libraries(fusion_main ${OpenCV_LIBS})
3. 核心头文件包含
#include <opencv2/opencv.hpp>   // umbrella header: pulls in all core OpenCV modules
#include <opencv2/calib3d.hpp>  // camera calibration / stereo geometry
#include <opencv2/imgproc.hpp>  // image transforms and filtering
// NOTE(review): file-scope using-directives are tolerable in a tutorial
// .cpp file, but must never appear in a header.
using namespace cv;
using namespace std;
二、双目视觉融合实现
1. 棋盘格校准(C++ 实现)
(1)角点检测与数据采集
// Load one calibration shot and convert to grayscale for corner detection.
Mat img = imread("calib_01.jpg");
Mat gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
// Inner-corner grid of the chessboard target: 8 columns x 6 rows.
Size chessboardSize(8, 6);
vector<Point2f> corners;
bool found = findChessboardCorners(gray, chessboardSize, corners,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK);
if(found) {
// Refine detected corners to sub-pixel accuracy in an 11x11 search window;
// iterate up to 30 times or until the shift falls below 0.1 px.
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
drawChessboardCorners(img, chessboardSize, corners, found);
}
(2)相机参数计算
vector<vector<Point3f>> objPoints;
vector<vector<Point2f>> imgPoints;
// 世界坐标系角点(Z=0)
vector<Point3f> objp;
for(int i=0; i<chessboardSize.height; i++) {
for(int j=0; j<chessboardSize.width; j++) {
objp.emplace_back(j*30, i*30, 0); // 棋盘格边长30mm
}
}
// 执行校准
Mat cameraMatrix, distCoeffs;
vector<Mat> rvecs, tvecs;
calibrateCamera(objPoints, imgPoints, gray.size(),
cameraMatrix, distCoeffs, rvecs, tvecs);
2. 立体视差计算
(1)极线校正
// Stereo extrinsics: R/T map the left camera frame onto the right one.
// NOTE(review): leftCorners/rightCorners are the per-view corner lists
// gathered as above for each camera — they are not defined in this snippet.
Mat R, T;
stereoCalibrate(objPoints, leftCorners, rightCorners,
cameraMatrix, distCoeffs, cameraMatrix, distCoeffs,
gray.size(), R, T, noArray(), noArray(),
CALIB_FIX_INTRINSIC);
// Rectification: R1/R2 rotate each view so epipolar lines become horizontal
// scanlines; P1/P2 are the rectified projection matrices and Q is the
// 4x4 disparity-to-depth reprojection matrix.
Mat R1, R2, P1, P2, Q;
stereoRectify(cameraMatrix, distCoeffs,
cameraMatrix, distCoeffs, gray.size(),
R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 0, gray.size());
(2)视差图生成(SGBM 算法)
// SGBM matcher. Positional arguments: minDisparity=0, numDisparities=160
// (must be divisible by 16), blockSize=11, P1=8*3*11*11 and P2=32*3*11*11
// (smoothness penalties, the conventional 8/32 * channels * blockSize^2),
// disp12MaxDiff=1, preFilterCap=0, uniquenessRatio=10,
// speckleWindowSize=100, speckleRange=32.
Ptr<StereoSGBM> sgbm = StereoSGBM::create(
0, 160, 11, 8*3*11*11, 32*3*11*11,
1, 0, 10, 100, 32
);
Mat disparity;
sgbm->compute(leftGray, rightGray, disparity);
// compute() outputs CV_16S fixed-point disparities scaled by 16;
// convert to float true-disparity values.
disparity.convertTo(disparity, CV_32F, 1/16.0f);
(3)深度计算函数
// Triangulate metric depth from a single disparity sample.
// depth = focalLength * baseline / disparity; a non-positive disparity
// means "no stereo match here" and yields FLT_MAX as the invalid marker.
float calculateDepth(float disparity, float focalLength, float baseline) {
    const bool valid = disparity > 0.0f;
    return valid ? (focalLength * baseline) / disparity : FLT_MAX;
}
三、多模态图像融合
1. RGB 与红外线性融合
// Visible-light frame (BGR) and infrared frame (single channel).
Mat rgbImg = imread("rgb.jpg");
Mat irImg = imread("ir.jpg", IMREAD_GRAYSCALE);
// Match sizes and channel counts before blending.
resize(irImg, irImg, rgbImg.size());
cvtColor(irImg, irImg, COLOR_GRAY2BGR);
Mat fusedImg;
// Pixel-wise weighted sum: 70% visible + 30% infrared, no offset.
addWeighted(rgbImg, 0.7, irImg, 0.3, 0, fusedImg);
2. 金字塔融合算法
void pyramidBlend(const Mat& src1, const Mat& src2, Mat& dst, int levels) {
vector<Mat> gp1, gp2; // 高斯金字塔
vector<Mat> lp1, lp2; // 拉普拉斯金字塔
// 构建高斯金字塔
gp1.push_back(src1.clone());
gp2.push_back(src2.clone());
for(int i=0; i<levels; i++) {
Mat down1, down2;
pyrDown(gp1[i], down1);
pyrDown(gp2[i], down2);
gp1.push_back(down1);
gp2.push_back(down2);
}
// 构建拉普拉斯金字塔
for(int i=levels-1; i>=0; i--) {
Mat up1, up2;
if(i == levels-1) {
lp1.push_back(gp1[i]);
lp2.push_back(gp2[i]);
} else {
pyrUp(gp1[i+1], up1, gp1[i].size());
Mat lap1 = gp1[i] - up1;
lp1.push_back(lap1);
pyrUp(gp2[i+1], up2, gp2[i].size());
Mat lap2 = gp2[i] - up2;
lp2.push_back(lap2);
}
}
// 融合金字塔
vector<Mat> fusedLap;
for(int i=0; i<levels; i++) {
Mat fused;
addWeighted(lp1[i], 0.5, lp2[i], 0.5, 0, fused);
fusedLap.push_back(fused);
}
// 重建融合图像
dst = fusedLap[0];
for(int i=1; i<levels; i++) {
Mat upDst;
pyrUp(dst, upDst, fusedLap[i].size());
dst = upDst + fusedLap[i];
}
}
四、性能优化技巧
1. 并行计算优化(OpenMP)
#include <omp.h>
// Disparity post-processing parallelized with OpenMP: rows are independent,
// so iterations of the outer loop are distributed across threads.
// NOTE(review): `disparity`, `depth`, `focal` and `baseline` are assumed to
// be defined by the surrounding code; `disparity` must already be CV_32F.
#pragma omp parallel for
for(int y=0; y<disparity.rows; y++) {
for(int x=0; x<disparity.cols; x++) {
float d = disparity.at<float>(y, x);
// Skip invalid (non-positive) disparities; those depth entries keep
// whatever value `depth` was initialized with.
if(d > 0) {
depth.at<float>(y, x) = calculateDepth(d, focal, baseline);
}
}
}
2. GPU 加速配置(CUDA)
// Enable the CUDA module (requires OpenCV built with CUDA support).
#include <opencv2/cudaimgproc.hpp>
// Device-side buffers; BUGFIX: `d_disparity` was never declared in the
// original, and two unused host Mats (d_left/d_right) were removed.
cuda::GpuMat gpu_left, gpu_right, d_disparity;
gpu_left.upload(leftGray);    // host -> device copy
gpu_right.upload(rightGray);
// NOTE(review): cuda::StereoBM is block matching, not SGBM; OpenCV's CUDA
// semi-global matcher (cuda::StereoSGM) lives in opencv2/cudastereo.hpp.
// Arguments: numDisparities=160, blockSize=11.
Ptr<cuda::StereoBM> matcher = cuda::StereoBM::create(160, 11);
matcher->compute(gpu_left, gpu_right, d_disparity);
Mat disparity;
d_disparity.download(disparity);   // device -> host copy
3. 内存管理优化
// Load with the file's original bit depth preserved (plus color decode).
Mat img = imread("image.jpg", IMREAD_ANYDEPTH | IMREAD_COLOR);
if(img.isContinuous()) {
// NOTE(review): reshape(1, rows*cols) reinterprets the image as an
// (rows*cols) x channels single-channel matrix — one pixel per row.
// Confirm downstream code actually expects this flattened layout.
img = img.reshape(1, img.rows * img.cols);
}
五、完整项目示例(双目测距)
1. 主函数流程
int main() {
// 1. 读取左右图像
Mat leftImg = imread("left.jpg", IMREAD_GRAYSCALE);
Mat rightImg = imread("right.jpg", IMREAD_GRAYSCALE);
// 2. 相机校准(假设已完成参数计算)
Mat cameraMatrix = (Mat_<double>(3,3) <<
520.9, 0, 325.1,
0, 521.0, 249.7,
0, 0, 1);
Mat distCoeffs = Mat::zeros(4, 1, CV_64F);
// 3. 立体校正
Mat R = Mat::eye(3,3, CV_64F);
Mat T = (Mat_<double>(3,1) << 100, 0, 0); // 基线100mm
Mat R1, R2, P1, P2, Q;
stereoRectify(cameraMatrix, distCoeffs,
cameraMatrix, distCoeffs, leftImg.size(),
R, T, R1, R2, P1, P2, Q);
// 4. 视差计算
Ptr<StereoSGBM> sgbm = StereoSGBM::create();
Mat disparity;
sgbm->compute(leftImg, rightImg, disparity);
// 5. 深度图生成
Mat depthMap = Mat::zeros(leftImg.size(), CV_32F);
for(int y=0; y<leftImg.rows; y++) {
for(int x=0; x<leftImg.cols; x++) {
float d = disparity.at<float>(y,x);
depthMap.at<float>(y,x) = calculateDepth(d, 520.9, 100);
}
}
// 6. 结果显示
imshow("Disparity Map", disparity/16.0f);
waitKey(0);
return 0;
}
六、常见问题与 C++ 特解
1. 图像尺寸不匹配
// Resize `src` in place so its dimensions match `ref`; no-op when they
// already agree. INTER_AREA is used, as in the original, which favors
// downscaling quality.
void resizeIfNeeded(Mat& src, const Mat& ref) {
    const Size target = ref.size();
    if(src.size() == target) {
        return; // nothing to do
    }
    resize(src, src, target, 0, 0, INTER_AREA);
}
2. 矩阵运算错误
// Check matrix continuity: ROI views and some constructors produce
// non-contiguous rows, which breaks raw-pointer / single-loop access.
if(!img.isContinuous()) {
img = img.clone(); // clone() always allocates one continuous buffer
}
3. 多线程编译错误
# Add OpenMP support via raw compiler/linker flags (GCC/Clang syntax).
# NOTE(review): find_package(OpenMP) + target_link_libraries(fusion_main
# OpenMP::OpenMP_CXX) is the portable modern-CMake alternative that also
# handles MSVC's different flag spelling.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fopenmp")
七、硬件适配方案
1. 工业相机配置(大恒 MER-230)
// Camera SDK initialization (Daheng MER-230 industrial camera).
#include "DahengCamera.h"
// Connect to the camera at a fixed IP with an 8-bit mono 1920x1200 stream.
// NOTE(review): CameraHandle / CameraParam / ConnectCamera come from the
// vendor SDK header; field semantics are assumed from their names — verify
// against the Daheng SDK documentation. Return value presumably indicates
// connection success.
bool initCamera(CameraHandle& handle) {
CameraParam param;
param.PixelFormat = PIXEL_FORMAT_MONO8;
param.Width = 1920;
param.Height = 1200;
return ConnectCamera(handle, "192.168.0.1", param);
}
2. 深度相机适配(Intel RealSense)
// RealSense depth acquisition: stream color (1080p BGR8) and depth
// (720p Z16) and wrap each depth frame as an OpenCV matrix.
rs2::pipeline pipe;
rs2::config cfg;
cfg.enable_stream(RS2_STREAM_COLOR, 1920, 1080, RS2_FORMAT_BGR8, 30);
cfg.enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Z16, 30);
pipe.start(cfg);
while(true) {
rs2::frameset frames = pipe.wait_for_frames();
rs2::depth_frame depth = frames.get_depth_frame();
// Zero-copy wrap: the Mat aliases the SDK buffer (Z16 -> CV_16UC1) and is
// only valid while `depth` stays alive; clone() it if kept past this
// iteration.
Mat depthMat(Size(1280,720), CV_16UC1, (void*)depth.get_data(), Mat::AUTO_STEP);
// Process with OpenCV here.
}