#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main() {
// Manually entered 3D calibration points in the world coordinate system (Z = 0)
std::vector<std::vector<cv::Point3f>> objectPoints(1, {
cv::Point3f(0, 0, 0),
cv::Point3f(0, 20, 0),
cv::Point3f(20, 0, 0),
cv::Point3f(20, 20, 0),
});
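// Note: these four points describe a planar 20 x 20 target lying in the Z = 0 plane.
// Four points per view is the practical minimum; a real calibration would normally
// use many more points and several views for accuracy.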
// Manually entered left-camera image coordinates (pixels)
std::vector<std::vector<cv::Point2f>> leftImagePoints(1, {
cv::Point2f(745, 270),
cv::Point2f(745, 400),
cv::Point2f(875, 270),
cv::Point2f(875, 400),
});
// Manually entered right-camera image coordinates (pixels)
std::vector<std::vector<cv::Point2f>> rightImagePoints(1, {
cv::Point2f(145, 280),
cv::Point2f(145, 410),
cv::Point2f(275, 280),
cv::Point2f(275, 410),
});
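// The i-th entry of leftImagePoints and rightImagePoints must correspond to the
// i-th entry of objectPoints; this ordering ties each 2D observation to its 3D point.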
// Manually entered camera intrinsic matrices and distortion coefficients (assuming no distortion)
cv::Mat cameraMatrixLeft = (cv::Mat_<double>(3, 3) <<
1000, 0, 640,
0, 1000, 480,
0, 0, 1);
cv::Mat cameraMatrixRight = (cv::Mat_<double>(3, 3) <<
1000, 0, 640,
0, 1000, 480,
0, 0, 1);
cv::Mat distCoeffsLeft = cv::Mat::zeros(5, 1, CV_64F);
cv::Mat distCoeffsRight = cv::Mat::zeros(5, 1, CV_64F);
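// The five distortion coefficients follow OpenCV's (k1, k2, p1, p2, k3) model;
// all zeros corresponds to an ideal pinhole camera with no lens distortion.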
// Solve the stereo calibration with cv::stereoCalibrate
cv::Mat R, T, E, F;
double rms = cv::stereoCalibrate(
objectPoints, leftImagePoints, rightImagePoints,
cameraMatrixLeft, distCoeffsLeft,
cameraMatrixRight, distCoeffsRight,
cv::Size(1280, 960), R, T, E, F,
cv::CALIB_FIX_INTRINSIC // keep the intrinsic parameters fixed
);
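// With CALIB_FIX_INTRINSIC the intrinsics and distortion coefficients above are kept
// as given, and only the relative pose is estimated: R and T bring points from the
// left (first) camera's coordinate system into the right (second) camera's.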
// Print the calibration error and the relative pose between the cameras
std::cout << "立体标定 RMS 误差: " << rms << std::endl;
std::cout << "旋转矩阵 (R): " << R << std::endl;
std::cout << "平移向量 (T): " << T << std::endl;
return 0;
}