Notes on C++ Code for Stereo Calibration and Distance Measurement

1. Stereo Calibration

#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

static void StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize)
{
	bool displayCorners = false;  // whether to display the detected corners
	bool showRectified = true;  // whether to display the rectified images

    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    vector<vector<Point2f> > imagePoints[2];  // corner points, pixel coordinates
    vector<vector<Point3f> > objectPoints;    // corner points, 3D physical coordinates

    Size imageSize;
    int i, j, k, nimages = (int)imagelist.size()/2;
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= 2; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale, INTER_LINEAR_EXACT);
                found = findChessboardCorners(timg, boardSize, corners,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf, INTER_LINEAR_EXACT);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' )
                    exit(-1);
            }
            if( !found )
                break;
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
				TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);
    for( i = 0; i < nimages; i++ )
		for (j = 0; j < boardSize.height; j++)
			for (k = 0; k < boardSize.width; k++)
				objectPoints[i].push_back(Point3f(k * squareSize, j * squareSize, 0));

    cout << "Running stereo calibration ...\n";
    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints[0], imageSize, 0);
    cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0);
	cout << "initial cameraMatrix[0] = " << endl << cameraMatrix[0] << endl;  // 初始内参矩阵3*3
	cout << "initial cameraMatrix[1] = " << endl << cameraMatrix[1] << endl;  // 初始内参矩阵3*3

    Mat R, T, E, F;
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    CALIB_FIX_ASPECT_RATIO +
                    CALIB_ZERO_TANGENT_DIST +
                    CALIB_USE_INTRINSIC_GUESS +
                    CALIB_SAME_FOCAL_LENGTH +
                    CALIB_RATIONAL_MODEL +
                    CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
                    TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
    cout << "done with RMS error = " << rms << endl;
	cout << "cameraMatrix[0] = " << endl << cameraMatrix[0] << endl;  // 标定后内参矩阵3*3
	cout << "cameraMatrix[1] = " << endl << cameraMatrix[1] << endl;  // 标定后内参矩阵3*3
	cout << "distCoeffs[0] = " << endl << distCoeffs[0] << endl;  // 标定后畸变系数向量1*14
	cout << "distCoeffs[1] = " << endl << distCoeffs[1] << endl;  // 标定后畸变系数向量1*14
	cout << "R = " << endl << R << endl;  // 两相机之间的旋转矩阵3*3
	cout << "T = " << endl << T << endl;  // 两相机之间的平移向量3*1

    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average epipolar err = " <<  err / npoints << endl;

    // save intrinsic parameters
    FileStorage fs("intrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];
    stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1], distCoeffs[1], imageSize, R, T, 
		R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
	cout << "P1 = " << endl << P1 << endl;  // 相机1在修正坐标系下的投影矩阵3*4
	cout << "P2 = " << endl << P2 << endl;  // 相机2在修正坐标系下的投影矩阵3*4
	cout << "roi1 = " << validRoi[0].x << " " << validRoi[0].y << " " << validRoi[0].width << " " << validRoi[0].height << endl;
	cout << "roi2 = " << validRoi[1].x << " " << validRoi[1].y << " " << validRoi[1].width << " " << validRoi[1].height << endl;

    fs.open("extrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }

    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));  // check whether the stereo pair is mounted vertically

    Mat rmap[2][2];
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);  // cameras placed side by side (horizontal stereo)
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);  // cameras stacked vertically (vertical stereo)
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
			Rect vroi(cvRound(validRoi[k].x* sf), cvRound(validRoi[k].y* sf),
				cvRound(validRoi[k].width* sf), cvRound(validRoi[k].height* sf));
			rectangle(canvasPart, vroi, Scalar(0, 0, 255), 3, 8);
        }
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}

static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for( ; it != it_end; ++it )
        l.push_back((string)*it);
    return true;
}

int main()
{
	Size boardSize = Size(9, 6);  // 9x6 calibration board (inner corner count)
    float squareSize = 1.0;  // size of one chessboard square
    vector<string> imagelist;  // list of image file names

    bool ok = readStringList("stereo_calib.xml", imagelist);  // read the image list from the XML file
	if (!ok || imagelist.empty())
	{
		cout << "can not open file or the string list is empty" << endl;
		return -1;
	}
    
    StereoCalib(imagelist, boardSize, squareSize);
    return 0;
}
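
The stereo_calib.xml read above is an ordinary OpenCV FileStorage file containing a single string sequence; readStringList() takes the first top-level node, so the node name itself does not matter. A minimal sketch of the expected layout (the file names are placeholders, and left/right images must alternate, since StereoCalib indexes them in pairs):

<?xml version="1.0"?>
<opencv_storage>
<imagelist>
"left01.jpg"
"right01.jpg"
"left02.jpg"
"right02.jpg"
</imagelist>
</opencv_storage>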

2. Computing Disparity

#define _CRT_SECURE_NO_WARNINGS

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

static void saveXYZ(const char* filename, const Mat& mat)
{
    const double max_z = 1.0e4;
    FILE* fp = fopen(filename, "wt");
    for(int y = 0; y < mat.rows; y++)
    {
        for(int x = 0; x < mat.cols; x++)
        {
            Vec3f point = mat.at<Vec3f>(y, x);
            if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
            fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
        }
    }
    fclose(fp);
}

int main()
{
	string img1_filename = "left01.jpg";
	string img2_filename = "right01.jpg";
	string intrinsic_filename = "intrinsics.yml";
	string extrinsic_filename = "extrinsics.yml";
	string disparity_filename = "disparity.jpg";
	string point_cloud_filename = "point.xyz";

    Mat img1 = imread(img1_filename, IMREAD_UNCHANGED);
    Mat img2 = imread(img2_filename, IMREAD_UNCHANGED);
    if (img1.empty())
    {
        printf("Command-line parameter error: could not load the first input image file\n");
        return -1;
    }
    if (img2.empty())
    {
        printf("Command-line parameter error: could not load the second input image file\n");
        return -1;
    }

    Size img_size = img1.size();
    Rect roi1, roi2;
    Mat Q;
    if( !intrinsic_filename.empty() )
    {
        FileStorage fs(intrinsic_filename, FileStorage::READ);  // read the intrinsic parameters
        if(!fs.isOpened())
        {
            printf("Failed to open file %s\n", intrinsic_filename.c_str());
            return -1;
        }
        Mat M1, D1, M2, D2;
        fs["M1"] >> M1;
        fs["D1"] >> D1;
        fs["M2"] >> M2;
        fs["D2"] >> D2;

        fs.open(extrinsic_filename, FileStorage::READ);  // read the extrinsic parameters
        if(!fs.isOpened())
        {
            printf("Failed to open file %s\n", extrinsic_filename.c_str());
            return -1;
        }
        Mat R, T, R1, P1, R2, P2;
        fs["R"] >> R;
        fs["T"] >> T;

        stereoRectify( M1, D1, M2, D2, img_size, R, T, 
			R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
		cout << "roi1 = " << roi1.x << " " << roi1.y << " " << roi1.width << " " << roi1.height << endl;
		cout << "roi2 = " << roi2.x << " " << roi2.y << " " << roi2.width << " " << roi2.height << endl;
		cout << "R1 = " << endl << R1 << endl;
		cout << "P1 = " << endl << P1 << endl;
		cout << "R2 = " << endl << R2 << endl;
		cout << "P2 = " << endl << P2 << endl;

        Mat map11, map12, map21, map22;
        initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
        initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

        Mat img1r, img2r;
        remap(img1, img1r, map11, map12, INTER_LINEAR);
        remap(img2, img2r, map21, map22, INTER_LINEAR);
        img1 = img1r;
        img2 = img2r;
		imwrite("remap_left.jpg", img1);
		imwrite("remap_right.jpg", img2);
    }

    int numberOfDisparities = ((img_size.width/8) + 15) & -16;
	cout << "numberOfDisparities = " << numberOfDisparities << endl;  // 80 = (640/8+15) & -16

	Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 16, 3);  // SGBM algorithm
    sgbm->setPreFilterCap(63);  // truncation value applied after the horizontal Sobel prefilter
	int cn = img1.channels();
    int sgbmWinSize = 7;
    sgbm->setBlockSize(sgbmWinSize);
    sgbm->setP1(8 * cn * sgbmWinSize * sgbmWinSize);  // larger values give smoother disparity
    sgbm->setP2(32 * cn * sgbmWinSize * sgbmWinSize); // larger values give smoother disparity
    sgbm->setMinDisparity(0);  // minimum disparity
    sgbm->setNumDisparities(numberOfDisparities);  // disparity search range
    sgbm->setUniquenessRatio(10);
    sgbm->setSpeckleWindowSize(100);  // maximum size (pixel count) of connected disparity regions treated as speckle
    sgbm->setSpeckleRange(32);  // maximum disparity variation within a connected region
    sgbm->setDisp12MaxDiff(1);  // maximum allowed difference in the left-right consistency check
    sgbm->setMode(StereoSGBM::MODE_SGBM);
    Mat disp, disp8;  // disparity maps
    sgbm->compute(img1, img2, disp);  // CV_16SC1
    disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));  // 16S ---> 8U

    if( true )
    {
        namedWindow("left", 1);
        imshow("left", img1);
        namedWindow("right", 1);
        imshow("right", img2);
        namedWindow("disparity", 0);
        imshow("disparity", disp8);
        printf("press any key to continue...");
        fflush(stdout);
        waitKey();
        printf("\n");
    }

	if (!disparity_filename.empty())
	{
		printf("save disparity file...");
		fflush(stdout);
		imwrite(disparity_filename, disp8);
		printf("\n");
	}   

    if(!point_cloud_filename.empty())
    {
        printf("storing the point cloud...");
        fflush(stdout);
        Mat xyz, floatDisp;
        disp.convertTo(floatDisp, CV_32F, 1.0f / 16.0f);  // SGBM output is fixed-point disparity scaled by 16
        reprojectImageTo3D(floatDisp, xyz, Q, true);
        saveXYZ(point_cloud_filename.c_str(), xyz);
        printf("\n");
    }

    return 0;
}
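
The program above only writes the point cloud to disk. To read off an actual distance for a given pixel, the Z component of the reprojected map can be queried directly. Below is a minimal sketch (not part of the original program) that prints the 3D coordinates of a clicked pixel in the disparity window; it assumes xyz is the output of reprojectImageTo3D above, with coordinates in the units of squareSize used during calibration:

static Mat g_xyz;  // copy of the xyz map produced by reprojectImageTo3D

static void onMouse(int event, int x, int y, int, void*)
{
    if (event != EVENT_LBUTTONDOWN || g_xyz.empty())
        return;
    Vec3f p = g_xyz.at<Vec3f>(y, x);  // (X, Y, Z) in the same units as squareSize
    printf("pixel (%d, %d): X=%f Y=%f Z=%f\n", x, y, p[0], p[1], p[2]);
}

// usage, after reprojectImageTo3D(floatDisp, xyz, Q, true):
//   xyz.copyTo(g_xyz);
//   setMouseCallback("disparity", onMouse);
//   waitKey();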

3. Function Reference

initCameraMatrix2D()

Finds an initial camera matrix from 3D-2D point correspondences.

Parameters:

objectPoints---vector<vector<Point3f>>---3D coordinates of the p*q feature points in each of the n image pairs

imagePoints1---vector<vector<Point2f>>---pixel coordinates of the p*q feature points in the n images from camera 1

imageSize---Size---image size

cameraMatrix1---Mat---3x3 intrinsic matrix of camera 1---OutputArray

stereoCalibrate()

Calibrates a stereo camera pair and estimates the transformation between the two cameras.

Parameters:

objectPoints---vector<vector<Point3f>>---3D coordinates of the p*q feature points in each of the n image pairs

imagePoints1---vector<vector<Point2f>>---pixel coordinates of the p*q feature points in the n images from camera 1

imagePoints2---vector<vector<Point2f>>---pixel coordinates of the p*q feature points in the n images from camera 2

cameraMatrix1---Mat---3x3 intrinsic matrix of camera 1---InputOutputArray

distCoeffs1---Mat---1x14 distortion coefficient vector of camera 1---InputOutputArray

cameraMatrix2---Mat---3x3 intrinsic matrix of camera 2---InputOutputArray

distCoeffs2---Mat---1x14 distortion coefficient vector of camera 2---InputOutputArray

imageSize---Size---image size

R---Mat---3x3 rotation matrix between the camera 1 and camera 2 coordinate systems---OutputArray

T---Mat---3x1 translation vector between the camera 1 and camera 2 coordinate systems---OutputArray

E---Mat---3x3 essential matrix---OutputArray

F---Mat---3x3 fundamental matrix---OutputArray
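
R and T relate the two camera coordinate systems: a point X1 given in camera 1 coordinates is expressed in camera 2 coordinates as X2 = R*X1 + T. A quick sketch of that relation (the sample point is just an illustrative value):

// an arbitrary example point expressed in camera 1 coordinates
Mat X1 = (Mat_<double>(3, 1) << 0.1, -0.2, 5.0);
// the same point expressed in camera 2 coordinates, using the calibrated R and T
Mat X2 = R * X1 + T;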

undistortPoints()

Computes the ideal point coordinates from the observed point coordinates.
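
A minimal sketch of the two common ways to call it, using the M1/D1 intrinsics loaded in Part 2 and a hypothetical observed pixel: without the R/P arguments the output is in normalized camera coordinates; passing the camera matrix as P (as the epipolar-error check in Part 1 does) returns undistorted pixel coordinates.

vector<Point2f> observed = { Point2f(320.f, 240.f) };  // a hypothetical measured pixel
vector<Point2f> normalized, pixels;
undistortPoints(observed, normalized, M1, D1);         // ideal points in normalized camera coordinates
undistortPoints(observed, pixels, M1, D1, Mat(), M1);  // undistorted points back in pixel coordinates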

stereoRectify()

Computes rectification transforms for each head of a calibrated stereo camera.

Parameters:

cameraMatrix1---Mat---3x3 intrinsic matrix of camera 1

distCoeffs1---Mat---1x14 distortion coefficient vector of camera 1

cameraMatrix2---Mat---3x3 intrinsic matrix of camera 2

distCoeffs2---Mat---1x14 distortion coefficient vector of camera 2

imageSize---Size---image size

R---Mat---3x3 rotation matrix between the camera 1 and camera 2 coordinate systems

T---Mat---3x1 translation vector between the camera 1 and camera 2 coordinate systems

R1---Mat---3x3 rectification transform (rotation matrix) for camera 1---OutputArray

R2---Mat---3x3 rectification transform (rotation matrix) for camera 2---OutputArray

P1---Mat---3x4 projection matrix of camera 1 in the rectified coordinate system---OutputArray

P2---Mat---3x4 projection matrix of camera 2 in the rectified coordinate system---OutputArray

Q---Mat---4x4 disparity-to-depth mapping matrix---OutputArray

If alpha = -1 or absent, the function performs the default scaling (the output image size defaults to the input image size).

If alpha = 0, the rectified images are zoomed and shifted so that only valid pixels are visible (no black borders after rectification).

If alpha = 1, the rectified images are decimated and shifted so that all pixels from the original camera images are retained in the rectified images (no source pixels are lost).
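
Part 1 above calls stereoRectify with alpha = 1 (keep every source pixel) while Part 2 uses alpha = -1 (default scaling). A sketch of setting it explicitly, reusing the variable names from Part 2:

// alpha = 0: crop so that only valid (non-black) pixels remain after rectification
stereoRectify(M1, D1, M2, D2, img_size, R, T,
              R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY,
              0, img_size, &roi1, &roi2);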

initUndistortRectifyMap()

Computes the undistortion and rectification transformation map.

Parameters:

cameraMatrix1---Mat---3x3 intrinsic matrix of camera 1

distCoeffs1---Mat---1x14 distortion coefficient vector of camera 1

R1---Mat---3x3 rectification transform (rotation matrix) for camera 1

P1---Mat---3x4 projection matrix of camera 1 in the rectified coordinate system

imageSize---Size---image size

map1---Mat---first output map---OutputArray

map2---Mat---second output map---OutputArray

remap()

Applies a generic geometrical transformation to an image.

reprojectImageTo3D()

Reprojects a disparity image to 3D space.

In the reprojection matrix (perspective transformation matrix) Q, cx and cy are the coordinates of the left camera's principal point in the image, f is the focal length, and Tx is the translation between the two cameras' projection centers (a negative value), i.e. the baseline; cx' is the x-coordinate of the right camera's principal point in the image.
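
As a rough depth check (a sketch, assuming the two principal points are nearly equal, so cx ≈ cx'): with baseline B = |Tx| and focal length f, depth is approximately Z = f * B / d. These values can be read out of the Q and disp computed in Part 2:

int x = 320, y = 240;                       // an example pixel
double f = Q.at<double>(2, 3);              // focal length in pixels
double invB = Q.at<double>(3, 2);           // equals -1/Tx, i.e. 1/baseline (Tx is negative)
double d = disp.at<short>(y, x) / 16.0;     // SGBM disparity is fixed-point, scaled by 16
double Z = (d > 0) ? f / (d * invB) : 0.0;  // Z = f * B / d, in the units of squareSize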

Demo video: http://v.youku.com/v_show/id_XMTU2Mzk0NjU3Ng==.html

How to run this program on your own machine?

1. It requires cvblobslib, an OpenCV extension library, to detect objects and draw bounding boxes around them. For installation details see http://dsynflo.blogspot.com/2010/02/cvblobskib-with-opencv-installation.html. Once cvblobslib is configured, you can test it with this program: http://dl.dropbox.com/u/110310945/Blobs%20test.rar

2. In the video the distance between the two cameras is 6 cm. Choose a distance suited to your camera model to get the best results.

3. Before measuring distance, the cameras must be calibrated. To calibrate, change "#define CALIBRATION 0" to "#define CALIBRATION 1" in stdafx.h. After calibration, the calibration files are written to the "CalibFile" folder in the project directory. If the calibration looks good, change "#define CALIBRATION" back to 0; later runs then reuse the previous calibration directly. You also need to add the line "#define ANALYSIS_MODE 1" to stdafx.h.

4. The video uses a 10x7 chessboard and captures 40 frames to compute the camera parameters. To use a different chessboard, modify the corresponding parameters in "StereoFunctions.cpp".

5. If the cameras cannot be opened, change the value of index in "cvCaptureFromCAM(index)" in "StereoGrabber.cpp".

6. About computing distance: the relationship between depth value and real distance is interpolated with a third-degree polynomial. The Excel file "interpolation" was used to fit k1 through k4; you should determine your own values for these parameters.

7. You can adjust the parameter sliders in the control window to obtain a better disparity map.

8. The "distance" folder in the project directory contains MATLAB code for computing distance information.

9. For the basic theory, see this thesis: http://scholar.lib.vt.edu/theses/available/etd-12232009-222118/unrestricted/Short_NJ_T_2009.pdf

Environment used in the video: VS2008, OpenCV 2.1