双目标定、左右两图计算视差、cvReprojectImageTo3D()源代码解析

来源:互联网 发布:java io流是什么 编辑:程序博客网 时间:2024/05/29 18:30



 cvReprojectImageTo3D() 是怎样计算出三维坐标数据的?


cvReprojectImageTo3D (){
// NOTE(review): incomplete excerpt of OpenCV's cvReprojectImageTo3D — the
// function signature, local declarations (rows, cols, src, dst, q,
// minDisparity, bigZ) and the final closing brace are elided by the author.
// q is the 4x4 reprojection matrix Q from stereo rectification; src holds
// float disparities, dst receives 3-channel float (X, Y, Z) coordinates.
for( y = 0; y < rows; y++ ){
    const float* sptr = (const float*)(src->data.ptr + src->step*y);   // disparity-row pointer
    float* dptr0 = (float*)(dst->data.ptr + dst->step*y), *dptr = dptr0;   // 3-D output-row pointer
    // At the start of each row, seed the vector qq = [qx, qy, qz, qw]' with
    // (current row index y) * (2nd column of Q) + (4th column of Q).
    double qx = q[0][1]*y + q[0][3], qy = q[1][1]*y + q[1][3];
    double qz = q[2][1]*y + q[2][3], qw = q[3][1]*y + q[3][3];
    … // (code elided in the original excerpt)
    // After each pixel's coordinates are computed, accumulate the 1st column
    // of Q into qq, i.e. qq = qq + q(:,1) — done in the loop increment.
    for( x = 0; x < cols; x++, qx += q[0][0], qy += q[1][0], qz += q[2][0], qw += q[3][0] ){
        double d = sptr[x];
        // Compute this pixel's 3-D coordinates: add (3rd column of Q) * d to
        // qq, then divide the first three elements by the fourth:
        // [X,Y,Z,W]' = qq + q(:,3) * d;   iW = 1/W; X=X*iW; Y=Y*iW; Z=Z*iW;
        double iW = 1./(qw + q[3][2]*d);
        double X = (qx + q[0][2]*d)*iW;
        double Y = (qy + q[1][2]*d)*iW;
        double Z = (qz + q[2][2]*d)*iW;
        // A disparity equal to minDisparity marks an invalid pixel; its depth
        // is pushed to the far sentinel (const double bigZ = 10000.; in the
        // full source).
        if( fabs(d-minDisparity) <= FLT_EPSILON )Z = bigZ;
        dptr[x*3] = (float)X;
        dptr[x*3+1] = (float)Y;
        dptr[x*3+2] = (float)Z;
    }}

利用 cvReprojectImageTo3D 计算出的三维坐标数据矩阵一般是三通道浮点型的。需要注意的是,这个矩阵存储的是三维坐标数据,而不是 RGB 颜色值,所以不能调用 cvShowImage() 或者 OpenCV 2.1 版的 imshow() 等函数来直接显示这个矩阵。



2、计算左右两图的视差

求左右相机的视差。

// depthView.cpp : 定义控制台应用程序的入口点。//#include "stdafx.h"#include "opencv2/calib3d/calib3d.hpp"#include "opencv2/imgproc/imgproc.hpp"#include "opencv2/highgui/highgui.hpp"#include "opencv2/contrib/contrib.hpp"#include <stdio.h>#include <iostream>    #include <fstream> using namespace cv;using namespace std;static void saveXYZ(const char* filename, const Mat& mat){const double max_z = 1.0e4;FILE* fp = fopen(filename, "wt");for(int y = 0; y < mat.rows; y++){for(int x = 0; x < mat.cols; x++){Vec3f point = mat.at<Vec3f>(y, x);if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);}}fclose(fp);}int main(int argc, char** argv){//left01.jpg right01.jpg --algorithm=bm -i intrinsics.yml -e extrinsics.ymlconst char* algorithm_opt = "--algorithm=";const char* maxdisp_opt = "--max-disparity=";const char* blocksize_opt = "--blocksize=";const char* nodisplay_opt = "--no-display";const char* scale_opt = "--scale=";if(argc < 3){return 0;}const char* img1_filename = 0;const char* img2_filename = 0;const char* intrinsic_filename = 0;const char* extrinsic_filename = 0;const char* disparity_filename = 0;const char* point_cloud_filename = 0;enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };int alg = STEREO_SGBM;int SADWindowSize = 0, numberOfDisparities = 0;bool no_display = false;float scale = 1.f;StereoBM bm;StereoSGBM sgbm;StereoVar var;img1_filename = "left01.jpg"; img2_filename = "right01.jpg";alg =0;intrinsic_filename ="intrinsics.yml";extrinsic_filename = "extrinsics.yml";//left01.jpg right01.jpg --algorithm=bm -i intrinsics.yml -e extrinsics.ymlif( !img1_filename || !img2_filename ){printf("Command-line parameter error: both left and right images must be specified\n");return -1;}if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) ){printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already 
rectified)\n");return -1;}if( extrinsic_filename == 0 && point_cloud_filename ){printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");return -1;}int color_mode = alg == STEREO_BM ? 0 : -1;Mat img1 = imread(img1_filename, color_mode);Mat img2 = imread(img2_filename, color_mode);if (img1.empty()){printf("Command-line parameter error: could not load the first input image file\n");return -1;}if (img2.empty()){printf("Command-line parameter error: could not load the second input image file\n");return -1;}if (scale != 1.f){Mat temp1, temp2;int method = scale < 1 ? INTER_AREA : INTER_CUBIC;resize(img1, temp1, Size(), scale, scale, method);img1 = temp1;resize(img2, temp2, Size(), scale, scale, method);img2 = temp2;}Size img_size = img1.size();Rect roi1, roi2;Mat Q;if( intrinsic_filename ){// reading intrinsic parametersFileStorage fs(intrinsic_filename, CV_STORAGE_READ);if(!fs.isOpened()){printf("Failed to open file %s\n", intrinsic_filename);return -1;}Mat M1, D1, M2, D2;fs["M1"] >> M1;fs["D1"] >> D1;fs["M2"] >> M2;fs["D2"] >> D2;M1 *= scale;M2 *= scale;fs.open(extrinsic_filename, CV_STORAGE_READ);if(!fs.isOpened()){printf("Failed to open file %s\n", extrinsic_filename);return -1;}Mat R, T, R1, P1, R2, P2;fs["R"] >> R;fs["T"] >> T;stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );Mat map11, map12, map21, map22;initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);Mat img1r, img2r;remap(img1, img1r, map11, map12, INTER_LINEAR);remap(img2, img2r, map21, map22, INTER_LINEAR);img1 = img1r;img2 = img2r;}numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;bm.state->roi1 = roi1;bm.state->roi2 = roi2;bm.state->preFilterCap = 31;bm.state->SADWindowSize = SADWindowSize > 0 ? 
SADWindowSize : 9;bm.state->minDisparity = 0;bm.state->numberOfDisparities = numberOfDisparities;bm.state->textureThreshold = 10;bm.state->uniquenessRatio = 15;bm.state->speckleWindowSize = 100;bm.state->speckleRange = 32;bm.state->disp12MaxDiff = 1;sgbm.preFilterCap = 63;sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;int cn = img1.channels();sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;sgbm.minDisparity = 0;sgbm.numberOfDisparities = numberOfDisparities;sgbm.uniquenessRatio = 10;sgbm.speckleWindowSize = bm.state->speckleWindowSize;sgbm.speckleRange = bm.state->speckleRange;sgbm.disp12MaxDiff = 1;sgbm.fullDP = alg == STEREO_HH;var.levels = 3;                                 // ignored with USE_AUTO_PARAMSvar.pyrScale = 0.5;                             // ignored with USE_AUTO_PARAMSvar.nIt = 25;var.minDisp = -numberOfDisparities;var.maxDisp = 0;var.poly_n = 3;var.poly_sigma = 0.0;var.fi = 15.0f;var.lambda = 0.03f;var.penalization = var.PENALIZATION_TICHONOV;   // ignored with USE_AUTO_PARAMSvar.cycle = var.CYCLE_V;                        // ignored with USE_AUTO_PARAMSvar.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;Mat disp, disp8;//Mat img1p, img2p, dispp;//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);int64 t = getTickCount();if( alg == STEREO_BM )bm(img1, img2, disp);else if( alg == STEREO_VAR ){var(img1, img2, disp);}else if( alg == STEREO_SGBM || alg == STEREO_HH )sgbm(img1, img2, disp);t = getTickCount() - t;printf("Time elapsed: %fms\n", t*1000/getTickFrequency());//disp = dispp.colRange(numberOfDisparities, img1p.cols);waitKey();if( alg != STEREO_VAR )disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));elsedisp.convertTo(disp8, CV_8U);cout<<"numberOfDisparities :"<<numberOfDisparities<<endl; // 80char* 
rute="dispdata.txt";    ofstream o_file(rute); //输出文件流,将数据输出到文件  for (int i=0;i<disp8.rows;i++){for (int j=0;j<disp8.cols;j++){o_file<<int(disp8.at<uchar>(cv::Point(j, i)))<<"   ";}o_file<<"\n";}if( !no_display ){namedWindow("左相机", 0);imshow("左相机", img1);namedWindow("右相机", 0);imshow("右相机", img2);       imshow("左右相机视差图", disp8);printf("press any key to continue...");fflush(stdout);waitKey();printf("\n");}disparity_filename ="disparity.bmp";//视差图if(disparity_filename)imwrite(disparity_filename, disp8);/////////////////////////////////////////////////////////////////////////point_cloud_filename ="point_cloud.txt";//保存云点if(point_cloud_filename){printf("storing the point cloud...");fflush(stdout);Mat xyz;reprojectImageTo3D(disp, xyz, Q, true);// 坐标数据校正// 乘以 16 得到 毫米 单位坐标,乘以 1.6 得到 厘米 单位坐标// 原理参见:http://blog.csdn.net/chenyusiyuan/article/details/5967291 /*xyz *= 1.6; // 校正 Y 方向数据,正负反转// 原理参见:http://blog.csdn.net/chenyusiyuan/article/details/5970799 for (int y = 0; y < xyz.rows; ++y){for (int x = 0; x < xyz.cols; ++x){cv::Point3f point = xyz.at<cv::Point3f>(y,x);point.y = -point.y;xyz.at<cv::Point3f>(y,x) = point;}}*/saveXYZ(point_cloud_filename, xyz);printf("\n");IplImage *image2 = (&(IplImage)xyz);  //同样只是创建图像头,而没有复制数据。cvSaveImage("image2.jpg",image2);//保存下来}return 0;}



0 0
原创粉丝点击