Source code for registering the Kinect depth map to the color image, with a sample data set


Camera class declaration (Camera.h)

// Author: Qin Hongliang
// Date: April 28, 2014
#ifndef CAMERA_000
#define CAMERA_000
#include <iostream>
#include <opencv\cv.h>
#include <opencv\highgui.h>

class Camera
{
public:
    float mA[9];  // camera intrinsic parameter matrix
    float mD[5];  // distortion coefficient array
    void SetCameraIntrisic(CvMat *K);
    void SetCameraDistort(CvMat *D);
    void DistortNormalized(const float Xn, const float Yn, float &Xd, float &Yd);
    void ConvertRealWorld3DToProjective(const float *X, float &u, float &v);
};

class DepthCamera : public Camera  // the depth camera is a subclass that inherits from Camera
{
    // Depth correction model: dc(u,v) = d(u,v) + beta(u,v)*exp(a0 - a1*d(u,v))
private:
    float mDC[2];   // a, b used when back-projecting dc(u,v) to millimeters
    float mda[2];   // a0, a1
    float R[9];
    float T[3];
    CvMat *mdb_;    // beta(u,v), 640*480, loaded from XML
public:
    void setmDC(CvMat *mdc);
    void setmda(CvMat *MDA);
    void setmdb_(CvMat *MDB_);
    void setR(CvMat *r);
    void setT(CvMat *t);
    float ConvertOpenNIDepthToRawDisparity(float &OpenNI_Z);  // convert the OpenNI return value to the raw disparity returned by OpenKinect
    float UndistortDisparity(float rawDisp, float u, float v);  // remove the disparity distortion
    float DisparityToDepth(float Disp);  // convert the disparity back to depth
    void ConvertProjectiveToNormalized(const float u, const float v, float &Xn, float &Yn);  // the camera uses an inverse distortion model, so project first
    void Transform3DPoint(float *XYZin, float *XYZout);
};
#endif
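Taken together, the DepthCamera methods implement the disparity-correction chain below (a plain-notation summary of what the member functions defined in the next section compute; a and b are Nicolas Burrus' hard-coded constants, while a0, a1, beta(u,v) and the final a', b' are loaded from the calibration XML files in the main program):

    rawDisp = (1000/Z_OpenNI - b) / a,   a = -0.0030711016, b = 3.3309495161   // ConvertOpenNIDepthToRawDisparity
    dc(u,v) = rawDisp + beta(u,v) * exp(a0 - a1*rawDisp)                       // UndistortDisparity
    Z       = 1000 / (a' * dc(u,v) + b')                                       // DisparityToDepth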



Definitions of the camera class member functions

#include<iostream>#include"Camera.h"void Camera::SetCameraIntrisic(CvMat *K)  //K=[fx,0,cx;0,fy,cy;0,0,1]{mA[0]=CV_MAT_ELEM(*K,float,0,0);  //fxmA[2]=CV_MAT_ELEM(*K,float,0,2);  //cxmA[4]=CV_MAT_ELEM(*K,float,1,1);  //fymA[5]=CV_MAT_ELEM(*K,float,1,2);  //cy//std::cout<<"fx= "<<mA[0]<<" fy= "<<mA[4]<<std::endl;};void Camera::SetCameraDistort(CvMat *D) //opencv定义的畸变格式{mD[0]=CV_MAT_ELEM(*D,float,0,0); //径向畸变系数k1,k2,k3mD[1]=CV_MAT_ELEM(*D,float,1,0);mD[4]=CV_MAT_ELEM(*D,float,4,0);mD[2]=CV_MAT_ELEM(*D,float,2,0);  //切向畸变系数p1,p2mD[3]=CV_MAT_ELEM(*D,float,3,0);//std::cout<<"md2"<<mD[2]<<std::endl;};void Camera::DistortNormalized(const float Xn, const float Yn, float &Xd, float &Yd) { float k1=mD[0]; float k2=mD[1]; float k3=mD[4]; float p1=mD[2]; float p2=mD[3]; float r2 = Xn*Xn+Yn*Yn; float rc = 1 + k1*r2 +k2*r2*r2 + k3*r2*r2*r2; float tdx = 2*p1*Xn*Yn + p2*(r2 + 2*Xn*Xn); float tdy = 2*p2*Xn*Yn + p1*(r2 + 2*Yn*Yn); Xd=rc*Xn+tdx; Yd=rc*Yn+tdy; } ;void Camera::ConvertRealWorld3DToProjective(const float *X, float &u, float &v) { //Normalize float Xn=X[0]/X[2]; float Yn=X[1]/X[2]; //Apply distortion model float Xd,Yd; DistortNormalized(Xn, Yn, Xd, Yd); float fx = mA[0]; float cx = mA[2]; float fy = mA[4]; float cy = mA[5]; u=fx*Xd+cx; v= - fy*Yd+cy;            //Be aware of the minus sign ‘-‘ } ;void DepthCamera::setmDC(CvMat *mdc){for(int i=0;i<2;i++){mDC[i]=CV_MAT_ELEM(*mdc,float,i,0); }};void DepthCamera::setmda(CvMat *MDA){for(int i=0;i<2;i++){mda[i]=CV_MAT_ELEM(*MDA,float,i,0); }};void DepthCamera::setmdb_(CvMat *MDB_){mdb_=MDB_;};void DepthCamera::setR(CvMat *r){for(int i=0;i<3;i++)for(int j=0;j<3;j++){R[i*3+j]=CV_MAT_ELEM( *r, float, i, j );}};void DepthCamera::setT(CvMat *t){for(int i=0;i<3;i++)T[i]=CV_MAT_ELEM( *t, float, 0, i );};float DepthCamera::ConvertOpenNIDepthToRawDisparity(float &OpenNI_Z){float rawDisp;     //if (OpenNI_Z<200)  //可以去除认为不准确的点// return 0; //Use Nicolas Burrus parameters (http://nicolas.burrus.name/index.php/Research/KinectCalibration)  float a=-0.0030711016f;  float b=3.3309495161f; rawDisp=(1000/OpenNI_Z-b)/a; return rawDisp; };float DepthCamera::UndistortDisparity(float rawDisp, float u, float v){float correctedDisp; int u_=(int)(u+0.5f); int v_=(int)(v+0.5f);//int u_=(int)(u); //int v_=(int)(v); float beta = cvmGet(mdb_,u_,v_); float a0=mda[0]; float a1=mda[1]; correctedDisp=rawDisp+beta*exp(a0-a1*rawDisp); return correctedDisp; };float DepthCamera::DisparityToDepth(float Disp) { float a=mDC[1]; float b=mDC[0]; float depth = 1000/(a*Disp + b); return depth; } ;void DepthCamera::ConvertProjectiveToNormalized (const float u, const float v, float &Xn, float &Yn) { float fx = mA[0]; float cx = mA[2]; float fy = mA[4]; float cy = mA[5]; Xn = (u - cx)/fx; Yn = -(v - cy)/fy;              //Be aware of the minus sign ‘-‘ } ;void DepthCamera::Transform3DPoint(float* XYZin, float* XYZout){ float X=XYZin[0]; float Y=-XYZin[1];              //Be aware of the minus sign ‘-‘ float Z=XYZin[2]; for (int j=0;j<3;j++) XYZout[j] = R[3*j+0]*X + R[3*j+1]*Y + R[3*j+2]*Z + 1000*T[j]; XYZout[1]=-XYZout[1];           //Be aware of the minus sign ‘-‘ } ;

Point cloud class declaration (CPointcloud.h)

#ifndef POINTCLOUD_0000
#define POINTCLOUD_0000
#include <iostream>
#include <vector>

class ColorPointCloud  // a colored point-cloud vertex
{
public:
    float X;
    float Y;
    float Z;
    /*float R; float G; float B; float Alpha;*/
    unsigned char R;
    unsigned char G;
    unsigned char B;
    unsigned char Alpha;
    void SetPointPos(float *Pos);
    void SetPointColor(int *Color);
};
#endif

Point cloud class member function definitions

#include"CPointcloud.h"void ColorPointCloud::SetPointPos(float *Pos){X=Pos[0];Y=Pos[1];Z=Pos[2];};void  ColorPointCloud::SetPointColor(int *Color){R=Color[0];G=Color[1];B=Color[2];Alpha=Color[3];};

Main program

// This program loads the Kinect camera parameters together with a depth map and a
// color image captured by the camera, merges them into a 3D point cloud, and
// stores the result in PLY format.
// Author: Qin Hongliang
// Email: qinhl060343@mail.nwpu.edu.cn
#include <iostream>
#include "Camera.h"
#include "CPointcloud.h"
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv2\opencv.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\core\core.hpp>
#include <string>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
using namespace std;

// Helper that saves the point cloud in binary PLY format
void save_PLY(vector<ColorPointCloud> vPointCloud, string s)
{
    char vertex_num[16];
    sprintf(vertex_num, "%d", (int)vPointCloud.size());  // itoa is non-standard; sprintf does the same job portably
    string ims = "element vertex ";
    string header = ims + vertex_num;
    ofstream outfile(s, std::ios::ate | std::ios::binary);
    outfile << "ply" << endl;
    outfile << "format binary_little_endian 1.0" << endl;
    outfile << "comment VCGLIB generated" << endl;
    outfile << header << endl;
    outfile << "property float x" << endl;
    outfile << "property float y" << endl;
    outfile << "property float z" << endl;
    outfile << "property uchar red" << endl;
    outfile << "property uchar green" << endl;
    outfile << "property uchar blue" << endl;
    outfile << "property uchar alpha" << endl;
    outfile << "element face 0" << endl;
    outfile << "property list uchar int vertex_indices" << endl;
    outfile << "end_header" << endl;
    for (int i = 0; i < (int)vPointCloud.size(); i++)
    {
        outfile.write((char*)(&vPointCloud[i]), sizeof(vPointCloud[i]));
    }
    outfile.close();
}

int main()
{
    clock_t nTimeStart;   // start of timing
    clock_t nTimefinish;  // end of timing
    nTimeStart = clock();

    // Define the color camera and the depth camera
    Camera MyColorCam;
    DepthCamera MyDepthCam;

    // Define the colored point cloud
    vector<ColorPointCloud> MyPointCloud;
    ColorPointCloud Apoint;

    // Load the calibration parameters.
    // They could also be read into new-style cv::Mat objects and accessed with at<>():
    //cv::Mat CIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsRGB1.xml");
    //cv::Mat DIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsIR1.xml");
    //for (int i = 0; i < CIntrinsic.rows; i++)
    //    for (int j = 0; j < CIntrinsic.cols; j++)
    //    {
    //        CK[i*j+j] = CIntrinsic.at<float>(i, j);
    //        DK[i*j+j] = DIntrinsic.at<float>(i, j);
    //    }
    // Here the old-style CvMat is used instead.
    // Camera intrinsics
    CvMat *CIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsRGB2.xml");
    CvMat *DIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsIR2.xml");
    // Camera distortion
    CvMat *DistortionRGB1 = (CvMat*)cvLoad("..\\..\\camera2_data\\DistortionRGB2.xml");
    CvMat *DistortionIR1 = (CvMat*)cvLoad("..\\..\\camera2_data\\DistortionIR2.xml");
    // Depth correction parameters
    CvMat *dc_a1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc_a2.xml");  // a0, a1
    CvMat *dc_b1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc_b2.xml");  // beta
    CvMat *dc1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc2.xml");      // a, b used to convert the disparity back to OpenNI depth
    // Extrinsics between the depth and color cameras
    CvMat *R1 = (CvMat*)cvLoad("..\\..\\camera2_data\\R2.xml");
    CvMat *T1 = (CvMat*)cvLoad("..\\..\\camera2_data\\T2.xml");

    // Store the loaded intrinsic parameters
    MyColorCam.SetCameraIntrisic(CIntrinsic);
    MyDepthCam.SetCameraIntrisic(DIntrinsic);
    // Store the loaded distortion coefficients
    MyColorCam.SetCameraDistort(DistortionRGB1);
    MyDepthCam.SetCameraDistort(DistortionIR1);
    // Pass the loaded a0, a1 to the depth camera
    MyDepthCam.setmda(dc_a1);
    // Pass the loaded a, b to the depth camera, used to convert the depth value back
    MyDepthCam.setmDC(dc1);
    // Pass the per-pixel correction beta to the depth camera
    MyDepthCam.setmdb_(dc_b1);
    // Pass the relative pose between the depth and color cameras to the depth camera
    MyDepthCam.setR(R1);
    MyDepthCam.setT(T1);

    // Load the color image
    cv::Mat CImg = cv::imread("..\\..\\camera2_data\\data\\3c1125.jpg");
    if (CImg.data == 0)  // check that the image path is correct
    {
        cout << "error in loading ColorImage" << endl;
        exit(1);
    }
    if (CImg.channels() != 3)
    {
        cout << " not a color image" << endl;
        exit(1);
    }

    // Load the depth map into a 2D array and correct its depth values
    float(*DImg)[640] = new float[480][640];  // allocate from the heap with new; an array this large on the stack would overflow
    ifstream f("..\\..\\camera2_data\\data\\3d1125.txt", ios::in | ios::binary);  // open the depth map and check the path
    if (!f)
    {
        cout << "error in loading DepthImage" << endl;
        exit(1);
    }
    for (int i = 0; i < 480; i++)
    {
        for (int j = 0; j < 640; j++)
        {
            float Xn, Yn;   // normalized x, y
            float Xd, Yd;   // distortion-corrected x, y
            float u, v;     // index of the depth pixel projected into the color image
            float Pos[3];   // 3D position of the current pixel
            int Color[4];   // r, g, b, a of the current point
            float NPos[3];
            f.read(reinterpret_cast<char *>(&DImg[i][j]), sizeof(float));  // read a depth value from the binary file; the read length must be specified
            DImg[i][j] = MyDepthCam.ConvertOpenNIDepthToRawDisparity(DImg[i][j]);
            DImg[i][j] = MyDepthCam.UndistortDisparity(DImg[i][j], i, j);  // the parameters are float, so i and j are converted
            DImg[i][j] = MyDepthCam.DisparityToDepth(DImg[i][j]);
            MyDepthCam.ConvertProjectiveToNormalized(j, i, Xn, Yn);  // NOTE the order of i and j: j runs along fx, i along fy
            MyDepthCam.DistortNormalized(Xn, Yn, Xd, Yd);
            // Record the 3D position and assign it to the point
            Pos[0] = Xd * DImg[i][j];
            Pos[1] = Yd * DImg[i][j];
            Pos[2] = DImg[i][j];
            // Transform into the color camera coordinate system
            MyDepthCam.Transform3DPoint(Pos, NPos);
            // Fetch the color; note that OpenCV stores channels in BGR order
            MyColorCam.ConvertRealWorld3DToProjective(NPos, u, v);
            // Add the point only if it falls inside the color image
            if (u >= 0 && u < 640 && v >= 0 && v < 480 && Pos[2] != 0)
            {
                Apoint.SetPointPos(Pos);
                Color[0] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[2];  // NOTE the order of u and v
                Color[1] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[1];
                Color[2] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[0];
                Color[3] = 255;
                Apoint.SetPointColor(Color);
            }
            else
                continue;
            // Store the point in the cloud
            MyPointCloud.push_back(Apoint);
        }
    }
    delete[] DImg;  // free the allocated memory

    // Path of the output point cloud
    string s = "..\\..\\camera2_data\\res\\1125.ply";
    save_PLY(MyPointCloud, s);

    nTimefinish = clock();
    cout << "Total time: " << (double)(nTimefinish - nTimeStart) / CLOCKS_PER_SEC << " s" << endl;
    return 0;
}
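For reference, the main program expects the depth file (3d1125.txt) to be a raw binary dump of 480x640 float depth values in row-major order, read back one float per pixel. A minimal sketch of a writer producing that layout from an arbitrary float buffer (the function name is hypothetical):

#include <fstream>

// Write a 480x640 float depth buffer in the raw row-major binary layout
// that the main program reads back one float at a time.
void dump_depth(const float *depth, const char *path)
{
    std::ofstream out(path, std::ios::out | std::ios::binary);
    out.write(reinterpret_cast<const char *>(depth),
              480 * 640 * sizeof(float));
}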


Data download: http://yunpan.cn/QhDtUahTsAnja (access code: 90bb)
