Camera calibration for robotic-arm grasping and similar tasks (using calibration to map image points to 3D coordinates)


In the lab or in engineering practice, we often need to compute 3D coordinates from the 2D image coordinates of points in a captured image.

  

As shown in the image above, we have a picture of the scene. We need to compute the 3D positions of the object and of the robotic arm so that the arm can grasp the object precisely.

We use Zhang Zhengyou's calibration method to estimate the intrinsic and extrinsic parameters, and then use them to compute the corresponding 3D coordinates. The formulas are omitted here; see Zhang's PAMI paper for the details.
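For reference, the planar relationship the program relies on can be summarised as follows. This is a standard statement of the pinhole/Zhang model rather than a quotation from the paper: K is the intrinsic matrix, R = [r1 r2 r3] and t are the rotation and translation of one board view, and (X, Y, 0) is a point on the chessboard plane. Ignoring lens distortion,

\[
s \begin{bmatrix} u \\ v \\ 1 \end{bmatrix}
  = K \,[\, R \mid t \,] \begin{bmatrix} X \\ Y \\ Z \\ 1 \end{bmatrix},
\qquad
Z = 0 \;\Longrightarrow\;
s \begin{bmatrix} u \\ v \\ 1 \end{bmatrix}
  = K \,[\, r_1 \;\; r_2 \;\; t \,] \begin{bmatrix} X \\ Y \\ 1 \end{bmatrix}
  = H \begin{bmatrix} X \\ Y \\ 1 \end{bmatrix}.
\]

The program below first removes lens distortion and divides out K with cvUndistortPoints, so the homography it then estimates with cvFindHomography maps normalised image coordinates directly to board coordinates (X, Y); applying it to the undistorted query pixel with cvPerspectiveTransform yields (X, Y, 0) in the frame of the first chessboard corner.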

     

/*
// author:   YeahPingYE
// function: camera calibration and pixel-to-board-plane mapping
// time:     2014/11/25
*/
#include <highgui.h>
#include <cv.h>
#include <iostream>
#include <fstream>
using namespace std;

int main()
{
    /* initialisation
    // input: the number of images ...... num_image
    //        prj_board_w, prj_board_h
    //        cam_board_w, cam_board_h
    */
    CvMat* cam_object_points2;
    CvMat* cam_image_points2;
    int cam_board_n;
    int successes = 0;
    int img_num, cam_board_w, cam_board_h;

    cout << "Enter the number of calibration images\n";
    cin >> img_num;
    cout << "Enter the number of corners along the horizontal axis of the real chessboard\n";
    cin >> cam_board_w;
    cout << "Enter the number of corners along the vertical axis of the real chessboard\n";
    cin >> cam_board_h;

    double cam_Dx = 30;   // chessboard square size along x
    double cam_Dy = 30;   // chessboard square size along y
    cout << "Enter the square size dx along the horizontal axis of the real chessboard\n";
    cin >> cam_Dx;
    cout << "Enter the square size dy along the vertical axis of the real chessboard\n";
    cin >> cam_Dy;

    /*int img_num = 4;
    int cam_board_w = 10;
    int cam_board_h = 8;*/
    cam_board_n = cam_board_w * cam_board_h;

    double x_pixel, y_pixel;
    cout << "Enter the pixel coordinates of the queried position\nhorizontal pixel coordinate u\n";
    cin >> x_pixel;
    cout << "vertical pixel coordinate v\n";
    cin >> y_pixel;
    //x_pixel = 0;                 y_pixel = 0;
    //x_pixel = 6.01938965e+002;   y_pixel = 5.65429077e+002;
    //x_pixel = 7.04663452e+002;   y_pixel = 5.54746582e+002;
    //x_pixel = 8.88867798e+002;   y_pixel = 9.58109802e+002;

    /*
    // init
    */
    // camera init
    CvSize cam_board_sz = cvSize(cam_board_w, cam_board_h);
    CvMat* cam_image_points  = cvCreateMat(cam_board_n * img_num, 2, CV_32FC1);
    CvMat* cam_object_points = cvCreateMat(cam_board_n * img_num, 3, CV_32FC1);
    CvMat* cam_point_counts  = cvCreateMat(img_num, 1, CV_32SC1);
    CvPoint2D32f* cam_corners = new CvPoint2D32f[cam_board_n];
    int cam_corner_count;
    int cam_step;
    CvMat* cam_intrinsic_matrix  = cvCreateMat(3, 3, CV_32FC1);
    CvMat* cam_distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);
    CvSize cam_image_sz;

    // window init
    cvNamedWindow("window", 0);

    // get image size
    IplImage* cam_image_temp = cvLoadImage("..\\cam\\cam1.bmp", 0);
    cam_image_sz = cvGetSize(cam_image_temp);

    /*
    // extract corners from the camera images
    */
    for (int ii = 1; ii < img_num + 1; ii++)
    {
        char cambuf[20] = { 0 };
        sprintf(cambuf, "..\\cam\\cam%d.bmp", ii);
        IplImage* cam_image = cvLoadImage(cambuf, 0);

        // extract chessboard corners and refine them to sub-pixel accuracy
        int cam_found = cvFindChessboardCorners(cam_image, cam_board_sz, cam_corners,
            &cam_corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        cvFindCornerSubPix(cam_image, cam_corners, cam_corner_count,
            cvSize(11, 11), cvSize(-1, -1),
            cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
        cvDrawChessboardCorners(cam_image, cam_board_sz, cam_corners, cam_corner_count, cam_found);

        // when the whole board is found, store the result
        if (cam_corner_count == cam_board_n)
        {
            cam_step = successes * cam_board_n;
            for (int i = cam_step, j = 0; j < cam_board_n; ++i, ++j)
            {
                CV_MAT_ELEM(*cam_image_points, float, i, 0) = cam_corners[j].x;
                CV_MAT_ELEM(*cam_image_points, float, i, 1) = cam_corners[j].y;
                CV_MAT_ELEM(*cam_object_points, float, i, 0) = (j / cam_board_w) * cam_Dx;
                CV_MAT_ELEM(*cam_object_points, float, i, 1) = (j % cam_board_w) * cam_Dy;
                CV_MAT_ELEM(*cam_object_points, float, i, 2) = 0.0f;
            }
            CV_MAT_ELEM(*cam_point_counts, int, successes, 0) = cam_board_n;
            successes++;
            cout << "success number " << successes << endl;
            cvShowImage("window", cam_image);
            cvWaitKey(500);
        }
    }

    /*
    // keep only the successfully detected views
    */
    cam_image_points2  = cvCreateMat(cam_board_n * successes, 2, CV_32FC1);
    cam_object_points2 = cvCreateMat(cam_board_n * successes, 3, CV_32FC1);
    CvMat* cam_point_counts2 = cvCreateMat(successes, 1, CV_32SC1);
    for (int i = 0; i < successes * cam_board_n; ++i)
    {
        CV_MAT_ELEM(*cam_image_points2, float, i, 0)  = CV_MAT_ELEM(*cam_image_points, float, i, 0);
        CV_MAT_ELEM(*cam_image_points2, float, i, 1)  = CV_MAT_ELEM(*cam_image_points, float, i, 1);
        CV_MAT_ELEM(*cam_object_points2, float, i, 0) = CV_MAT_ELEM(*cam_object_points, float, i, 0);
        CV_MAT_ELEM(*cam_object_points2, float, i, 1) = CV_MAT_ELEM(*cam_object_points, float, i, 1);
        CV_MAT_ELEM(*cam_object_points2, float, i, 2) = CV_MAT_ELEM(*cam_object_points, float, i, 2);
    }
    for (int i = 0; i < successes; ++i)
    {
        CV_MAT_ELEM(*cam_point_counts2, int, i, 0) = CV_MAT_ELEM(*cam_point_counts, int, i, 0);
    }
    cvSave("..\\output\\XML\\cam_corners.xml", cam_image_points2);
    cvReleaseMat(&cam_object_points);
    cvReleaseMat(&cam_image_points);
    cvReleaseMat(&cam_point_counts);

    /*
    // calibration for the camera
    */
    CV_MAT_ELEM(*cam_intrinsic_matrix, float, 0, 0) = 1.0f;
    CV_MAT_ELEM(*cam_intrinsic_matrix, float, 1, 1) = 1.0f;
    CvMat* cam_rotation_all = cvCreateMat(successes, 3, CV_32FC1);
    CvMat* cam_translation_vector_all = cvCreateMat(successes, 3, CV_32FC1);
    cvCalibrateCamera2(cam_object_points2, cam_image_points2, cam_point_counts2,
        cam_image_sz, cam_intrinsic_matrix, cam_distortion_coeffs,
        cam_rotation_all, cam_translation_vector_all,
        0 /*CV_CALIB_FIX_ASPECT_RATIO*/);
    cvSave("..\\output\\XML\\cam_intrinsic_matrix.xml", cam_intrinsic_matrix);
    cvSave("..\\output\\XML\\cam_distortion_coeffs.xml", cam_distortion_coeffs);
    cvSave("..\\output\\XML\\cam_rotation_all.xml", cam_rotation_all);
    cvSave("..\\output\\XML\\cam_translation_vector_all.xml", cam_translation_vector_all);

    // undistort the detected corners of the first view
    CvMat* cam_dist_image_points   = cvCreateMat(cam_board_n, 1, CV_32FC2);
    CvMat* cam_undist_image_points = cvCreateMat(cam_board_n, 1, CV_32FC2);
    for (int j = 0; j < cam_board_n; ++j)
    {
        cvSet1D(cam_dist_image_points, j,
            cvScalar(CV_MAT_ELEM(*cam_image_points2, float, j, 0),
                     CV_MAT_ELEM(*cam_image_points2, float, j, 1)));
    }
    cvUndistortPoints(cam_dist_image_points, cam_undist_image_points,
        cam_intrinsic_matrix, cam_distortion_coeffs, NULL, NULL);
    cvReleaseMat(&cam_dist_image_points);

    // homography from normalised image coordinates to the board plane of the first view
    CvMat* homography = cvCreateMat(3, 3, CV_32FC1);
    CvMat* cam_src = cvCreateMat(cam_board_n, 3, CV_32FC1);
    CvMat* cam_dst = cvCreateMat(cam_board_n, 3, CV_32FC1);
    for (int j = 0; j < cam_board_n; ++j)
    {
        CvScalar pd = cvGet1D(cam_undist_image_points, j);
        CV_MAT_ELEM(*cam_src, float, j, 0) = (float)pd.val[0];
        CV_MAT_ELEM(*cam_src, float, j, 1) = (float)pd.val[1];
        CV_MAT_ELEM(*cam_src, float, j, 2) = 1.0;
        CV_MAT_ELEM(*cam_dst, float, j, 0) = CV_MAT_ELEM(*cam_object_points2, float, j, 0);
        CV_MAT_ELEM(*cam_dst, float, j, 1) = CV_MAT_ELEM(*cam_object_points2, float, j, 1);
        CV_MAT_ELEM(*cam_dst, float, j, 2) = 1.0;
    }
    cvReleaseMat(&cam_undist_image_points);
    cvFindHomography(cam_src, cam_dst, homography);
    cvReleaseMat(&cam_src);
    cvReleaseMat(&cam_dst);
    /*cvInvert(homography, homography);*/
    cvSave("..\\output\\XML\\homography.xml", homography);

    // map the queried pixel: undistort, normalise, then apply the plane homography
    CvMat* pixel_src  = cvCreateMat(1, 1, CV_32FC2);
    CvMat* pixel_src2 = cvCreateMat(1, 1, CV_32FC2);
    CvMat* pixel_dst  = cvCreateMat(1, 1, CV_32FC2);
    cvSet1D(pixel_src, 0, cvScalar(x_pixel, y_pixel));
    cvUndistortPoints(pixel_src, pixel_src2,
        cam_intrinsic_matrix, cam_distortion_coeffs, NULL, NULL);
    cvPerspectiveTransform(pixel_src2, pixel_dst, homography);
    cvSave("..\\output\\XML\\pixel_src.xml", pixel_src);
    cvSave("..\\output\\XML\\pixel_src2.xml", pixel_src2);
    cvSave("..\\output\\XML\\pixel_dst.xml", pixel_dst);

    CvScalar pd = cvGet1D(pixel_dst, 0);
    cout << "3D coordinates of the pixel (in the world frame anchored at the first chessboard corner)\n" << endl;
    cout << "(" << pd.val[0] << "  ," << pd.val[1] << "   ,0)" << endl;
    return 0;
}
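For newer OpenCV releases, the same pixel-to-plane lookup can be expressed with the C++ API. The following is only an illustrative sketch, not part of the original project: the variable names and the placeholder values of cameraMatrix, distCoeffs and H are assumptions, and in practice they would be loaded from the XML files saved above (for example with cv::FileStorage).

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Placeholder calibration results; replace them with the values produced by the
    // calibration above (cam_intrinsic_matrix.xml, cam_distortion_coeffs.xml, homography.xml).
    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 800, 0, 640,
                                                      0, 800, 480,
                                                      0,   0,   1);
    cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64F);
    cv::Mat H = cv::Mat::eye(3, 3, CV_64F);   // normalised image coords -> board plane

    // Pixel whose board-plane coordinates are wanted (the example pixel from the text).
    std::vector<cv::Point2f> pixel{ cv::Point2f(969.449f, 635.648f) };

    // Remove distortion and normalise with the intrinsics (the role of cvUndistortPoints).
    std::vector<cv::Point2f> normalised;
    cv::undistortPoints(pixel, normalised, cameraMatrix, distCoeffs);

    // Apply the plane homography (the role of cvPerspectiveTransform).
    std::vector<cv::Point2f> board;
    cv::perspectiveTransform(normalised, board, H);

    std::cout << "(" << board[0].x << ", " << board[0].y << ", 0)" << std::endl;
    return 0;
}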

In the run above, the true board coordinate corresponding to the pixel (969.449, 635.648) is (90, 60, 0); the result shown in the figure above has a fairly small error.
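To quantify that error, one can push all detected corner pixels of the first view through the same undistort-plus-homography pipeline and compare the results with their known board coordinates. The snippet below is a minimal sketch assuming it is appended to the end of main() in the program above: the chk_* and err_sum names are new, <cmath> is needed for sqrt, and everything else it uses is already defined there.

    // Re-project the first view's corner pixels onto the board plane and measure the error.
    CvMat* chk_src = cvCreateMat(cam_board_n, 1, CV_32FC2);
    CvMat* chk_und = cvCreateMat(cam_board_n, 1, CV_32FC2);
    CvMat* chk_dst = cvCreateMat(cam_board_n, 1, CV_32FC2);
    for (int j = 0; j < cam_board_n; ++j)
    {
        cvSet1D(chk_src, j, cvScalar(CV_MAT_ELEM(*cam_image_points2, float, j, 0),
                                     CV_MAT_ELEM(*cam_image_points2, float, j, 1)));
    }
    cvUndistortPoints(chk_src, chk_und, cam_intrinsic_matrix, cam_distortion_coeffs, NULL, NULL);
    cvPerspectiveTransform(chk_und, chk_dst, homography);

    double err_sum = 0;
    for (int j = 0; j < cam_board_n; ++j)
    {
        CvScalar p = cvGet1D(chk_dst, j);
        double dx = p.val[0] - CV_MAT_ELEM(*cam_object_points2, float, j, 0);
        double dy = p.val[1] - CV_MAT_ELEM(*cam_object_points2, float, j, 1);
        err_sum += sqrt(dx * dx + dy * dy);   // distance on the board plane, same unit as cam_Dx
    }
    cout << "mean planar error: " << err_sum / cam_board_n << endl;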

The step-by-step instructions for running the program and the related files are available at the link below (a VS2013 project with OpenCV already configured; it can be run directly):

http://download.csdn.net/download/zhouyelihua/8236557



CSDN's download section is really poor, so the files have also been uploaded to a Baidu cloud drive: http://pan.baidu.com/s/1hefQe


