Mastering OpenCV ch4: SfM Explained (Part 2)
The previous part covered feature detection and matching; this part looks at how the camera matrices are recovered.
1: Computing the fundamental matrix
First, two small helper functions that convert between KeyPoint and Point2f:
void KeyPointsToPoints(const vector<KeyPoint>& kps, vector<Point2f>& ps) {
    ps.clear();
    for (unsigned int i=0; i<kps.size(); i++) ps.push_back(kps[i].pt);
}

void PointsToKeyPoints(const vector<Point2f>& ps, vector<KeyPoint>& kps) {
    kps.clear();
    for (unsigned int i=0; i<ps.size(); i++) kps.push_back(KeyPoint(ps[i],1.0f));
}
These two helpers are used constantly when computing the fundamental matrix, so they are listed here up front.
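As a quick illustration of how they are typically used (a minimal sketch, not part of the book's code: the helper name FundamentalFromKeyPoints and the 3.0 / 0.99 RANSAC parameters are hypothetical defaults, and the OpenCV 2.x API used throughout this post is assumed):

// Minimal usage sketch: convert keypoints to Point2f before estimating F.
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

Mat FundamentalFromKeyPoints(const vector<KeyPoint>& kps1,
                             const vector<KeyPoint>& kps2,
                             vector<uchar>& status)
{
    vector<Point2f> pts1, pts2;
    KeyPointsToPoints(kps1, pts1);   // findFundamentalMat works on Point2f, not KeyPoint
    KeyPointsToPoints(kps2, pts2);
    // status[i] == 1 marks the i-th correspondence as a RANSAC inlier
    return findFundamentalMat(pts1, pts2, FM_RANSAC, 3.0, 0.99, status);
}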
Next, the fundamental matrix is computed for every pair of images:
void MultiCameraPnP::PruneMatchesBasedOnF() {
    //prune the match between <_i> and all views using the Fundamental matrix to prune
    //#pragma omp parallel for
    //compute the fundamental matrix for every pair of images
    for (int _i=0; _i < imgs.size() - 1; _i++) {
        for (unsigned int _j=_i+1; _j < imgs.size(); _j++) {
            int older_view = _i, working_view = _j;

            GetFundamentalMat( imgpts[older_view],
                               imgpts[working_view],
                               imgpts_good[older_view],
                               imgpts_good[working_view],
                               matches_matrix[std::make_pair(older_view,working_view)]
#ifdef __SFM__DEBUG__
                               ,imgs_orig[older_view],imgs_orig[working_view]
#endif
                             );
            //update flip matches as well
#pragma omp critical
            matches_matrix[std::make_pair(working_view,older_view)] =
                FlipMatches(matches_matrix[std::make_pair(older_view,working_view)]);
        }
    }
}

The core function here is GetFundamentalMat.
It computes the fundamental matrix of an image pair and uses it to prune the match set, removing essentially all of the mismatches:
Mat GetFundamentalMat(const vector<KeyPoint>& imgpts1,
                      const vector<KeyPoint>& imgpts2,
                      vector<KeyPoint>& imgpts1_good,
                      vector<KeyPoint>& imgpts2_good,
                      vector<DMatch>& matches
#ifdef __SFM__DEBUG__
                      ,const Mat& img_1, const Mat& img_2
#endif
                     )
{
    //Try to eliminate keypoints based on the fundamental matrix
    //(although this is not the proper way to do this)
    vector<uchar> status(imgpts1.size());

#ifdef __SFM__DEBUG__
    std::vector< DMatch > good_matches_;
    std::vector<KeyPoint> keypoints_1, keypoints_2;
#endif
    // undistortPoints(imgpts1, imgpts1, cam_matrix, distortion_coeff);
    // undistortPoints(imgpts2, imgpts2, cam_matrix, distortion_coeff);
    //
    imgpts1_good.clear();
    imgpts2_good.clear();

    vector<KeyPoint> imgpts1_tmp;
    vector<KeyPoint> imgpts2_tmp;
    if (matches.size() <= 0) {
        //points already aligned...
        imgpts1_tmp = imgpts1;
        imgpts2_tmp = imgpts2;
    } else {
        GetAlignedPointsFromMatch(imgpts1, imgpts2, matches, imgpts1_tmp, imgpts2_tmp); //pick the matched keypoints out of the full keypoint sets
    }

    Mat F;
    {
        vector<Point2f> pts1,pts2;
        KeyPointsToPoints(imgpts1_tmp, pts1); //convert the keypoints to Point2f
        KeyPointsToPoints(imgpts2_tmp, pts2);
#ifdef __SFM__DEBUG__
        cout << "pts1 " << pts1.size() << " (orig pts " << imgpts1_tmp.size() << ")" << endl;
        cout << "pts2 " << pts2.size() << " (orig pts " << imgpts2_tmp.size() << ")" << endl;
#endif
        double minVal,maxVal;
        cv::minMaxIdx(pts1,&minVal,&maxVal);
        F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.006 * maxVal, 0.99, status); //threshold from [Snavely07 4.1]
        //the key call: computes F and fills the inlier mask 'status'
    }

    vector<DMatch> new_matches;
    cout << "F keeping " << countNonZero(status) << " / " << status.size() << endl;
    for (unsigned int i=0; i<status.size(); i++) {
        if (status[i]) //using the mask, keep only the matches and keypoints that survived; what remains is essentially free of mismatches
        {
            imgpts1_good.push_back(imgpts1_tmp[i]);
            imgpts2_good.push_back(imgpts2_tmp[i]);

            if (matches.size() <= 0) { //points already aligned...
                new_matches.push_back(DMatch(matches[i].queryIdx,matches[i].trainIdx,matches[i].distance));
            } else {
                new_matches.push_back(matches[i]);
            }

#ifdef __SFM__DEBUG__
            good_matches_.push_back(DMatch(imgpts1_good.size()-1,imgpts1_good.size()-1,1.0));
            keypoints_1.push_back(imgpts1_tmp[i]);
            keypoints_2.push_back(imgpts2_tmp[i]);
#endif
        }
    }

    cout << matches.size() << " matches before, " << new_matches.size() << " new matches after Fundamental Matrix\n";
    matches = new_matches; //keep only those points who survived the fundamental matrix

#if 0
    //-- Draw only "good" matches
#ifdef __SFM__DEBUG__
    if(!img_1.empty() && !img_2.empty()) {
        vector<Point2f> i_pts,j_pts;
        Mat img_orig_matches;
        { //draw original features in red
            vector<uchar> vstatus(imgpts1_tmp.size(),1);
            vector<float> verror(imgpts1_tmp.size(),1.0);
            img_1.copyTo(img_orig_matches);
            KeyPointsToPoints(imgpts1_tmp, i_pts);
            KeyPointsToPoints(imgpts2_tmp, j_pts);
            drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0,0,255));
        }
        { //superimpose filtered features in green
            vector<uchar> vstatus(imgpts1_good.size(),1);
            vector<float> verror(imgpts1_good.size(),1.0);
            i_pts.resize(imgpts1_good.size());
            j_pts.resize(imgpts2_good.size());
            KeyPointsToPoints(imgpts1_good, i_pts);
            KeyPointsToPoints(imgpts2_good, j_pts);
            drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0,255,0));
            imshow( "Filtered Matches", img_orig_matches );
        }
        int c = waitKey(0);
        if (c=='s') {
            imwrite("fundamental_mat_matches.png", img_orig_matches);
        }
        destroyWindow("Filtered Matches");
    }
#endif
#endif

    return F;
}

The helper that picks the matched keypoints out of each image's full set of detected keypoints is GetAlignedPointsFromMatch:
void GetAlignedPointsFromMatch(const std::vector<cv::KeyPoint>& imgpts1,
                               const std::vector<cv::KeyPoint>& imgpts2,
                               const std::vector<cv::DMatch>& matches,
                               std::vector<cv::KeyPoint>& pt_set1,
                               std::vector<cv::KeyPoint>& pt_set2)
{
    for (unsigned int i=0; i<matches.size(); i++) {
//      cout << "matches[i].queryIdx " << matches[i].queryIdx << " matches[i].trainIdx " << matches[i].trainIdx << endl;
        assert(matches[i].queryIdx < imgpts1.size());
        pt_set1.push_back(imgpts1[matches[i].queryIdx]);
        assert(matches[i].trainIdx < imgpts2.size());
        pt_set2.push_back(imgpts2[matches[i].trainIdx]);
    }
}
At this point the best match set between every pair of images has been obtained. The next step is to decide which pairs to process further.
2: Counting homography inliers between two views
Count how many matches between two views are inliers of a homography: RANSAC fits a homography, and the points consistent with it once the estimate converges are the inliers.
//Following Snavely07 4.2 - find how many inliers are in the Homography between 2 views
//count the inliers between two views, filtered by RANSAC
int MultiCameraPnP::FindHomographyInliers2Views(int vi, int vj)
{
    vector<cv::KeyPoint> ikpts,jkpts;
    vector<cv::Point2f> ipts,jpts;
    GetAlignedPointsFromMatch(imgpts[vi],imgpts[vj],matches_matrix[make_pair(vi,vj)],ikpts,jkpts);
    KeyPointsToPoints(ikpts,ipts);
    KeyPointsToPoints(jkpts,jpts);

    double minVal,maxVal;
    cv::minMaxIdx(ipts,&minVal,&maxVal); //TODO flatten point2d?? or it takes max of width and height

    vector<uchar> status;
    cv::Mat H = cv::findHomography(ipts,jpts,status,CV_RANSAC, 0.004 * maxVal); //threshold from Snavely07
    return cv::countNonZero(status); //number of inliers
}
Every pair in the matches_matrix map is then scored and pushed into a list, which is sorted by homography-inlier percentage so that the pair with the fewest homography inliers (i.e. the widest baseline) comes first:
    //sort pairwise matches to find the lowest Homography inliers [Snavely07 4.2]
    cout << "Find highest match...";
    list<pair<int,pair<int,int> > > matches_sizes;
    //TODO: parallelize!
    for(std::map<std::pair<int,int>, std::vector<cv::DMatch> >::iterator i = matches_matrix.begin();
        i != matches_matrix.end(); ++i)
    {
        if((*i).second.size() < 100)
            matches_sizes.push_back(make_pair(100,(*i).first));
        else {
            int Hinliers = FindHomographyInliers2Views((*i).first.first,(*i).first.second);
            int percent = (int)(((double)Hinliers) / ((double)(*i).second.size()) * 100.0);
            cout << "[" << (*i).first.first << "," << (*i).first.second << " = "<<percent<<"] ";
            matches_sizes.push_back(make_pair((int)percent,(*i).first));
        }
    }
    cout << endl;
    matches_sizes.sort(sort_by_first); //sort the match pairs by homography-inlier percentage, lowest first

3: Computing the camera matrices from the sorted match pairs
    bool goodF = false;
    int highest_pair = 0;
    m_first_view = m_second_view = 0;
    //reverse iterate by number of matches
    for(list<pair<int,pair<int,int> > >::iterator highest_pair = matches_sizes.begin();
        highest_pair != matches_sizes.end() && !goodF;
        ++highest_pair)
    {
        m_second_view = (*highest_pair).second.second;
        m_first_view  = (*highest_pair).second.first;

        std::cout << " -------- " << imgs_names[m_first_view] << " and " << imgs_names[m_second_view] << " -------- " <<std::endl;
        //what if reconstrcution of first two views is bad? fallback to another pair
        //See if the Fundamental Matrix between these two views is good
        goodF = FindCameraMatrices(K, Kinv, distortion_coeff,
                                   imgpts[m_first_view],
                                   imgpts[m_second_view],
                                   imgpts_good[m_first_view],
                                   imgpts_good[m_second_view],
                                   P,
                                   P1,
                                   matches_matrix[std::make_pair(m_first_view,m_second_view)],
                                   tmp_pcloud
#ifdef __SFM__DEBUG__
                                   ,imgs[m_first_view],imgs[m_second_view]
#endif
                                   );

Here P and P1 are preset as:
cv::Matx34d P( 1,0,0,0,
               0,1,0,0,
               0,0,1,0),
            P1(1,0,0,0,
               0,1,0,0,
               0,0,1,0);

4: Computing the fundamental matrix and the essential matrix
    Mat F = GetFundamentalMat(imgpts1,imgpts2,imgpts1_good,imgpts2_good,matches
#ifdef __SFM__DEBUG__
                              ,img_1,img_2
#endif
                              );
    if(matches.size() < 100) { // || ((double)imgpts1_good.size() / (double)imgpts1.size()) < 0.25
        cerr << "not enough inliers after F matrix" << endl;
        return false;
    }

    //Essential matrix: compute then extract cameras [R|t]
    Mat_<double> E = K.t() * F * K; //according to HZ (9.12)

    //according to http://en.wikipedia.org/wiki/Essential_matrix#Properties_of_the_essential_matrix
    if(fabsf(determinant(E)) > 1e-07) {
        cout << "det(E) != 0 : " << determinant(E) << "\n";
        P1 = 0;
        return false;
    }
The essential matrix E is obtained from the computed fundamental matrix F together with the camera intrinsics K.
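Written out as formulas, the relation used above and the essential-matrix properties that are checked afterwards are:

E = K^{\top} F K \quad \text{(HZ 9.12)}, \qquad \det(E) = 0, \qquad \sigma_1(E) = \sigma_2(E) > 0,\ \sigma_3(E) = 0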
5: Next, the essential matrix is decomposed with SVD into a rotation part and a translation part. The decomposition is guarded by several checks: if fabsf(determinant(E)) > 1e-07, E cannot be decomposed (a valid essential matrix must have zero determinant); if determinant(R1) + 1.0 < 1e-09 (i.e. det(R1) is about -1), E is negated and decomposed again; and if fabsf(determinant(R1)) - 1.0 > 1e-07, the recovered R1 is not a valid rotation and the result is rejected.
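A condensed sketch of how these checks can be wired together around DecomposeEtoRandT (the wrapper name DecomposeWithChecks is hypothetical; in the book the same checks sit inline in FindCameraMatrices, and the usual using namespace cv / std from the other snippets is assumed):

// Hypothetical wrapper: decompose E into R1,R2,t1,t2 with the sanity checks described above.
bool DecomposeWithChecks(Mat_<double>& E, Mat_<double>& R1, Mat_<double>& R2,
                         Mat_<double>& t1, Mat_<double>& t2)
{
    // a valid essential matrix must (numerically) have zero determinant
    if (fabsf(determinant(E)) > 1e-07) {
        cout << "det(E) != 0 : " << determinant(E) << endl;
        return false;
    }
    if (!DecomposeEtoRandT(E, R1, R2, t1, t2)) return false;

    // det(R1) == -1 means we recovered a reflection: flip the sign of E and decompose again
    if (determinant(R1) + 1.0 < 1e-09) {
        E = -E;
        if (!DecomposeEtoRandT(E, R1, R2, t1, t2)) return false;
    }
    // a proper rotation must have |det(R)| == 1
    if (fabsf(determinant(R1)) - 1.0 > 1e-07) {
        cerr << "det(R) != +-1.0, this is not a rotation matrix" << endl;
        return false;
    }
    return true;
}

The decomposition itself is implemented by DecomposeEtoRandT: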
bool DecomposeEtoRandT(
    Mat_<double>& E,
    Mat_<double>& R1,
    Mat_<double>& R2,
    Mat_<double>& t1,
    Mat_<double>& t2)
{
#ifdef DECOMPOSE_SVD
    //Using HZ E decomposition
    Mat svd_u, svd_vt, svd_w;
    TakeSVDOfE(E,svd_u,svd_vt,svd_w); //SVD of E

    //check if first and second singular values are the same (as they should be)
    double singular_values_ratio = fabsf(svd_w.at<double>(0) / svd_w.at<double>(1));
    if(singular_values_ratio>1.0) singular_values_ratio = 1.0/singular_values_ratio; // flip ratio to keep it [0,1]
    if (singular_values_ratio < 0.7) {
        cout << "singular values are too far apart\n";
        return false;
    }

    Matx33d W(0,-1,0,   //HZ 9.13
              1,0,0,
              0,0,1);
    Matx33d Wt(0,1,0,
               -1,0,0,
               0,0,1);
    R1 = svd_u * Mat(W) * svd_vt;   //HZ 9.19
    R2 = svd_u * Mat(Wt) * svd_vt;  //HZ 9.19
    t1 = svd_u.col(2);  //u3
    t2 = -svd_u.col(2); //u3
#else
    //Using Horn E decomposition (the alternative method)
    DecomposeEssentialUsingHorn90(E[0],R1[0],R2[0],t1[0],t2[0]);
#endif
    return true;
}

The helper TakeSVDOfE performs the SVD of E, producing u, vt and w:
void TakeSVDOfE(Mat_<double>& E, Mat& svd_u, Mat& svd_vt, Mat& svd_w) {
#if 1
    //Using OpenCV's SVD
    SVD svd(E,SVD::MODIFY_A);
    svd_u = svd.u;
    svd_vt = svd.vt;
    svd_w = svd.w;
#else
    //Using Eigen's SVD
    cout << "Eigen3 SVD..\n";
    Eigen::Matrix3f e = Eigen::Map<Eigen::Matrix<double,3,3,Eigen::RowMajor> >((double*)E.data).cast<float>();
    Eigen::JacobiSVD<Eigen::MatrixXf> svd(e, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::MatrixXf Esvd_u = svd.matrixU();
    Eigen::MatrixXf Esvd_v = svd.matrixV();
    svd_u = (Mat_<double>(3,3) << Esvd_u(0,0), Esvd_u(0,1), Esvd_u(0,2),
                                  Esvd_u(1,0), Esvd_u(1,1), Esvd_u(1,2),
                                  Esvd_u(2,0), Esvd_u(2,1), Esvd_u(2,2));
    Mat_<double> svd_v = (Mat_<double>(3,3) << Esvd_v(0,0), Esvd_v(0,1), Esvd_v(0,2),
                                               Esvd_v(1,0), Esvd_v(1,1), Esvd_v(1,2),
                                               Esvd_v(2,0), Esvd_v(2,1), Esvd_v(2,2));
    svd_vt = svd_v.t();
    svd_w = (Mat_<double>(1,3) << svd.singularValues()[0], svd.singularValues()[1], svd.singularValues()[2]);
#endif

    cout << "----------------------- SVD ------------------------\n";
    cout << "U:\n"<<svd_u<<"\nW:\n"<<svd_w<<"\nVt:\n"<<svd_vt<<endl;
    cout << "----------------------------------------------------\n";
}
Below is the alternative (Horn 1990) decomposition of E, which also yields R1, R2, t1 and t2:
void DecomposeEssentialUsingHorn90(double _E[9], double _R1[9], double _R2[9], double _t1[3], double _t2[3]) {
    //from : http://people.csail.mit.edu/bkph/articles/Essential.pdf
#ifdef USE_EIGEN
    using namespace Eigen;

    Matrix3d E = Map<Matrix<double,3,3,RowMajor> >(_E);
    Matrix3d EEt = E * E.transpose();
    Vector3d e0e1 = E.col(0).cross(E.col(1)),
             e1e2 = E.col(1).cross(E.col(2)),
             e2e0 = E.col(2).cross(E.col(0));
    Vector3d b1,b2;

#if 1
    //Method 1
    Matrix3d bbt = 0.5 * EEt.trace() * Matrix3d::Identity() - EEt; //Horn90 (12)
    Vector3d bbt_diag = bbt.diagonal();
    if (bbt_diag(0) > bbt_diag(1) && bbt_diag(0) > bbt_diag(2)) {
        b1 = bbt.row(0) / sqrt(bbt_diag(0));
        b2 = -b1;
    } else if (bbt_diag(1) > bbt_diag(0) && bbt_diag(1) > bbt_diag(2)) {
        b1 = bbt.row(1) / sqrt(bbt_diag(1));
        b2 = -b1;
    } else {
        b1 = bbt.row(2) / sqrt(bbt_diag(2));
        b2 = -b1;
    }
#else
    //Method 2
    if (e0e1.norm() > e1e2.norm() && e0e1.norm() > e2e0.norm()) {
        b1 = e0e1.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    } else if (e1e2.norm() > e0e1.norm() && e1e2.norm() > e2e0.norm()) {
        b1 = e1e2.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    } else {
        b1 = e2e0.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    }
#endif

    //Horn90 (19)
    Matrix3d cofactors;
    cofactors.col(0) = e1e2;
    cofactors.col(1) = e2e0;
    cofactors.col(2) = e0e1;
    cofactors.transposeInPlace();

    //B = [b]_x , see Horn90 (6) and http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
    Matrix3d B1;
    B1 << 0,-b1(2),b1(1),
          b1(2),0,-b1(0),
          -b1(1),b1(0),0;
    Matrix3d B2;
    B2 << 0,-b2(2),b2(1),
          b2(2),0,-b2(0),
          -b2(1),b2(0),0;

    Map<Matrix<double,3,3,RowMajor> > R1(_R1),R2(_R2);

    //Horn90 (24)
    R1 = (cofactors.transpose() - B1*E) / b1.dot(b1);
    R2 = (cofactors.transpose() - B2*E) / b2.dot(b2);
    Map<Vector3d> t1(_t1),t2(_t2);
    t1 = b1;
    t2 = b2;

    cout << "Horn90 provided " << endl << R1 << endl << "and" << endl << R2 << endl;
#endif
}

At this point R1, R2, t1 and t2 have been recovered. P (the first camera) stays the preset identity-like matrix, and P1 can be assembled from these four values in four different combinations. Which of the four should be kept is analysed in the next part.
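As a rough sketch of what those four candidates look like (the helper name BuildP1 is hypothetical, t is assumed to be a 3x1 column vector as returned by the SVD decomposition above, and the OpenCV types from the rest of this post are assumed):

// Assemble [R|t] as a 3x4 projection matrix for one of the four candidate poses.
cv::Matx34d BuildP1(const cv::Mat_<double>& R, const cv::Mat_<double>& t) {
    return cv::Matx34d(R(0,0), R(0,1), R(0,2), t(0),
                       R(1,0), R(1,1), R(1,2), t(1),
                       R(2,0), R(2,1), R(2,2), t(2));
}

// The four combinations that have to be tested:
//   P1 = [R1|t1], [R1|t2], [R2|t1], [R2|t2]
// Only one of them places the triangulated points in front of both cameras,
// which is the test discussed in the next part.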