[OpenCV] Local-image perspective transformation built on find_obj


Perspective transformation is commonly used for image rectification. For example, in mobile-robot visual navigation the camera is mounted at a tilt angle to the ground rather than pointing straight down (orthographic projection), and it is often desirable to rectify the image into an orthographic view; this requires a perspective transform. The find_obj sample in the OpenCV source, however, does not perform this perspective warp itself.
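As a concrete illustration of that rectification, here is a minimal sketch (the file name and the four point pairs are made-up values; on a real robot they would come from calibrating the camera) that maps a known ground rectangle seen by the tilted camera to a top-down view:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main()
{
    Mat tilted = imread("ground.jpg");   // hypothetical input from the tilted camera

    // Corners of a known ground rectangle as seen in the tilted image
    // (made-up coordinates -- measure these for a real camera setup).
    Point2f src[4] = { Point2f(120,300), Point2f(520,300),
                       Point2f(620,470), Point2f( 20,470) };
    // Where those corners should land in the orthographic (top-down) view.
    Point2f dst[4] = { Point2f(100,100), Point2f(540,100),
                       Point2f(540,440), Point2f(100,440) };

    // Exact 3x3 perspective matrix from exactly four correspondences.
    Mat H = getPerspectiveTransform(src, dst);

    Mat topdown;
    warpPerspective(tilted, topdown, H, tilted.size());
    imshow("top-down", topdown);
    waitKey(0);
    return 0;
}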

① The general formula for a perspective transformation:

$$\begin{bmatrix} x' \\ y' \\ w' \end{bmatrix} = \begin{bmatrix} a_{11} & a_{12} & a_{13} \\ a_{21} & a_{22} & a_{23} \\ a_{31} & a_{32} & a_{33} \end{bmatrix} \begin{bmatrix} u \\ v \\ w \end{bmatrix}$$

Here u, v are the coordinates in the original image (with w = 1 for an ordinary image point), which map to the coordinates x, y in the transformed image, where

$$x = \frac{x'}{w'} = \frac{a_{11}u + a_{12}v + a_{13}}{a_{31}u + a_{32}v + a_{33}}, \qquad y = \frac{y'}{w'} = \frac{a_{21}u + a_{22}v + a_{23}}{a_{31}u + a_{32}v + a_{33}}$$

② The matrix can be split into four parts: $\begin{bmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{bmatrix}$ represents a linear transformation (rotation, scaling, shear); $\begin{bmatrix} a_{13} \\ a_{23} \end{bmatrix}$ represents translation; $[a_{31}\ a_{32}]$ produces the perspective effect; and $[a_{33}]$ represents the overall scale factor of the image.
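Projecting a single point is simple enough to write out directly. Below is a minimal standalone sketch of the per-point formula (the matrix values are hypothetical); it is exactly what locatePlanarObject in the code further down does for the object's four corners:

#include <cstdio>

// Apply a row-major 3x3 perspective matrix h to the point (u, v):
// compute the homogeneous result and divide by the third coordinate.
static void perspectivePoint(const double h[9], double u, double v,
                             double& x, double& y)
{
    double w = h[6]*u + h[7]*v + h[8];   // w' = a31*u + a32*v + a33
    x = (h[0]*u + h[1]*v + h[2]) / w;    // x = x'/w'
    y = (h[3]*u + h[4]*v + h[5]) / w;    // y = y'/w'
}

int main()
{
    // Hypothetical matrix: identity plus a small perspective term a31.
    double h[9] = { 1, 0, 0,
                    0, 1, 0,
                    0.001, 0, 1 };
    double x, y;
    perspectivePoint(h, 100, 50, x, y);
    printf("(100, 50) -> (%.2f, %.2f)\n", x, y);  // prints (90.91, 45.45)
    return 0;
}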


Here is the code:

#include "opencv2/objdetect/objdetect.hpp"

#include "opencv2/features2d/features2d.hpp"

#include "opencv2/highgui/highgui.hpp"#include "opencv2/calib3d/calib3d.hpp"#include "opencv2/nonfree/nonfree.hpp"#include "opencv2/imgproc/imgproc_c.h"#include "opencv2/legacy/legacy.hpp"#include "opencv2/legacy/compat.hpp"#include <iostream>#include <vector>#include <stdio.h>using namespace std;using namespace cv;static void help(){    printf(        "This program demonstrated the use of the SURF Detector and Descriptor using\n"        "either FLANN (fast approx nearst neighbor classification) or brute force matching\n"        "on planar objects.\n"        "Usage:\n"        "./find_obj <object_filename> <scene_filename>, default is box.png  and box_in_scene.png\n\n");    return;}// define whether to use approximate nearest-neighbor search#define USE_FLANN#ifdef USE_FLANNstatic voidflannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,           const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs ){    int length = (int)(objectDescriptors->elem_size/sizeof(float));    cv::Mat m_object(objectDescriptors->total, length, CV_32F);    cv::Mat m_image(imageDescriptors->total, length, CV_32F);    // copy descriptors    CvSeqReader obj_reader;    float* obj_ptr = m_object.ptr<float>(0);    cvStartReadSeq( objectDescriptors, &obj_reader );    for(int i = 0; i < objectDescriptors->total; i++ )    {        const float* descriptor = (const float*)obj_reader.ptr;        CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );        memcpy(obj_ptr, descriptor, length*sizeof(float));        obj_ptr += length;    }    CvSeqReader img_reader;    float* img_ptr = m_image.ptr<float>(0);    cvStartReadSeq( imageDescriptors, &img_reader );    for(int i = 0; i < imageDescriptors->total; i++ )    {        const float* descriptor = (const float*)img_reader.ptr;        CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );        memcpy(img_ptr, descriptor, length*sizeof(float));        img_ptr += length;    }    // find nearest neighbors using FLANN    cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);    cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);    cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4));  // using 4 randomized kdtrees    flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked    int* indices_ptr = m_indices.ptr<int>(0);    float* dists_ptr = m_dists.ptr<float>(0);    for (int i=0;i<m_indices.rows;++i) {        if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {            ptpairs.push_back(i);            ptpairs.push_back(indices_ptr[2*i]);        }    }}#elsestatic doublecompareSURFDescriptors( const float* d1, const float* d2, double best, int length ){    double total_cost = 0;    assert( length % 4 == 0 );    for( int i = 0; i < length; i += 4 )    {        double t0 = d1[i  ] - d2[i  ];        double t1 = d1[i+1] - d2[i+1];        double t2 = d1[i+2] - d2[i+2];        double t3 = d1[i+3] - d2[i+3];        total_cost += t0*t0 + t1*t1 + t2*t2 + t3*t3;        if( total_cost > best )            break;    }    return total_cost;}static intnaiveNearestNeighbor( const float* vec, int laplacian,                      const CvSeq* model_keypoints,                      const CvSeq* model_descriptors ){    int length = (int)(model_descriptors->elem_size/sizeof(float));    int i, neighbor = -1;    double d, dist1 = 1e6, dist2 = 1e6;    CvSeqReader reader, kreader;    cvStartReadSeq( model_keypoints, &kreader, 0 );    cvStartReadSeq( model_descriptors, &reader, 0 );    for( i = 0; i < 
model_descriptors->total; i++ )    {        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;        const float* mvec = (const float*)reader.ptr;        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );        if( laplacian != kp->laplacian )            continue;        d = compareSURFDescriptors( vec, mvec, dist2, length );        if( d < dist1 )        {            dist2 = dist1;            dist1 = d;            neighbor = i;        }        else if ( d < dist2 )            dist2 = d;    }    if ( dist1 < 0.6*dist2 )        return neighbor;    return -1;}static voidfindPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,           const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs ){    int i;    CvSeqReader reader, kreader;    cvStartReadSeq( objectKeypoints, &kreader );    cvStartReadSeq( objectDescriptors, &reader );    ptpairs.clear();    for( i = 0; i < objectDescriptors->total; i++ )    {        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;        const float* descriptor = (const float*)reader.ptr;        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );        int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian, imageKeypoints, imageDescriptors );        if( nearest_neighbor >= 0 )        {            ptpairs.push_back(i);            ptpairs.push_back(nearest_neighbor);        }    }}#endif/* a rough implementation for object location */static intlocatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,                    const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,                    const CvPoint src_corners[4], CvPoint dst_corners[4] ){    double h[9];    CvMat _h = cvMat(3, 3, CV_64F, h);    vector<int> ptpairs;    vector<CvPoint2D32f> pt1, pt2;    CvMat _pt1, _pt2;    int i, n;#ifdef USE_FLANN    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );#else    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );#endif    n = (int)(ptpairs.size()/2);    if( n < 4 )        return 0;    pt1.resize(n);    pt2.resize(n);    for( i = 0; i < n; i++ )    {        pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;        pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;    }    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))        return 0;    for( i = 0; i < 4; i++ )    {        double x = src_corners[i].x, y = src_corners[i].y;        double Z = 1./(h[6]*x + h[7]*y + h[8]);        double X = (h[0]*x + h[1]*y + h[2])*Z;        double Y = (h[3]*x + h[4]*y + h[5])*Z;        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));    }    return 1;}int main(int argc, char** argv){    const char* object_filename = argc == 3 ? argv[1] : "E:\\image0\\1.jpg";    const char* scene_filename = argc == 3 ? 
argv[2] : "E:\\image0\\2.jpg";    cv::initModule_nonfree();    help();    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );    if( !object || !image )    {        fprintf( stderr, "Can not load %s and/or %s\n",            object_filename, scene_filename );        exit(-1);    }Mat image01=imread("E:\image0\\2.jpg");        Mat image02=imread("E:\image0\\1.jpg");      Mat image1,image2;        cvtColor(image01,image1,CV_RGB2GRAY);      cvtColor(image02,image2,CV_RGB2GRAY);      CvMemStorage* storage = cvCreateMemStorage(0);    cvNamedWindow("Object", 1);    cvNamedWindow("Object Correspond", 1);    static CvScalar colors[] =    {        {{0,0,255}},        {{0,128,255}},        {{0,255,255}},        {{0,255,0}},        {{255,128,0}},        {{255,255,0}},        {{255,0,0}},        {{255,0,255}},        {{255,255,255}}    };    IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);    cvCvtColor( object, object_color, CV_GRAY2BGR );    CvSeq* objectKeypoints = 0, *objectDescriptors = 0;    CvSeq* imageKeypoints = 0, *imageDescriptors = 0;    int i;    CvSURFParams params = cvSURFParams(500, 1);    double tt = (double)cvGetTickCount();    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );    printf("Object Descriptors: %d\n", objectDescriptors->total);    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );    printf("Image Descriptors: %d\n", imageDescriptors->total);    tt = (double)cvGetTickCount() - tt;    printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));    CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};    CvPoint dst_corners[4];    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );    cvCopy( object, correspond );    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );    cvCopy( image, correspond );    cvResetImageROI( correspond );#ifdef USE_FLANN    printf("Using approximate nearest neighbor search\n");#endif    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,        imageDescriptors, src_corners, dst_corners ))    {        for( i = 0; i < 4; i++ )        {            CvPoint r1 = dst_corners[i%4];            CvPoint r2 = dst_corners[(i+1)%4];            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),                cvPoint(r2.x, r2.y+object->height ), colors[8],2);        }    }    vector<int> ptpairs;#ifdef USE_FLANN    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );#else    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );#endif    for( i = 0; i < (int)ptpairs.size(); i += 2 )    {        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );        cvLine( correspond, cvPointFrom32f(r1->pt),            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );    }    cvShowImage( "Object Correspond", correspond );    for( i = 0; i < objectKeypoints->total; i++ )    {        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );        CvPoint center;        int radius;        center.x = cvRound(r->pt.x);    
    center.y = cvRound(r->pt.y);        radius = cvRound(r->size*1.2/9.*2);        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );    }    cvShowImage( "Object", object_color );        //提取特征点        SurfFeatureDetector surfDetector(800);  // 海塞矩阵阈值      vector<KeyPoint> keyPoint1,keyPoint2;        surfDetector.detect(image1,keyPoint1);        surfDetector.detect(image2,keyPoint2);       //特征点描述,为下边的特征点匹配做准备        SurfDescriptorExtractor SurfDescriptor;        Mat imageDesc1,imageDesc2;        SurfDescriptor.compute(image1,keyPoint1,imageDesc1);        SurfDescriptor.compute(image2,keyPoint2,imageDesc2);            //获得匹配特征点,并提取最优配对         FlannBasedMatcher matcher;      vector<DMatch> matchePoints;        matcher.match(imageDesc1,imageDesc2,matchePoints,Mat());      sort(matchePoints.begin(),matchePoints.end()); //特征点排序          //获取排在前N个的最优匹配特征点      vector<Point2f> imagePoints1,imagePoints2;        for(int i=0;i<10;i++)      {                 imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);               imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);           }        //获取图像1到图像2的投影映射矩阵 尺寸为3*3      Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC);      ////也可以使用getPerspectiveTransform方法获得透视变换矩阵,不过要求只能有4个点,效果稍差      //Mat   homo=getPerspectiveTransform(imagePoints1,imagePoints2);      cout<<"变换矩阵为:\n"<<homo<<endl<<endl; //输出映射矩阵          double adjustValue=image1.cols;      Mat adjustMat=(Mat_<double>(3,3)<<1.0,0,35,0,1.0,65,0,0,1.0);      cout<<"调整矩阵为:\n"<<adjustMat<<endl<<endl;      cout<<"调整后变换矩阵为:\n"<<adjustMat*homo<<endl;        //图像配准      Mat imageTransform1,imageTransform2,imageTransform3;      warpPerspective(image01,imageTransform1,homo,Size(image02.cols,image02.rows));      warpPerspective(image01,imageTransform2,adjustMat*homo,Size(image02.cols*1.3,image02.rows*1.8));cvtColor(imageTransform1,imageTransform3,CV_RGB2GRAY);     imshow("透视矩阵变换",imageTransform3);      cvWaitKey(0);    cvDestroyWindow("Object");    cvDestroyWindow("Object Correspond");    return 0;}
Note: find_obj loads its images as IplImage*, while warpPerspective() works on cv::Mat, so the images are read from disk a second time here. The approach is not elegant, but the results are correct.
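One way to avoid the second read, sketched below with the OpenCV 2.x Mat(IplImage*) constructor (note that find_obj loads the images as grayscale, so a color reload would still be needed if color output is wanted):

// Wrap the already-loaded IplImage headers as cv::Mat without touching disk.
// copyData = false shares the pixel buffer; pass true if the Mat must own it.
Mat objectMat(object, /*copyData=*/false);
Mat sceneMat(image, /*copyData=*/false);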

The resulting images are shown below:

Because the matrix estimation carries some error, the image after the perspective transform is not as sharp as the original target image: the text appears slightly blurred and deformed.
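Part of that blur comes from resampling during the warp rather than from the homography itself: warpPerspective interpolates with INTER_LINEAR by default. A small tweak worth trying (not used in the code above) is bicubic interpolation, which often keeps text edges a bit crisper:

// Same warp as above, but with bicubic instead of the default bilinear
// interpolation.
warpPerspective(image01, imageTransform1, homo,
                Size(image02.cols, image02.rows), INTER_CUBIC);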

