【CascadeClassifier】Learning the detectMultiScale function


Feel free to repost, just credit the source~

The basic principle of CascadeClassifier detection:

The xml file stores the pool of features produced by training, and the feature size is determined by the training parameters. Detection can be understood roughly like this: each fixed-size feature (the detection window) is compared with a region of the same size in the input image; if it matches, the position of that rectangular region is recorded, then the window slides to another region of the image and the comparison is repeated. Because objects in the input image come in arbitrary sizes (for example, an eye might occupy a 50x50 region in the input while the training window was 25x25), the match can only succeed once the input image has been shrunk to half its size. So there is also a step that progressively shrinks the image, i.e. builds an image pyramid.
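
To make the sliding-window-plus-pyramid idea concrete, here is a small standalone counting sketch. It is not OpenCV code; the image size, window size and step below are made-up numbers, and it only counts how many windows would be examined at each pyramid level.

#include <cstdio>

int main()
{
    const int imgW = 640, imgH = 480;   // assumed input image size
    const int winW = 25,  winH = 25;    // assumed training window size
    const double scaleFactor = 1.1;     // same meaning as in detectMultiScale
    const int step = 2;                 // sliding step in pixels

    for( double factor = 1.0; ; factor *= scaleFactor )
    {
        // the image is shrunk by 'factor' at this pyramid level
        int w = (int)(imgW / factor);
        int h = (int)(imgH / factor);
        if( w < winW || h < winH )
            break;                      // the training window no longer fits: stop

        int positions = 0;
        for( int y = 0; y + winH <= h; y += step )
            for( int x = 0; x + winW <= w; x += step )
                ++positions;            // each position is one window the cascade would classify

        std::printf("factor %.2f: scaled image %dx%d, %d candidate windows\n",
                    factor, w, h, positions);
    }
    return 0;
}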

Parameter explanation

void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
                                          double scaleFactor, int minNeighbors,
                                          int flags, Size minObjectSize, Size maxObjectSize)
{
    vector<int> fakeLevels;
    vector<double> fakeWeights;
    detectMultiScale( image, objects, fakeLevels, fakeWeights, scaleFactor,
        minNeighbors, flags, minObjectSize, maxObjectSize, false );
}

Parameters:
1. const Mat& image: the input image.
2. vector<Rect>& objects: the output vector of detected rectangles.
3. double scaleFactor=1.1: the factor by which the image is shrunk at each pyramid level; the default is 1.1.
4. minNeighbors=3: the number of neighboring rectangles required for a match to be accepted. Every feature match produces a rectangle, and only when several overlapping rectangles occur together is the detection kept (for example, for a face); the default is 3.
5. flags=0: can take the following values:
CASCADE_DO_CANNY_PRUNING=1: use Canny edge detection to skip image regions that contain too few or too many edges
CASCADE_SCALE_IMAGE=2: scale the image (the normal detection mode) rather than the feature windows
CASCADE_FIND_BIGGEST_OBJECT=4: detect only the largest object
CASCADE_DO_ROUGH_SEARCH=8: do a rough (coarse) search
6. minObjectSize, maxObjectSize: the size range of objects to match. (A minimal usage sketch follows this list.)
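
As a rough illustration of how these parameters are usually passed, here is a minimal usage sketch. The cascade file haarcascade_frontalface_alt.xml and the image test.jpg are placeholders, and the values 1.1, 3, 0 and Size(30, 30) simply mirror the defaults discussed above.

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#include <cstdio>

int main()
{
    // Placeholder paths: substitute your own cascade xml and test image.
    cv::CascadeClassifier cascade;
    if( !cascade.load("haarcascade_frontalface_alt.xml") )
        return -1;

    cv::Mat img = cv::imread("test.jpg"), gray;
    if( img.empty() )
        return -1;
    cv::cvtColor(img, gray, CV_BGR2GRAY);

    // Internally this builds the image pyramid described above: the image is
    // shrunk by scaleFactor at each level and the fixed-size training window
    // is slid over every level.
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(gray, faces, 1.1 /*scaleFactor*/, 3 /*minNeighbors*/,
                             0 /*flags*/, cv::Size(30, 30) /*minObjectSize*/);

    for( size_t i = 0; i < faces.size(); i++ )
        std::printf("face %d: x=%d y=%d w=%d h=%d\n", (int)i,
                    faces[i].x, faces[i].y, faces[i].width, faces[i].height);
    return 0;
}

Note that a larger minObjectSize skips the most expensive (least shrunk) pyramid levels, which is the usual way to speed detection up.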

The code below is annotated following the order in which the functions are called.

void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
                                          vector<int>& rejectLevels,
                                          vector<double>& levelWeights,
                                          double scaleFactor, int minNeighbors,
                                          int flags, Size minObjectSize, Size maxObjectSize,
                                          bool outputRejectLevels )
{
    const double GROUP_EPS = 0.2;

    CV_Assert( scaleFactor > 1 && image.depth() == CV_8U );

    if( empty() )
        return;

    // The cascade used here is still in the old format, so the code outside this if branch is not of interest
    if( isOldFormatCascade() )
    {
        MemStorage storage(cvCreateMemStorage(0));
        CvMat _image = image;
        CvSeq* _objects = cvHaarDetectObjectsForROC( &_image, oldCascade, storage, rejectLevels, levelWeights, scaleFactor,
                                              minNeighbors, flags, minObjectSize, maxObjectSize, outputRejectLevels );
        vector<CvAvgComp> vecAvgComp;
        Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);
        objects.resize(vecAvgComp.size());
        std::transform(vecAvgComp.begin(), vecAvgComp.end(), objects.begin(), getRect());
        return;
    }
    ...
}
CvSeq*
cvHaarDetectObjectsForROC( const CvArr* _img,
                     CvHaarClassifierCascade* cascade, CvMemStorage* storage,
                     std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
                     double scaleFactor, int minNeighbors, int flags,
                     CvSize minSize, CvSize maxSize, bool outputRejectLevels )
{
    const double GROUP_EPS = 0.2;
    CvMat stub, *img = (CvMat*)_img;
    cv::Ptr<CvMat> temp, sum, tilted, sqsum, normImg, sumcanny, imgSmall;
    CvSeq* result_seq = 0;
    cv::Ptr<CvMemStorage> temp_storage;

    std::vector<cv::Rect> allCandidates;
    std::vector<cv::Rect> rectList;
    std::vector<int> rweights;
    double factor;
    int coi;
    // Set a few booleans according to flags
    bool doCannyPruning = (flags & CV_HAAR_DO_CANNY_PRUNING) != 0;
    bool findBiggestObject = (flags & CV_HAAR_FIND_BIGGEST_OBJECT) != 0;
    bool roughSearch = (flags & CV_HAAR_DO_ROUGH_SEARCH) != 0;
    cv::Mutex mtx;

    if( !CV_IS_HAAR_CLASSIFIER(cascade) )
        CV_Error( !cascade ? CV_StsNullPtr : CV_StsBadArg, "Invalid classifier cascade" );

    if( !storage )
        CV_Error( CV_StsNullPtr, "Null storage pointer" );

    img = cvGetMat( img, &stub, &coi );
    if( coi )
        CV_Error( CV_BadCOI, "COI is not supported" );

    if( CV_MAT_DEPTH(img->type) != CV_8U )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    if( scaleFactor <= 1 )
        CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );

    // FIND_BIGGEST_OBJECT and CV_HAAR_SCALE_IMAGE cannot be used together
    if( findBiggestObject )
        flags &= ~CV_HAAR_SCALE_IMAGE;

    if( maxSize.height == 0 || maxSize.width == 0 )
    {
        maxSize.height = img->rows;
        maxSize.width = img->cols;
    }

    // The sum and sqsum created here are filled in later and used inside the matching function
    temp = cvCreateMat( img->rows, img->cols, CV_8UC1 );
    sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
    sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 );

    if( !cascade->hid_cascade )
        icvCreateHidHaarClassifierCascade(cascade);

    if( cascade->hid_cascade->has_tilted_features )
        tilted = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );

    result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), storage );

    if( CV_MAT_CN(img->type) > 1 )
    {
        cvCvtColor( img, temp, CV_BGR2GRAY );
        img = temp;
    }

    if( findBiggestObject )
        flags &= ~(CV_HAAR_SCALE_IMAGE|CV_HAAR_DO_CANNY_PRUNING);

    // This is the CV_HAAR_SCALE_IMAGE branch
    if( flags & CV_HAAR_SCALE_IMAGE )
    {
        // winSize0 is the feature-window size that was set at training time
        CvSize winSize0 = cascade->orig_window_size;
#ifdef HAVE_IPP
        int use_ipp = cascade->hid_cascade->ipp_stages != 0;

        if( use_ipp )
            normImg = cvCreateMat( img->rows, img->cols, CV_32FC1 );
#endif
        imgSmall = cvCreateMat( img->rows + 1, img->cols + 1, CV_8UC1 );

        // The loop below builds the image pyramid and runs detection at each level;
        // by default factor grows by 1.1x on every iteration
        for( factor = 1; ; factor *= scaleFactor )
        {
            // winSize is the enlarged feature window; here it is only used
            // as an exit condition for the loop further down
            CvSize winSize = { cvRound(winSize0.width*factor),
                                cvRound(winSize0.height*factor) };
            // sz is what is actually used later; the image is shrunk by factor each time
            CvSize sz = { cvRound( img->cols/factor ), cvRound( img->rows/factor ) };
            // sz1 is used in three places: as an exit condition, to build norm1 and mask1
            // (needed later during matching), and to compute stripCount, i.e. the number
            // of parallel strips used during matching
            CvSize sz1 = { sz.width - winSize0.width + 1, sz.height - winSize0.height + 1 };

            CvRect equRect = { icv_object_win_border, icv_object_win_border,
                winSize0.width - icv_object_win_border*2,
                winSize0.height - icv_object_win_border*2 };

            CvMat img1, sum1, sqsum1, norm1, tilted1, mask1;
            CvMat* _tilted = 0;

            // Conditions for exiting the loop or skipping this pyramid level
            if( sz1.width <= 0 || sz1.height <= 0 )
                break;
            if( winSize.width > maxSize.width || winSize.height > maxSize.height )
                break;
            if( winSize.width < minSize.width || winSize.height < minSize.height )
                continue;

            img1 = cvMat( sz.height, sz.width, CV_8UC1, imgSmall->data.ptr );
            sum1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, sum->data.ptr );
            sqsum1 = cvMat( sz.height+1, sz.width+1, CV_64FC1, sqsum->data.ptr );
            if( tilted )
            {
                tilted1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, tilted->data.ptr );
                _tilted = &tilted1;
            }
            norm1 = cvMat( sz1.height, sz1.width, CV_32FC1, normImg ? normImg->data.ptr : 0 );
            mask1 = cvMat( sz1.height, sz1.width, CV_8UC1, temp->data.ptr );

            // Resize the image for this pyramid level
            cvResize( img, &img1, CV_INTER_LINEAR );
            // Compute the integral images, which are used later
            cvIntegral( &img1, &sum1, &sqsum1, _tilted );

            // Step size, i.e. how far the window slides each time;
            // when factor is large the step becomes smaller
            int ystep = factor > 2 ? 1 : 2;
            const int LOCS_PER_THREAD = 1000;
            // Number of strips; running them in parallel is presumably for performance
            int stripCount = ((sz1.width/ystep)*(sz1.height + ystep-1)/ystep + LOCS_PER_THREAD/2)/LOCS_PER_THREAD;
            stripCount = std::min(std::max(stripCount, 1), 100);

#ifdef HAVE_IPP
            if( use_ipp )
            {
                cv::Mat fsum(sum1.rows, sum1.cols, CV_32F, sum1.data.ptr, sum1.step);
                cv::Mat(&sum1).convertTo(fsum, CV_32F, 1, -(1<<24));
            }
            else
#endif
                // This call appears to hand the integral images computed above to the
                // cascade for some preprocessing; exactly what it does is unclear
                cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );

            cv::Mat _norm1(&norm1), _mask1(&mask1);
            // This is the main matching step
            cv::parallel_for_(cv::Range(0, stripCount),
                         cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
                                (((sz1.height + stripCount - 1)/stripCount + ystep-1)/ystep)*ystep,
                                factor, cv::Mat(&sum1), cv::Mat(&sqsum1), &_norm1, &_mask1,
                                cv::Rect(equRect), allCandidates, rejectLevels, levelWeights, outputRejectLevels, &mtx));
        }
    }
    // This is the canny / biggest / rough branch
    else
    {
        int n_factors = 0;
        cv::Rect scanROI;

        cvIntegral( img, sum, sqsum, tilted );

        // Run Canny edge detection for pruning
        if( doCannyPruning )
        {
            sumcanny = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
            cvCanny( img, temp, 0, 50, 3 );
            cvIntegral( temp, sumcanny );
        }

        // Push factor and n_factors to their maximum values, so that below the feature
        // window can be scanned from large to small (the opposite of the earlier
        // small-to-large order)?
        for( n_factors = 0, factor = 1;
             factor*cascade->orig_window_size.width < img->cols - 10 &&
             factor*cascade->orig_window_size.height < img->rows - 10;
             n_factors++, factor *= scaleFactor )
            ;

        // Here scaleFactor goes from greater than 1 to less than 1, i.e. from enlarging to shrinking?
        if( findBiggestObject )
        {
            scaleFactor = 1./scaleFactor;
            factor *= scaleFactor;
        }
        else
            factor = 1;

        // Main detection loop
        for( ; n_factors-- > 0; factor *= scaleFactor )
        {
            const double ystep = std::max( 2., factor );
            CvSize winSize = { cvRound( cascade->orig_window_size.width * factor ),
                                cvRound( cascade->orig_window_size.height * factor )};
            CvRect equRect = { 0, 0, 0, 0 };
            int *p[4] = {0,0,0,0};
            int *pq[4] = {0,0,0,0};
            int startX = 0, startY = 0;
            int endX = cvRound((img->cols - winSize.width) / ystep);
            int endY = cvRound((img->rows - winSize.height) / ystep);

            if( winSize.width < minSize.width || winSize.height < minSize.height )
            {
                if( findBiggestObject )
                    break;
                continue;
            }

            if ( winSize.width > maxSize.width || winSize.height > maxSize.height )
            {
                if( !findBiggestObject )
                    break;
                continue;
            }

            cvSetImagesForHaarClassifierCascade( cascade, sum, sqsum, tilted, factor );
            cvZero( temp );

            // For Canny pruning: set up pointers into the Canny integral image (p) and the
            // plain integral image (pq) over the central region of the window
            if( doCannyPruning )
            {
                equRect.x = cvRound(winSize.width*0.15);
                equRect.y = cvRound(winSize.height*0.15);
                equRect.width = cvRound(winSize.width*0.7);
                equRect.height = cvRound(winSize.height*0.7);

                p[0] = (int*)(sumcanny->data.ptr + equRect.y*sumcanny->step) + equRect.x;
                p[1] = (int*)(sumcanny->data.ptr + equRect.y*sumcanny->step)
                            + equRect.x + equRect.width;
                p[2] = (int*)(sumcanny->data.ptr + (equRect.y + equRect.height)*sumcanny->step) + equRect.x;
                p[3] = (int*)(sumcanny->data.ptr + (equRect.y + equRect.height)*sumcanny->step)
                            + equRect.x + equRect.width;

                pq[0] = (int*)(sum->data.ptr + equRect.y*sum->step) + equRect.x;
                pq[1] = (int*)(sum->data.ptr + equRect.y*sum->step)
                            + equRect.x + equRect.width;
                pq[2] = (int*)(sum->data.ptr + (equRect.y + equRect.height)*sum->step) + equRect.x;
                pq[3] = (int*)(sum->data.ptr + (equRect.y + equRect.height)*sum->step)
                            + equRect.x + equRect.width;
            }

            if( scanROI.area() > 0 )
            {
                //adjust start_height and stop_height
                startY = cvRound(scanROI.y / ystep);
                endY = cvRound((scanROI.y + scanROI.height - winSize.height) / ystep);

                startX = cvRound(scanROI.x / ystep);
                endX = cvRound((scanROI.x + scanROI.width - winSize.width) / ystep);
            }

            cv::parallel_for_(cv::Range(startY, endY),
                cv::HaarDetectObjects_ScaleCascade_Invoker(cascade, winSize, cv::Range(startX, endX),
                                                           ystep, sum->step, (const int**)p,
                                                           (const int**)pq, allCandidates, &mtx ));

            if( findBiggestObject && !allCandidates.empty() && scanROI.area() == 0 )
            {
                rectList.resize(allCandidates.size());
                std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

                groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);

                if( !rectList.empty() )
                {
                    size_t i, sz = rectList.size();
                    cv::Rect maxRect;

                    for( i = 0; i < sz; i++ )
                    {
                        if( rectList[i].area() > maxRect.area() )
                            maxRect = rectList[i];
                    }

                    allCandidates.push_back(maxRect);

                    scanROI = maxRect;
                    int dx = cvRound(maxRect.width*GROUP_EPS);
                    int dy = cvRound(maxRect.height*GROUP_EPS);
                    scanROI.x = std::max(scanROI.x - dx, 0);
                    scanROI.y = std::max(scanROI.y - dy, 0);
                    scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
                    scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);

                    double minScale = roughSearch ? 0.6 : 0.4;
                    minSize.width = cvRound(maxRect.width*minScale);
                    minSize.height = cvRound(maxRect.height*minScale);
                }
            }
        }
    }

    // From here on, minNeighbors is used to filter the candidate rectangles and produce the output
    rectList.resize(allCandidates.size());
    if(!allCandidates.empty())
        std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

    if( minNeighbors != 0 || findBiggestObject )
    {
        if( outputRejectLevels )
        {
            groupRectangles(rectList, rejectLevels, levelWeights, minNeighbors, GROUP_EPS );
        }
        else
        {
            groupRectangles(rectList, rweights, std::max(minNeighbors, 1), GROUP_EPS);
        }
    }
    else
        rweights.resize(rectList.size(),0);

    if( findBiggestObject && rectList.size() )
    {
        CvAvgComp result_comp = {{0,0,0,0},0};

        for( size_t i = 0; i < rectList.size(); i++ )
        {
            cv::Rect r = rectList[i];
            if( r.area() > cv::Rect(result_comp.rect).area() )
            {
                result_comp.rect = r;
                result_comp.neighbors = rweights[i];
            }
        }
        cvSeqPush( result_seq, &result_comp );
    }
    else
    {
        for( size_t i = 0; i < rectList.size(); i++ )
        {
            CvAvgComp c;
            c.rect = rectList[i];
            c.neighbors = !rweights.empty() ? rweights[i] : 0;
            cvSeqPush( result_seq, &c );
        }
    }

    return result_seq;
}
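
The tail of the function hands all raw candidates to groupRectangles, where minNeighbors does the actual filtering. The sketch below (with made-up rectangles) shows that grouping behaviour in isolation; it calls cv::groupRectangles directly and is not code taken from detectMultiScale.

#include <opencv2/objdetect/objdetect.hpp>
#include <vector>
#include <cstdio>

int main()
{
    std::vector<cv::Rect> candidates;
    // Three near-identical candidates, as produced when several windows fire on a real face...
    candidates.push_back(cv::Rect(100, 100, 50, 50));
    candidates.push_back(cv::Rect(102,  98, 51, 49));
    candidates.push_back(cv::Rect( 99, 101, 50, 52));
    // ...and one isolated candidate, a likely false positive.
    candidates.push_back(cv::Rect(300, 300, 40, 40));

    // groupThreshold plays the role of minNeighbors and eps the role of GROUP_EPS above:
    // overlapping rectangles are merged, and clusters that are too small are dropped.
    cv::groupRectangles(candidates, 2, 0.2);

    // Only the averaged rectangle of the three-member cluster should remain.
    for( size_t i = 0; i < candidates.size(); i++ )
        std::printf("kept: x=%d y=%d w=%d h=%d\n",
                    candidates[i].x, candidates[i].y,
                    candidates[i].width, candidates[i].height);
    return 0;
}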

Comments and discussion are welcome~

Reference article:
http://blog.csdn.net/gdut2015go/article/details/48826159
