opencv之HOG源代码注释

来源:互联网 发布:小米3抢购软件 编辑:程序博客网 时间:2024/06/05 11:34

在阅读的过程中主要参考tornadomeet的博文,在这里表示感谢。同时在阅读的过程中也发现了其中的一些不足,在我的注释中会一一指出。由于本人能力有限,对源代码的理解还存在不足,比如useCache部分,还有weight的计算过程都没有进行深究。由于代码本身过长,所以会另外写一篇文章对代码进行分析。下面进入正题,首先是对HOGDescriptor结构体的声明部分进行简单的注释。

// HOG (Histogram of Oriented Gradients) descriptor/detector, as declared in
// OpenCV's objdetect module. Computes dense HOG features over sliding windows
// and (with an SVM detector vector set) performs pedestrian detection.
struct CV_EXPORTS_W HOGDescriptor
{
public:
    enum { L2Hys=0 };              // block-normalization scheme: L2-norm followed by clipping and renormalization
    enum { DEFAULT_NLEVELS=64 };   // default number of image-pyramid levels for multi-scale detection

    // Default constructor: 64x128 window, 16x16 blocks, 8x8 stride, 8x8 cells, 9 bins.
    CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
        cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
        histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
        nlevels(HOGDescriptor::DEFAULT_NLEVELS)
    {}

    // Fully-parameterized constructor (note: gamma correction defaults to false here,
    // while the default constructor enables it).
    CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
                  Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
                  int _histogramNormType=HOGDescriptor::L2Hys,
                  double _L2HysThreshold=0.2, bool _gammaCorrection=false,
                  int _nlevels=HOGDescriptor::DEFAULT_NLEVELS)
    : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),
    nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),
    histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
    gammaCorrection(_gammaCorrection), nlevels(_nlevels)
    {}

    // Construct from a saved parameter file: loads the first top-level node of `filename`.
    CV_WRAP HOGDescriptor(const String& filename)
    {
        load(filename);
    }

    // Copy constructor: copies all descriptor parameters (including svmDetector) from d.
    HOGDescriptor(const HOGDescriptor& d)
    {
        d.copyTo(*this);
    }

    virtual ~HOGDescriptor() {}

    // Dimensionality of the per-window descriptor vector
    // (nbins * cells-per-block * blocks-per-window).
    CV_WRAP size_t getDescriptorSize() const;
    // Checks that svmDetector has a legal size: empty, descriptor size, or descriptor size + 1 (bias term).
    CV_WRAP bool checkDetectorSize() const;
    // Gaussian sigma for the per-block spatial weighting window.
    CV_WRAP double getWinSigma() const;

    // Sets the linear SVM coefficient vector used by detect()/detectMultiScale().
    CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);

    // Reads the descriptor parameters (possibly including the SVM vector) from file node fn.
    virtual bool read(FileNode& fn);
    // Writes the parameters into a node named `objname` of file storage fs.
    virtual void write(FileStorage& fs, const String& objname) const;
    // Loads the node named `objname` (or the first top-level node) from `filename`.
    CV_WRAP virtual bool load(const String& filename, const String& objname=String());
    // Saves the current parameters to `filename` under node `objname`.
    CV_WRAP virtual void save(const String& filename, const String& objname=String()) const;
    // Copies all parameters of this descriptor into c.
    virtual void copyTo(HOGDescriptor& c) const;

    // Computes HOG descriptors for every scan window of img (or only the windows whose
    // top-left corners are given in `locations`), concatenated into `descriptors`.
    CV_WRAP virtual void compute(const Mat& img, CV_OUT vector<float>& descriptors, Size winStride=Size(), Size padding=Size(), const vector<Point>& locations=vector<Point>()) const;

    // Single-scale detection: scans img (or only `searchLocations`) and records the
    // top-left corners of windows classified positive, plus their SVM scores in `weights`.
    CV_WRAP virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
                        CV_OUT vector<double>& weights,
                        double hitThreshold=0, Size winStride=Size(),
                        Size padding=Size(),
                        const vector<Point>& searchLocations=vector<Point>()) const;
    // Same as above without returning the confidence weights.
    virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
                        double hitThreshold=0, Size winStride=Size(),
                        Size padding=Size(),
                        const vector<Point>& searchLocations=vector<Point>()) const;

    // Multi-scale detection: repeatedly resizes img by `scale` and runs detect() at each
    // level; detections are grouped (mean-shift grouping optional) into `foundLocations`.
    CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
                                  CV_OUT vector<double>& foundWeights, double hitThreshold=0,
                                  Size winStride=Size(), Size padding=Size(), double scale=1.05,
                                  double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;
    // Same as above without returning the confidence weights.
    virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
                                  double hitThreshold=0, Size winStride=Size(),
                                  Size padding=Size(), double scale=1.05,
                                  double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;

    // Computes the gradient-magnitude image `grad` and quantized-orientation image of img.
    // NOTE: the third parameter is named `angleOfs` in this declaration but is the same
    // quantized-angle image called `qangle` in the implementation.
    CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
                                 Size paddingTL=Size(), Size paddingBR=Size()) const;

    // Pre-trained linear SVM coefficient vectors for people detection
    // (default 64x128 detector, and the Daimler 48x96 detector).
    CV_WRAP static vector<float> getDefaultPeopleDetector();
    CV_WRAP static vector<float> getDaimlerPeopleDetector();

    CV_PROP Size winSize;          // detection window size, default 64x128
    CV_PROP Size blockSize;        // block size in pixels, default 16x16
    CV_PROP Size blockStride;      // block step, default 8x8 (blocks overlap)
    CV_PROP int nbins;             // orientation-histogram bins per cell, default 9
    CV_PROP Size cellSize;         // cell size within a block, default 8x8
    CV_PROP int derivAperture;     // assumed to be the derivative-filter aperture — not used in the visible code; TODO confirm
    CV_PROP double winSigma;       // Gaussian smoothing sigma for block weighting; <0 means "use default"
    CV_PROP int histogramNormType; // block-histogram normalization type (only L2Hys declared)
    CV_PROP double L2HysThreshold; // clipping threshold used by L2-Hys normalization
    CV_PROP bool gammaCorrection;  // whether to apply gamma (sqrt) correction before gradients
    CV_PROP vector<float> svmDetector; // linear SVM coefficients (optionally + bias)
    CV_PROP int nlevels;           // maximum number of pyramid levels for detectMultiScale
};


具体的实现细节:

namespace cv{//获取描述算子大小size_t HOGDescriptor::getDescriptorSize() const{    CV_Assert(blockSize.width % cellSize.width == 0 &&        blockSize.height % cellSize.height == 0);    CV_Assert((winSize.width - blockSize.width) % blockStride.width == 0 &&        (winSize.height - blockSize.height) % blockStride.height == 0 );    return (size_t)nbins*        (blockSize.width/cellSize.width)*        (blockSize.height/cellSize.height)*        ((winSize.width - blockSize.width)/blockStride.width + 1)*        ((winSize.height - blockSize.height)/blockStride.height + 1);        //9*(16/8)*(16/8)*((64-16)/8+1)*((128-16)/8+1)=9*2*2*7*15=3780,实际上的检测算子为3781,多一维表示偏置}//获得Sigma值double HOGDescriptor::getWinSigma() const{    return winSigma >= 0 ? winSigma : (blockSize.width + blockSize.height)/8.;}//检查探测器detector维度:可以为0;descriptor维度;descriptor维度加一bool HOGDescriptor::checkDetectorSize() const{    size_t detectorSize = svmDetector.size(), descriptorSize = getDescriptorSize();    return detectorSize == 0 ||        detectorSize == descriptorSize ||        detectorSize == descriptorSize + 1;}//根据传递参数设定svmDetector内容,并检查detector尺寸void HOGDescriptor::setSVMDetector(InputArray _svmDetector){    _svmDetector.getMat().convertTo(svmDetector, CV_32F);    CV_Assert( checkDetectorSize() );}#define CV_TYPE_NAME_HOG_DESCRIPTOR "opencv-object-detector-hog"//读操作,根据fileNode内容对当前HOGdescriptor参数进行设定,其中包可能括svmDetector的设定bool HOGDescriptor::read(FileNode& obj){    if( !obj.isMap() )        return false;    FileNodeIterator it = obj["winSize"].begin();    it >> winSize.width >> winSize.height;    it = obj["blockSize"].begin();    it >> blockSize.width >> blockSize.height;    it = obj["blockStride"].begin();    it >> blockStride.width >> blockStride.height;    it = obj["cellSize"].begin();    it >> cellSize.width >> cellSize.height;    obj["nbins"] >> nbins;    obj["derivAperture"] >> derivAperture;    obj["winSigma"] >> winSigma;    obj["histogramNormType"] >> histogramNormType;    obj["L2HysThreshold"] >> 
L2HysThreshold;    obj["gammaCorrection"] >> gammaCorrection;    FileNode vecNode = obj["SVMDetector"];    if( vecNode.isSeq() )    {        vecNode >> svmDetector;        CV_Assert(checkDetectorSize());    }    return true;}//将当前HOGDescriptor的参数内容写入文件进行保存,并使用objname进行区分。其中可能包括svmDetector内容void HOGDescriptor::write(FileStorage& fs, const String& objName) const{    if( !objName.empty() )        fs << objName;    fs << "{" CV_TYPE_NAME_HOG_DESCRIPTOR    << "winSize" << winSize    << "blockSize" << blockSize    << "blockStride" << blockStride    << "cellSize" << cellSize    << "nbins" << nbins    << "derivAperture" << derivAperture    << "winSigma" << getWinSigma()    << "histogramNormType" << histogramNormType    << "L2HysThreshold" << L2HysThreshold    << "gammaCorrection" << gammaCorrection;    if( !svmDetector.empty() )        fs << "SVMDetector" << "[:" << svmDetector << "]";    fs << "}";}//在文件中读取名为objname节点的信息(信息内容为descriptor参数)bool HOGDescriptor::load(const String& filename, const String& objname){    FileStorage fs(filename, FileStorage::READ);    FileNode obj = !objname.empty() ? fs[objname] : fs.getFirstTopLevelNode();    return read(obj);}//将当前descriptor参数写入名为objname的目标节点void HOGDescriptor::save(const String& filename, const String& objName) const{    FileStorage fs(filename, FileStorage::WRITE);    write(fs, !objName.empty() ? 
objName : FileStorage::getDefaultObjectName(filename));}//由上可以判定出该fileStorage的结构:objname---param;objname---param;...一个文件中可以含有多组参数,并能够实现随机读取//复制操作,将当前数据复制给descriptor cvoid HOGDescriptor::copyTo(HOGDescriptor& c) const{    c.winSize = winSize;    c.blockSize = blockSize;    c.blockStride = blockStride;    c.cellSize = cellSize;    c.nbins = nbins;    c.derivAperture = derivAperture;    c.winSigma = winSigma;    c.histogramNormType = histogramNormType;    c.L2HysThreshold = L2HysThreshold;    c.gammaCorrection = gammaCorrection;    c.svmDetector = svmDetector;}//计算图像img的梯度幅度图像grad和梯度方向图像qangle.//img原始输入图像//grad、qangle保存计算结果//paddingTL(TopLeft)为需要在原图像img左上角扩增的尺寸,//paddingBR(BottomRight)为需要在img图像右下角扩增的尺寸。//确定每个像素对应的梯度幅度值及方向void HOGDescriptor::computeGradient(const Mat& img, Mat& grad, Mat& qangle,                                    Size paddingTL, Size paddingBR) const{    CV_Assert( img.type() == CV_8U || img.type() == CV_8UC3 );//判定img数据类型    Size gradsize(img.cols + paddingTL.width + paddingBR.width,                  img.rows + paddingTL.height + paddingBR.height);//确定梯度图的尺寸大小,原始图像大小加扩展部分    grad.create(gradsize, CV_32FC2);  //    qangle.create(gradsize, CV_8UC2); // 均为2通道    Size wholeSize;    Point roiofs;    img.locateROI(wholeSize, roiofs);    //locateROI的作用是,定位当前矩阵(可能是原始矩阵中的部分)在原始矩阵中的位置,    //whloeSize表示原始矩阵的尺寸,roiofs表示当前矩阵在原始矩阵的相对位置。    //这里假设img为独立存在,则wholeSize表示img大小,ofs为(0,0)这里的img是整幅图像,而不是滑动窗口内图像    int i, x, y;    int cn = img.channels();//获得img的通道数    Mat_<float> _lut(1, 256);    const float* lut = &_lut(0,0);//获得指向_lut初始位置的指针    //对_lut进行初始化操作,这里分两种情形:gamma校正or not,    if( gammaCorrection )        for( i = 0; i < 256; i++ )            _lut(0,i) = std::sqrt((float)i);//所谓校正就是开平方,lut(0,i)取值范围缩小    else        for( i = 0; i < 256; i++ )            _lut(0,i) = (float)i;    //在opencv的core.hpp里面有AutoBuffer<>()函数,该函数为自动分配一段指定大小的内存,并且可以指定内存中数据的类型。    AutoBuffer<int> mapbuf(gradsize.width + gradsize.height + 4);    //这里开辟空间大小为梯度图宽+高+4,类型为整型    int* xmap = 
(int*)mapbuf + 1;//两个指针,xmap指向mapBuf的头部加一,基本上是width范围内    int* ymap = xmap + gradsize.width + 2;//ymap指向hight范围内    //简单了解xmap,ymap指向位置 mapBuf :[_[xmap...]_ _[ymap...]_](前者长度为 width,后者长度为 height)    const int borderType = (int)BORDER_REFLECT_101;    //确定边界扩充类型为BORDER_REFLECT_101,扩充格式如右:BORDER_REFLECT_101   gfedcb|abcdefgh|gfedcba    /*int borderInterpolate(int p, int len, int borderType)      该函数的作用是已知扩展后像素位置,反推该像素在原始图像中的位置,这里的位置的相对坐标原点都是原始图像中的左上方位置      p表示在扩展后沿某坐标轴像素位置(img坐标原点),len表示沿该坐标轴原始图像宽度,borderType表示扩展类型    */    //xmap中内容指向x方向扩展图像grad像素位置(下标)对应原始图像的位置(值),    //ymap 指向y方向扩张图像中像素位置对应原始图像中的位置    for( x = -1; x < gradsize.width + 1; x++ )        xmap[x] = borderInterpolate(x - paddingTL.width + roiofs.x,                        wholeSize.width, borderType) - roiofs.x;    for( y = -1; y < gradsize.height + 1; y++ )        ymap[y] = borderInterpolate(y - paddingTL.height + roiofs.y,                        wholeSize.height, borderType) - roiofs.y;    //mapBuf最终内容是位置一一对应关系,grad中位置(grad图像坐标原点)--->img中原始位置(原始img图像坐标原点),目的是方便查找    //x = -1、width在扩展图像grad中也是不存在的。这里-1的作用就是在y == 0 的时候能够取到其perPixel(grad图像中)    int width = gradsize.width;    AutoBuffer<float> _dbuf(width*4);//创建新的内存单元,用来存储单步计算得得到的 dx、dy、mag、angle    float* dbuf = _dbuf    //Dx为水平梯度,Dy为垂直梯度,Mag为梯度幅度,Angle为梯度方向,可知在内存中是连续存在的    //缓存,暂时存放数据    Mat Dx(1, width, CV_32F, dbuf);    Mat Dy(1, width, CV_32F, dbuf + width);    Mat Mag(1, width, CV_32F, dbuf + width*2);    Mat Angle(1, width, CV_32F, dbuf + width*3);//非常不错的编程技巧,对内存单元的灵活把握    int _nbins = nbins;//得到bin值,默认为9    float angleScale = (float)(_nbins/CV_PI);//确定角度值变换系数,用来将得到的角度划分到9个bin中#ifdef HAVE_IPP    Mat lutimg(img.rows,img.cols,CV_MAKETYPE(CV_32F,cn));    Mat hidxs(1, width, CV_32F);    Ipp32f* pHidxs  = (Ipp32f*)hidxs.data;    Ipp32f* pAngles = (Ipp32f*)Angle.data;    IppiSize roiSize;    roiSize.width = img.cols;    roiSize.height = img.rows;    for( y = 0; y < roiSize.height; y++ )    {       const uchar* imgPtr = img.data + y*img.step;       float* 
imglutPtr = (float*)(lutimg.data + y*lutimg.step);       for( x = 0; x < roiSize.width*cn; x++ )       {          imglutPtr[x] = lut[imgPtr[x]];       }    }#endif    //按行遍历计算梯度图    for( y = 0; y < gradsize.height; y++ )    {#ifdef HAVE_IPP        const float* imgPtr  = (float*)(lutimg.data + lutimg.step*ymap[y]);        const float* prevPtr = (float*)(lutimg.data + lutimg.step*ymap[y-1]);        const float* nextPtr = (float*)(lutimg.data + lutimg.step*ymap[y+1]);#else        //获得原始img图像中对应像素,垂直方向上一个像素、下一个像素,简单的图像遍历        const uchar* imgPtr  = img.data + img.step*ymap[y];        const uchar* prevPtr = img.data + img.step*ymap[y-1];        const uchar* nextPtr = img.data + img.step*ymap[y+1];//通过简单的映射表得到梯度图像中每个像素对应的原始img像素#endif        //获得grad图像当前像素指针,获得梯度方向当前像素指针        float* gradPtr = (float*)grad.ptr(y);        uchar* qanglePtr = (uchar*)qangle.ptr(y);        if( cn == 1 )//如果原始img图像为单通道图像        {            for( x = 0; x < width; x++ )//遍历当前行所有列像素,并将计算结果保存到dbuf中,水平梯度,垂直梯度            {                int x1 = xmap[x];//获得x位置对应原始图像中位置x1#ifdef HAVE_IPP                dbuf[x] = (float)(imgPtr[xmap[x+1]] - imgPtr[xmap[x-1]]);                dbuf[width + x] = (float)(nextPtr[x1] - prevPtr[x1]);#else                //计算水平梯度值                //xmap[x+1]得到原始图像中右侧像素位置,imgPtr[xmap[x+1]]得到该像素的像素值(0~255)                //lut[...]得到float型值,可以进行gamma校正,存在一一映射关系                //dx = [-1 0 +1]                dbuf[x] = (float)(lut[imgPtr[xmap[x+1]]] - lut[imgPtr[xmap[x-1]]]);//Dx                //计算垂直梯度值                //同理dy = [-1 0 +1]T                dbuf[width + x] = (float)(lut[nextPtr[x1]] - lut[prevPtr[x1]]);//Dy#endif            }        }        else//原始图像为多通道时        {            for( x = 0; x < width; x++ )            {                int x1 = xmap[x]*3;                float dx0, dy0, dx, dy, mag0, mag;#ifdef HAVE_IPP                const float* p2 = imgPtr + xmap[x+1]*3;                const float* p0 = imgPtr + xmap[x-1]*3;                dx0 = p2[2] - p0[2];   
             dy0 = nextPtr[x1+2] - prevPtr[x1+2];                mag0 = dx0*dx0 + dy0*dy0;                dx = p2[1] - p0[1];                dy = nextPtr[x1+1] - prevPtr[x1+1];                mag = dx*dx + dy*dy;                if( mag0 < mag )                {                    dx0 = dx;                    dy0 = dy;                    mag0 = mag;                }                dx = p2[0] - p0[0];                dy = nextPtr[x1] - prevPtr[x1];                mag = dx*dx + dy*dy;#else                const uchar* p2 = imgPtr + xmap[x+1]*3;                const uchar* p0 = imgPtr + xmap[x-1]*3;                dx0 = lut[p2[2]] - lut[p0[2]];                dy0 = lut[nextPtr[x1+2]] - lut[prevPtr[x1+2]];                mag0 = dx0*dx0 + dy0*dy0;                dx = lut[p2[1]] - lut[p0[1]];                dy = lut[nextPtr[x1+1]] - lut[prevPtr[x1+1]];                mag = dx*dx + dy*dy;                if( mag0 < mag )                {                    dx0 = dx;                    dy0 = dy;                    mag0 = mag;                }                dx = lut[p2[0]] - lut[p0[0]];                dy = lut[nextPtr[x1]] - lut[prevPtr[x1]];                mag = dx*dx + dy*dy; #endif                if( mag0 < mag )                {                    dx0 = dx;                    dy0 = dy;                    mag0 = mag;                }                dbuf[x] = dx0;                dbuf[x+width] = dy0;                //最终dbuf中存放的是较大的值,                //三个通道分别计算dx、dy、mag,通过比较mag的值,将最大的一组Dx、Dy进行保存            }        }#ifdef HAVE_IPP        ippsCartToPolar_32f((const Ipp32f*)Dx.data, (const Ipp32f*)Dy.data, (Ipp32f*)Mag.data, pAngles, width);        for( x = 0; x < width; x++ )        {           if(pAngles[x] < 0.f)             pAngles[x] += (Ipp32f)(CV_PI*2.);        }        ippsNormalize_32f(pAngles, pAngles, width, 0.5f/angleScale, 1.f/angleScale);        ippsFloor_32f(pAngles,(Ipp32f*)hidxs.data,width);        ippsSub_32f_I((Ipp32f*)hidxs.data,pAngles,width);        
ippsMul_32f_I((Ipp32f*)Mag.data,pAngles,width);        ippsSub_32f_I(pAngles,(Ipp32f*)Mag.data,width);        ippsRealToCplx_32f((Ipp32f*)Mag.data,pAngles,(Ipp32fc*)gradPtr,width);#else        //计算二维向量的幅值及角度值,这里的Dx、Dy表示的该行所有像素经由模板计算的到的所有值        //mag = sqrt(Dx*Dx + Dy*Dy) angle = atan2(Dy,Dx)*180 / pi        //参数false指明这里的Angle得到结果为弧度表示,取值范围是[0,2*pi](tornadomeet中的一个失误,导致其后面的hidx等值的计算均出现问题)        cartToPolar( Dx, Dy, Mag, Angle, false );        //所有计算值都已经保存到dbuf中了#endif        //遍历该行中的所有像素,将角度值进行划分为九个单元,        for( x = 0; x < width; x++ )        {#ifdef HAVE_IPP            int hidx = (int)pHidxs[x];#else            float mag = dbuf[x+width*2], angle = dbuf[x+width*3]*angleScale - 0.5f; //得到某个像素对应幅值与角度值(-0.5<angle<17.5)            int hidx = cvFloor(angle);//hidx = {-1,...17},向下取整            angle -= hidx;//angle表示与下端bin的弧度值之差            gradPtr[x*2] = mag*(1.f - angle);            gradPtr[x*2+1] = mag*angle;            //利用角度的小数部分作为权重,对梯度幅值重分配#endif            if( hidx < 0 )                hidx += _nbins;            else if( hidx >= _nbins )                hidx -= _nbins;            //这里的hidx的求解结果原始为{-1,0,1...8,9,10...16,17}->{8,0,1...8,0,1...7,8}            //也就是将[0,2*pi]范围弧度值,分配到9个单元格中            assert( (unsigned)hidx < (unsigned)_nbins );//这里的hidx值必定是小于9的            qanglePtr[x*2] = (uchar)hidx;            hidx++;            hidx &= hidx < _nbins ? 
-1 : 0;            //与-1相与为其本身,与0相与为0,因而这里的hidx自加一之后,如果没有超出则保留,超出则置为零            qanglePtr[x*2+1] = (uchar)hidx;        }    }    //完成对整个原始img图像的梯度幅值计算,梯度方向值计算    //每个像素对应两个梯度值与方向值。根据角度的位置关系,将梯度值信息分配给相邻的两个bins}//这里HOGCache结构体的作用是,针对特定输入的img,及block、cell参数//计算得到一个普通滑动窗口内所有block的信息,block产生直方图位置,block在滑动窗口内的相对位置//单个block中所有像素的对不同cell产生直方图的贡献情况,分三种情况讨论//进而在之后的描述算子(直方图)的计算过程中,直接对单个像素套用blockData、pixData信息得到其对应直方图的值struct HOGCache{    struct BlockData//存储block数据内容,1个BlockData结构体是对应的一个block数据    {        BlockData() : histOfs(0), imgOffset() {}        int histOfs;//histOfs表示当前block对整个滑动窗口内HOG描述算子的对应的描述向量的起始位置        Point imgOffset;//imgOffset表示为当前block在滑动窗口图片中的相对坐标(当然是指左上角坐标)    };    struct PixData//存取某个像素内容,1个PixData结构体是对应的block中1个像素点的数据    {        //这里的注释与tornadomeet中给出的含义解释也不同        size_t gradOfs, qangleOfs;//gradOfs表示当前像素相对所属block起始位置 在grad图像中的偏移量                            //同理qangle        int histOfs[4];//histOfs[]//这里指的是当前pixel所贡献cell产生向量相对其所属block向量起始位置的偏移量 (贡献cell最多有4个)                    //通过该值可以很直接的确定当前像素对应幅值,方向值的最终归属        float histWeights[4];//histWeight[]贡献权重??        float gradWeight;//gradWeight表示该点本身由于处在block中位置的不同因而对梯度直方图贡献也不同,                         //其权值按照二维高斯分布(以block中心为二维高斯的中心)来决定??    
};    HOGCache();    //含参构造函数,内部调用了init函数    HOGCache(const HOGDescriptor* descriptor,        const Mat& img, Size paddingTL, Size paddingBR,        bool useCache, Size cacheStride);    virtual ~HOGCache() {};    //完成对HOGCache的初始化工作,细节之后进行讨论    virtual void init(const HOGDescriptor* descriptor,        const Mat& img, Size paddingTL, Size paddingBR,        bool useCache, Size cacheStride);    Size windowsInImage(Size imageSize, Size winStride) const;//得到单幅图像中扫描窗口的个数    Rect getWindow(Size imageSize, Size winStride, int idx) const;//确定idx索引得到的扫描窗口具体信息,返回内容为一个矩阵    const float* getBlock(Point pt, float* buf);//获得指向block的直方图    virtual void normalizeBlockHistogram(float* histogram) const;//归一化block直方图    vector<PixData> pixData;//存数所有像素的数据    vector<BlockData> blockData;//存储单个滑动窗口内所有block的数据    bool useCache;//标志是否使用缓存    vector<int> ymaxCached;//与缓存使用相关,暂时不考虑    Size winSize, cacheStride;//winSize,扫描窗口大小;    Size nblocks, ncells;//单个滑动窗口内block个数,单个block中cell的个数    int blockHistogramSize;//单个block计算得到描述算子维数    int count1, count2, count4;//统计一个block中不同类型像素的个数    Point imgoffset;//偏移量    Mat_<float> blockCache;//与cache相关不先考虑    Mat_<uchar> blockCacheFlags;//不先考虑    Mat grad, qangle;//存储梯度幅度图,梯度方向图    const HOGDescriptor* descriptor;//HOG};HOGCache::HOGCache(){    useCache = false;    blockHistogramSize = count1 = count2 = count4 = 0;    descriptor = 0;}//_useCache == 0HOGCache::HOGCache(const HOGDescriptor* _descriptor,        const Mat& _img, Size _paddingTL, Size _paddingBR,        bool _useCache, Size _cacheStride){    init(_descriptor, _img, _paddingTL, _paddingBR, _useCache, _cacheStride);}//对缓存结构体进行初始化操作//完成对原始img图像的梯度图grad,方向图qangle的计算工作//初始化工作,定位了在一个扫描窗口中每一个block对应整体特征向量初始位置和其在扫描窗口中的相对位置//另外明确了一个block中每个像素对应产生贡献的cell,及其分别相应的权重void HOGCache::init(const HOGDescriptor* _descriptor,        const Mat& _img, Size _paddingTL, Size _paddingBR,        bool _useCache, Size _cacheStride){    descriptor = _descriptor;    cacheStride = 
_cacheStride;//缓存一次移动的距离??有待进一步精确描述,为winStride与blockStride的最大公约数,    useCache = _useCache;//这里的参数输入为0,表示不启用,暂时先不予考虑    //这里的grad及qangle都是descriptor结构体内部变量    descriptor->computeGradient(_img, grad, qangle, _paddingTL, _paddingBR);//计算当前图像img的梯度方向矩阵    //经过计算得到当前img的每个像素的梯度幅度值与方向值(各有两个)    imgoffset = _paddingTL;//原始img图像填充的尺寸大小    winSize = descriptor->winSize;//扫描窗口大小(64*128)    Size blockSize = descriptor->blockSize;//当前descriptor的block大小,(16*16)    Size blockStride = descriptor->blockStride;//表示block每次跨越像素个数(x方向,y方向)    Size cellSize = descriptor->cellSize;//cell大小(8*8)    Size winSize = descriptor->winSize;//窗口大小(64*128),与上方的winSize重复了吧?    int i, j, nbins = descriptor->nbins;//9    int rawBlockSize = blockSize.width*blockSize.height;//block内像素个数    nblocks = Size((winSize.width - blockSize.width)/blockStride.width + 1,                   (winSize.height - blockSize.height)/blockStride.height + 1);//一个扫描窗口内包含block个数(7,15)存在重叠    ncells = Size(blockSize.width/cellSize.width, blockSize.height/cellSize.height);//一个block中包含cell个数(2,2)    blockHistogramSize = ncells.width*ncells.height*nbins;//一个block生成特征向量维数(2*2*9每个cell生成一个直方图,每个直方图含有9个bins)    if( useCache )//这里的使用缓存的含义是?跳过    {        Size cacheSize((grad.cols - blockSize.width)/cacheStride.width+1,                       (winSize.height/cacheStride.height)+1);//设置缓存大小        blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize);        blockCacheFlags.create(cacheSize);        size_t i, cacheRows = blockCache.rows;        ymaxCached.resize(cacheRows);        for( i = 0; i < cacheRows; i++ )            ymaxCached[i] = -1;    }    //weights为一个尺寸为blockSize的二维高斯表,下面的代码就是计算二维高斯的系数    //作用是参与计算block内像素权重    Mat_<float> weights(blockSize);//(16*16)大小    float sigma = (float)descriptor->getWinSigma();    float scale = 1.f/(sigma*sigma*2);    for(i = 0; i < blockSize.height; i++)        for(j = 0; j < blockSize.width; j++)        {            float di = i - blockSize.height*0.5f;            float dj = j - 
blockSize.width*0.5f;            weights(i,j) = std::exp(-(di*di + dj*dj)*scale);        }    blockData.resize(nblocks.width*nblocks.height);//共保存width*height数目的block数据,这里的resize有可能重新申请内存空间    //这里是简单的一个扫描窗口内含有的还有的block个数    pixData.resize(rawBlockSize*3);//rawBlockSize表示block中像素个数,这里*3表示不是三通道而是划分为三类像素区分存放                                   //这里申请的内存远远超过所需,但为了方便计算,需先如此申请,后再进行压缩    //主要是因为这里得到的值,不是梯度方向值,仅仅是用于计算该扫描窗口参与计算描述算子的像素的计算公式。    //之后将扫描窗口值进行带入,才得到最终的descriptor。很有可能,    // Initialize 2 lookup tables, pixData & blockData.//初始化两个查找表:pixData&blockData    // Here is why://原因如下    //    // The detection algorithm runs in 4 nested loops (at each pyramid layer):    //  loop over the windows within the input image    //    loop over the blocks within each window    //      loop over the cells within each block    //        loop over the pixels in each cell    // As each of the loops runs over a 2-dimensional array,    // we could get 8(!) nested loops in total, which is very-very slow.    // To speed the things up, we do the following:    //   1. loop over windows is unrolled in the HOGDescriptor::{compute|detect} methods;    //         inside we compute the current search window using getWindow() method.    //         Yes, it involves some overhead (function call + couple of divisions),    //         but it's tiny in fact.    //   2. loop over the blocks is also unrolled. Inside we use pre-computed blockData[j]    //         to set up gradient and histogram pointers.    //   3. loops over cells and pixels in each cell are merged    //       (since there is no overlap between cells, each pixel in the block is processed once)    //      and also unrolled. 
Inside we use PixData[k] to access the gradient values and    //      update the histogram    // 检测过程在每个金字塔层进行四次嵌套查询(暂时没有看出金字塔来),    // 利用滑动窗口扫描整幅图像,在滑动窗口内循环遍历每个block,在block中遍历每个cells,在cells中遍历每个像素    // 每次检测循环过程都要处理2维数组,这也就是说要进行8次嵌套,这是非常耗时的    //   利用HOGDescriptor用的compute|detect方法循环遍历每个窗口,在内部利用getWindow方法计算得到当前窗口值,设计一些开销,但是只是很少一部分    //   遍历cell和pixel的方法融合到一起,由于各个cell之间没有重叠,因而各个pixel仅仅计算一次    //   同样unrolled,利用PixData[k]得到梯度值,并更新直方图    //   遍历block的方法也同样是unrolled的,利用预先计算你的blockData去进一步计算梯度和直方图指针    //针对block个体,其中包含4个cell,对pixData进行初始化操作,    count1 = count2 = count4 = 0;//记录对不同数目cell做出贡献的像素个数    //遍历每个像素    //遍历扫描窗口内某个block    //计算单个block中的内所有像素的pixData值    //对单个block进行区域划分如下:    //{[A][B] [C][D]}    //{[E][F] [G][H]}    //    //{[I][J] [K][L]}    //{[M][N] [O][P]}    //参考tornadomeet文章内容    for( j = 0; j < blockSize.width; j++ )//blockSize.width == 16        for( i = 0; i < blockSize.height; i++ )//blockSize.height == 16        {            PixData* data = 0;//新建PixData指针            float cellX = (j+0.5f)/cellSize.width - 0.5f;//cellSize.width == 8            int icellX0 = cvFloor(cellX);            int icellX1 = icellX0 + 1;            cellX -= icellX0;//差值            //j = [0,3] icellX0 = -1,icellX1 = 0;            //j = [4,11] icellX0 = 0;icellX1 = 1            //j = [12,15] icellX0 = 1,icell1 = 2            float cellY = (i+0.5f)/cellSize.height - 0.5f;            int icellY0 = cvFloor(cellY);            int icellY1 = icellY0 + 1;            cellY -= icellY0;            //i = [0,3]  icellY0 = -1,icellY1 = 0            //i = [4,11] icellY0 = 0,icellY1 = 1            //i = [12,15] icellY0 = 1,icellY1 = 2            //cellY表示差值            //ncells(2,2),宽高均为2            if( (unsigned)icellX0 < (unsigned)ncells.width &&                (unsigned)icellX1 < (unsigned)ncells.width )            {                if( (unsigned)icellY0 < (unsigned)ncells.height &&                    (unsigned)icellY1 < (unsigned)ncells.height )                {                    //区域 
F、G、J、K                    //注意这里的unsigned,这里满足该约束条件的只能是icellX0 == 0;icellY0 == 0                    //当前区域内像素对四个cell值均有影响                    //需要明确的是,无论怎样,最终的结果仍然是每个cell产生一个九维向量,一个block共4*9 = 36维特征向量                    //ncells.height == 2                    data = &pixData[rawBlockSize*2 + (count4++)];//跳过前两类,直接对第三类(4)进行赋值操作                    data->histOfs[0] = (icellX0*ncells.height + icellY0)*nbins;//0*nbins                    data->histWeights[0] = (1.f - cellX)*(1.f - cellY);//权重,比较巧妙的计算,节省很多繁琐的过程                    data->histOfs[1] = (icellX1*ncells.height + icellY0)*nbins;//2*nbins                    data->histWeights[1] = cellX*(1.f - cellY);                    data->histOfs[2] = (icellX0*ncells.height + icellY1)*nbins;//1*bins                    data->histWeights[2] = (1.f - cellX)*cellY;                    data->histOfs[3] = (icellX1*ncells.height + icellY1)*nbins;//(2 + 1)*bins                    data->histWeights[3] = cellX*cellY;                    //histOfs表示当前像素对哪个直方图做出贡献,histWeight表示对 对应直方图做出贡献的权重                    //其他依次类推                }                else                {                    //区域B、C、N、O                    data = &pixData[rawBlockSize + (count2++)];                    if( (unsigned)icellY0 < (unsigned)ncells.height )//unsigned(-1) > 2                    {                        //N、O                        icellY1 = icellY0;//icellY1 = 1,原值为2                        cellY = 1.f - cellY;                    }                    data->histOfs[0] = (icellX0*ncells.height + icellY1)*nbins;                    data->histWeights[0] = (1.f - cellX)*cellY;                    data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins;                    data->histWeights[1] = cellX*cellY;                    //设定两类权重                    data->histOfs[2] = data->histOfs[3] = 0;                    data->histWeights[2] = data->histWeights[3] = 0;                }            }            else            {                if( (unsigned)icellX0 < 
(unsigned)ncells.width )// only icellX0 is a valid cell column here (the two-column case was handled above)
                {
                    icellX1 = icellX0;
                    cellX = 1.f - cellX;      // fold the x-interpolation weight onto the single valid column
                }
                if( (unsigned)icellY0 < (unsigned)ncells.height &&
                    (unsigned)icellY1 < (unsigned)ncells.height )
                {
                    // Border strips (blogger's regions E, H, I, L):
                    // the pixel straddles two vertically adjacent cells -> 2 histogram contributions.
                    data = &pixData[rawBlockSize + (count2++)];
                    data->histOfs[0] = (icellX1*ncells.height + icellY0)*nbins;
                    data->histWeights[0] = cellX*(1.f - cellY);
                    data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins;
                    data->histWeights[1] = cellX*cellY;
                    data->histOfs[2] = data->histOfs[3] = 0;
                    data->histWeights[2] = data->histWeights[3] = 0;
                }
                else
                {
                    // Corner regions (blogger's A, D, M, P): the pixel lies in exactly one cell.
                    data = &pixData[count1++];
                    if( (unsigned)icellY0 < (unsigned)ncells.height )
                    {
                        icellY1 = icellY0;
                        cellY = 1.f - cellY;  // fold the y-interpolation weight onto the single valid row
                    }
                    data->histOfs[0] = (icellX1*ncells.height + icellY1)*nbins;
                    data->histWeights[0] = cellX*cellY;
                    // contributes only to its own cell; the remaining slots are unused
                    data->histOfs[1] = data->histOfs[2] = data->histOfs[3] = 0;
                    data->histWeights[1] = data->histWeights[2] = data->histWeights[3] = 0;
                }
            }
            // Offsets of pixel (i,j) inside the grad/qangle images, relative to the
            // block's top-left corner; *2 because both images store 2 values per pixel.
            data->gradOfs = (grad.cols*i + j)*2;
            data->qangleOfs = (qangle.cols*i + j)*2;
            data->gradWeight = weights(i,j);  // Gaussian spatial weight of this pixel within the block
        }
    // End of the per-pixel loop: pixData now records, for every pixel of a block,
    // which cell histograms it feeds and with what weights.

    // Every pixel must have been classified into exactly one of the three groups.
    assert( count1 + count2 + count4 == rawBlockSize );

    // defragment pixData
    // Pack the three groups contiguously as [1-cell pixels | 2-cell pixels | 4-cell pixels]
    // so getBlock() can walk them with a single index.
    for( j = 0; j < count2; j++ )
        pixData[j + count1] = pixData[j + rawBlockSize];
    for( j = 0; j < count4; j++ )
        pixData[j + count1 + count2] = pixData[j + rawBlockSize*2];
    count2 += count1;
    count4 += count2;  // count1/count2/count4 become the (exclusive) end index of each group

    // initialize blockData: one entry per block position inside a detection window
    for( j = 0; j < nblocks.width; j++ )
        for( i = 0; i < nblocks.height; i++ )
        {
            BlockData& data = blockData[j*nblocks.height + i];         // block (i,j), column-major
            data.histOfs = (j*nblocks.height + i)*blockHistogramSize;  // start of this block's histogram inside the window descriptor
            data.imgOffset = Point(j*blockStride.width,i*blockStride.height);  // block's top-left corner relative to the window
        }
}

// Compute (or fetch from cache) the histogram of one block.
// pt:  top-left corner of the block in the original image coordinates.
// buf: caller-provided storage for the block histogram.
// Returns a pointer to the histogram; this usually equals buf, but when the
// block cache is enabled it may point into the cache instead.
const float* HOGCache::getBlock(Point pt, float* buf)
{
    float* blockHist = buf;
    assert(descriptor != 0);

    Size blockSize = descriptor->blockSize;  // block size in pixels (e.g. 16x16)
    pt += imgoffset;  // shift by the padding offset -> position inside the padded grad/qangle images

    // The block must lie entirely inside the (padded) gradient images.
    CV_Assert( (unsigned)pt.x <= (unsigned)(grad.cols - blockSize.width) &&
               (unsigned)pt.y <= (unsigned)(grad.rows - blockSize.height) );

    if( useCache )
    {
        // Cached path: reuse a previously computed histogram for this block position.
        CV_Assert( pt.x % cacheStride.width == 0 &&
                   pt.y % cacheStride.height == 0 );
        Point cacheIdx(pt.x/cacheStride.width,
                      (pt.y/cacheStride.height) % blockCache.rows);
        if( pt.y != ymaxCached[cacheIdx.y] )
        {
            // This cache row belonged to a different image row: invalidate it.
            Mat_<uchar> cacheRow = blockCacheFlags.row(cacheIdx.y);
            cacheRow = (uchar)0;
            ymaxCached[cacheIdx.y] = pt.y;
        }
        blockHist = &blockCache[cacheIdx.y][cacheIdx.x*blockHistogramSize];
        uchar& computedFlag = blockCacheFlags(cacheIdx.y, cacheIdx.x);
        if( computedFlag != 0 )
            return blockHist;  // already computed for this position
        computedFlag = (uchar)1; // set it at once, before actual computing
    }

    // Group boundaries: pixels touching 1, 2 and 4 cells respectively.
    int k, C1 = count1, C2 = count2, C4 = count4;
    // Pointers to this block's top-left pixel in grad/qangle.
    // step is the row stride in bytes; *2 because each pixel stores 2 values.
    const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2;
    const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2;

    CV_Assert( blockHist != 0 );
#ifdef HAVE_IPP
    ippsZero_32f(blockHist,blockHistogramSize);
#else
    for( k = 0; k < blockHistogramSize; k++ )
        blockHist[k] = 0.f;  // clear the histogram before accumulation
#endif

    const PixData* _pixData = &pixData[0];

    // pixData is laid out contiguously as [...C1...C2...C4], so a single running
    // index k visits every pixel of the block exactly once.
    // First group: pixels that vote into a single cell (block corners).
    for( k = 0; k < C1; k++ )
    {
        const PixData& pk = _pixData[k];
        // gradPtr points at the block's origin in grad; gradOfs locates this pixel.
        const float* a = gradPtr + pk.gradOfs;
        float w = pk.gradWeight*pk.histWeights[0];  // Gaussian weight * cell-interpolation weight
        const uchar* h = qanglePtr + pk.qangleOfs;
        int h0 = h[0], h1 = h[1];                   // the two bins this gradient is split between
        float* hist = blockHist + pk.histOfs[0];    // histogram of the single affected cell
        float t0 = hist[h0] + a[0]*w;               // accumulate magnitude shares into the two bins
        float t1 = hist[h1] + a[1]*w;
        hist[h0] = t0; hist[h1] = t1;
    }

    // Second group: pixels that vote into two cells — same accumulation, twice.
    for( ; k < C2; k++ )
    {
        const PixData& pk = _pixData[k];
        const float* a = gradPtr + pk.gradOfs;
        float w, t0, t1, a0 = a[0], a1 = a[1];
        const uchar* h = qanglePtr + pk.qangleOfs;
        int h0 = h[0], h1 = h[1];

        float* hist = blockHist + pk.histOfs[0];
        w = pk.gradWeight*pk.histWeights[0];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;

        hist = blockHist + pk.histOfs[1];
        w = pk.gradWeight*pk.histWeights[1];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;
    }

    // Third group: interior pixels that vote into all four neighbouring cells.
    for( ; k < C4; k++ )
    {
        const PixData& pk = _pixData[k];
        const float* a = gradPtr + pk.gradOfs;
        float w, t0, t1, a0 = a[0], a1 = a[1];
        const uchar* h = qanglePtr + pk.qangleOfs;
        int h0 = h[0], h1 = h[1];

        float* hist = blockHist + pk.histOfs[0];
        w = pk.gradWeight*pk.histWeights[0];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;

        hist = blockHist + pk.histOfs[1];
        w = pk.gradWeight*pk.histWeights[1];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;

        hist = blockHist + pk.histOfs[2];
        w = pk.gradWeight*pk.histWeights[2];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;

        hist = blockHist + pk.histOfs[3];
        w = pk.gradWeight*pk.histWeights[3];
        t0 = hist[h0] + a0*w;
        t1 = hist[h1] + a1*w;
        hist[h0] = t0; hist[h1] = t1;
    }

    normalizeBlockHistogram(blockHist);  // L2-Hys normalization of the finished block histogram

    return blockHist;
}

// L2-Hys normalization of a block histogram: L2-normalize, clip every bin at
// L2HysThreshold, then L2-normalize again.
void HOGCache::normalizeBlockHistogram(float* _hist) const
{
    float* hist = &_hist[0];
#ifdef HAVE_IPP
    size_t sz = blockHistogramSize;
#else
    size_t i, sz = blockHistogramSize;  // number of bins in a block histogram
#endif

    float sum = 0;
#ifdef HAVE_IPP
    ippsDotProd_32f(hist,hist,sz,&sum);
#else
    for( i = 0; i < sz; i++ )
        sum += hist[i]*hist[i];  // sum of squares for the first L2 norm
#endif

    // First normalization factor (sz*0.1f acts as a regularizer) and clipping threshold.
    float scale = 1.f/(std::sqrt(sum)+sz*0.1f), thresh = (float)descriptor->L2HysThreshold;
#ifdef HAVE_IPP
    ippsMulC_32f_I(scale,hist,sz);
    ippsThreshold_32f_I( hist, sz, thresh, ippCmpGreater );
    ippsDotProd_32f(hist,hist,sz,&sum);
#else
    for( i = 0, sum = 0; i < sz; i++ )
    {
        hist[i] = std::min(hist[i]*scale, thresh);  // scale and clip, then re-accumulate squares
        sum += hist[i]*hist[i];
    }
#endif

    scale = 1.f/(std::sqrt(sum)+1e-3f);
#ifdef HAVE_IPP
    ippsMulC_32f_I(scale,hist,sz);
#else
    for( i = 0; i < sz; i++ )
        hist[i] *= scale;  // second normalization -> final histogram
#endif
}

// Number of detection-window positions that fit into an image of the given size.
Size HOGCache::windowsInImage(Size imageSize, Size winStride) const
{
    return Size((imageSize.width - winSize.width)/winStride.width + 1,
                (imageSize.height - winSize.height)/winStride.height + 1);
}

// Map a flat window index to the rectangle of that detection window.
Rect HOGCache::getWindow(Size imageSize, Size winStride, int idx) const
{
    int nwindowsX = (imageSize.width - winSize.width)/winStride.width + 1;  // windows per row
    int y = idx / nwindowsX;            // window's grid row
    int x = idx - nwindowsX*y;          // window's grid column
    return Rect( x*winStride.width, y*winStride.height, winSize.width, winSize.height );
    // top-left corner follows from the stride; width/height are the fixed window size
}
// End of HOGCache. The cache precomputes the histogram "recipe" (pixData/blockData)
// once; grad/qangle are then folded through it for every window.

// Compute HOG descriptors for all windows of an image, or for the windows listed
// in `locations`, concatenated into `descriptors`.
// img:         input image
// descriptors: output feature vectors (one window descriptor after another)
// winStride:   step between consecutive detection windows
// padding:     border added around the image before gradient computation
// locations:   optional explicit window positions; empty -> scan the whole image
void HOGDescriptor::compute(const Mat& img, vector<float>& descriptors,
                            Size winStride, Size padding,
                            const vector<Point>& locations) const
{
    if( winStride == Size() )
        winStride = cellSize;  // default window step: one cell
    // gcd() of the strides gives the finest step at which cached blocks can be reused
    // (only relevant when useCache != 0).
    Size cacheStride(gcd(winStride.width, blockStride.width),
                     gcd(winStride.height, blockStride.height));
    size_t nwindows = locations.size();
    // alignSize(m, n) returns the smallest multiple of n that is >= m, e.g. alignSize(7,3) == 9.
    padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width);
    padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height);
    Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2);  // padded image size

    // Build the cache for this image: computes grad/qangle and initializes
    // blockData/pixData. NOTE(review): rebuilt on every call — a potential
    // optimization target if compute() is called repeatedly on same-sized images.
    HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride);

    if( !nwindows )  // no explicit locations -> process every window in the image
        nwindows = cache.windowsInImage(paddedImgSize, winStride).area();

    const HOGCache::BlockData* blockData = &cache.blockData[0];

    int nblocks = cache.nblocks.area();                 // blocks per detection window
    int blockHistogramSize = cache.blockHistogramSize;  // histogram bins per block
    size_t dsize = getDescriptorSize();                 // descriptor length of one window
    descriptors.resize(dsize*nwindows);                 // room for all window descriptors

    for( size_t i = 0; i < nwindows; i++ )
    {
        float* descriptor = &descriptors[i*dsize];  // where this window's descriptor goes
        Point pt0;
        if( !locations.empty() )  // caller supplied explicit window positions
        {
            pt0 = locations[i];
            // Skip windows that do not fit inside the padded image.
            if( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width ||
                pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height )
                continue;
        }
        else  // regular scan: i-th window's top-left corner in original image coords
        {
            pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding);
            CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0);
        }
        // Accumulate the descriptor block by block.
        for( int j = 0; j < nblocks; j++ )
        {
            const HOGCache::BlockData& bj = blockData[j];
            Point pt = pt0 + bj.imgOffset;          // block's top-left in image coords
            float* dst = descriptor + bj.histOfs;   // block's slot inside the window descriptor

            const float* src = cache.getBlock(pt, dst);
            if( src != dst )  // differs only on the cached path, where src points into the cache
#ifdef HAVE_IPP
               ippsCopy_32f(src,dst,blockHistogramSize);
#else
                for( int k = 0; k < blockHistogramSize; k++ )
                    dst[k] = src[k];  // copy the cached histogram into the descriptor
#endif
        }
    }
    // All requested windows have been written into descriptors.
}

// Single-scale detection: evaluate the linear SVM on every window of one image.
// NOTE(review): grad/qangle and blockData/pixData are recomputed per call; caching
// them across same-sized images would be a possible optimization.
// img:          image to scan
// hits:         output — top-left corners of windows classified as positive
// weights:      output — the SVM score s of each hit
// hitThreshold: decision threshold on s
// winStride/padding/locations: as in compute()
void HOGDescriptor::detect(const Mat& img,
    vector<Point>& hits, vector<double>& weights, double hitThreshold,
    Size winStride, Size padding, const vector<Point>& locations) const
{
    hits.clear();
    if( svmDetector.empty() )
        return;  // no classifier loaded — nothing to detect

    if( winStride == Size() )
        winStride = cellSize;
    Size cacheStride(gcd(winStride.width, blockStride.width),
                     gcd(winStride.height, blockStride.height));
    size_t nwindows = locations.size();
    // alignSize(m, n) returns the smallest multiple of n that is >= m, e.g. alignSize(7,3) == 9.
    padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width);
    padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height);
    Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2);

    // Builds grad/qangle and initializes blockData/pixData.
    HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride);

    if( !nwindows )
        nwindows = cache.windowsInImage(paddedImgSize, winStride).area();

    const HOGCache::BlockData* blockData = &cache.blockData[0];

    int nblocks = cache.nblocks.area();                 // blocks per window
    int blockHistogramSize = cache.blockHistogramSize;  // histogram bins per block
    size_t dsize = getDescriptorSize();                 // descriptor length of one window

    // If svmDetector has one extra element it is the SVM bias term (rho).
    double rho = svmDetector.size() > dsize ? svmDetector[dsize] : 0;
    vector<float> blockHist(blockHistogramSize);  // scratch histogram for one block

    for( size_t i = 0; i < nwindows; i++ )
    {
        Point pt0;
        if( !locations.empty() )
        {
            pt0 = locations[i];
            // Skip windows that do not fit inside the padded image.
            if( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width ||
                pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height )
                continue;
        }
        else
        {
            pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding);
            CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0);
        }
        double s = rho;
        const float* svmVec = &svmDetector[0];  // SVM weight vector, walked block by block
#ifdef HAVE_IPP
        int j;
#else
        int j, k;
#endif
        // s = <descriptor, svmVec> + rho, accumulated one block at a time.
        for( j = 0; j < nblocks; j++, svmVec += blockHistogramSize )
        {
            const HOGCache::BlockData& bj = blockData[j];
            Point pt = pt0 + bj.imgOffset;  // block position in image coords

            const float* vec = cache.getBlock(pt, &blockHist[0]);  // this block's histogram
#ifdef HAVE_IPP
            Ipp32f partSum;
            ippsDotProd_32f(vec,svmVec,blockHistogramSize,&partSum);
            s += (double)partSum;
#else
            // Dot product of the block histogram with the matching slice of the
            // SVM weights, manually unrolled by 4 to cut loop overhead.
            for( k = 0; k <= blockHistogramSize - 4; k += 4 )
                s += vec[k]*svmVec[k] + vec[k+1]*svmVec[k+1] +
                    vec[k+2]*svmVec[k+2] + vec[k+3]*svmVec[k+3];
            for( ; k < blockHistogramSize; k++ )
                s += vec[k]*svmVec[k];
#endif
        }
        if( s >= hitThreshold )  // score above threshold -> window classified positive
        {
            hits.push_back(pt0);
            weights.push_back(s);
        }
    }
}

// Convenience overload that discards the SVM scores.
void HOGDescriptor::detect(const Mat& img, vector<Point>& hits, double hitThreshold,
                           Size winStride, Size padding, const vector<Point>& locations) const
{
    vector<double> weightsV;
    detect(img, hits, weightsV, hitThreshold, winStride, padding, locations);
}

// Loop body for parallel_for: runs single-scale detect() on a range of pyramid
// levels and collects the results in concurrent containers. Used by detectMultiScale.
struct HOGInvoker
{
    HOGInvoker( const HOGDescriptor* _hog, const Mat& _img,
                double _hitThreshold, Size _winStride, Size _padding,
                const double* _levelScale, ConcurrentRectVector* _vec,
                ConcurrentDoubleVector* _weights=0, ConcurrentDoubleVector* _scales=0 )
    {
        hog = _hog;
        img = _img;
        hitThreshold = _hitThreshold;
        winStride = _winStride;
        padding = _padding;
        levelScale = _levelScale;
        vec = _vec;
        weights = _weights;
        scales = _scales;
    }

    void operator()( const BlockedRange& range ) const
    {
        int i, i1 = range.begin(), i2 = range.end();
        // Smallest scale in this range -> largest resized image, used to size the
        // shared buffer once. (Equivalent to:
        //   if (i1 > 0) minScale = levelScale[i1];
        //   else if (i2 > 1) minScale = levelScale[i1+1];
        //   else minScale = max(img.cols, img.rows);)
        double minScale = i1 > 0 ? levelScale[i1] : i2 > 1 ? levelScale[i1+1] : std::max(img.cols, img.rows);
        Size maxSz(cvCeil(img.cols/minScale), cvCeil(img.rows/minScale));
        Mat smallerImgBuf(maxSz, img.type());  // one buffer reused for every level in the range
        vector<Point> locations;               // per-level hit positions
        vector<double> hitsWeights;            // per-level hit scores

        for( i = i1; i < i2; i++ )
        {
            double scale = levelScale[i];  // downscale factor for this pyramid level
            Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale));
            Mat smallerImg(sz, img.type(), smallerImgBuf.data);  // view into the shared buffer
            if( sz == img.size() )
                smallerImg = Mat(sz, img.type(), img.data, img.step);  // scale 1: alias the input
            else
                resize(img, smallerImg, sz);
            // Single-scale detection on the resized image.
            hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding);
            // Window size mapped back to original-image coordinates.
            Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale));
            for( size_t j = 0; j < locations.size(); j++ )
            {
                // Record each hit as a rectangle in the original image.
                vec->push_back(Rect(cvRound(locations[j].x*scale),
                                    cvRound(locations[j].y*scale),
                                    scaledWinSize.width, scaledWinSize.height));
                if (scales) {
                    scales->push_back(scale);  // remember which level produced it
                }
            }

            if (weights && (!hitsWeights.empty()))
            {
                for (size_t j = 0; j < locations.size(); j++)
                {
                    weights->push_back(hitsWeights[j]);  // propagate SVM scores if requested
                }
            }
        }
    }

    const HOGDescriptor* hog;       // descriptor/classifier being evaluated
    Mat img;                        // input image
    double hitThreshold;            // SVM decision threshold
    Size winStride;                 // window step
    Size padding;                   // image padding
    const double* levelScale;       // &levelScale[0]: scale factor per pyramid level
    ConcurrentRectVector* vec;      // output: hit rectangles in original-image coords
    ConcurrentDoubleVector* weights;// output: hit scores (optional)
    ConcurrentDoubleVector* scales; // output: scale of each hit (optional)
};

// Multi-scale detection: build a scale pyramid (the image is shrunk, the window
// stays fixed), run detect() at every level in parallel, then group the raw hits.
// scale0 is the ratio between consecutive levels; scale0 <= 1 disables the pyramid.
void HOGDescriptor::detectMultiScale(
    const Mat& img, vector<Rect>& foundLocations, vector<double>& foundWeights,
    double hitThreshold, Size winStride, Size padding,
    double scale0, double finalThreshold, bool useMeanshiftGrouping) const
{
    double scale = 1.;
    int levels = 0;

    vector<double> levelScale;
    // Collect the scale of each level; stop early once the shrunken image can no
    // longer hold a single detection window. cvRound rounds to nearest integer.
    for( levels = 0; levels < nlevels; levels++ )
    {
        levelScale.push_back(scale);
        if( cvRound(img.cols/scale) < winSize.width ||
            cvRound(img.rows/scale) < winSize.height ||
            scale0 <= 1 )
            break;
        scale *= scale0;  // scale0 > 1 here, so the image shrinks geometrically
    }
    levels = std::max(levels, 1);  // actual level count (may be less than nlevels)
    levelScale.resize(levels);

    ConcurrentRectVector allCandidates;  // hit rectangles from all levels
    ConcurrentDoubleVector tempScales;   // scale of each hit
    ConcurrentDoubleVector tempWeights;  // SVM score of each hit
    vector<double> foundScales;

    // parallel_for runs HOGInvoker over the level indices, one detect() per level.
    parallel_for(BlockedRange(0, (int)levelScale.size()),
                 HOGInvoker(this, img, hitThreshold, winStride, padding,
                            &levelScale[0], &allCandidates, &tempWeights, &tempScales));

    // Move results out of the concurrent containers.
    std::copy(tempScales.begin(), tempScales.end(), back_inserter(foundScales));
    foundLocations.clear();
    std::copy(allCandidates.begin(), allCandidates.end(), back_inserter(foundLocations));
    foundWeights.clear();
    std::copy(tempWeights.begin(), tempWeights.end(), back_inserter(foundWeights));

    if ( useMeanshiftGrouping )
    {
        // Mean-shift clustering of the raw rectangles.
        groupRectangles_meanshift(foundLocations, foundWeights, foundScales, finalThreshold, winSize);
    }
    else
    {
        // Simple overlap-based grouping.
        groupRectangles(foundLocations, (int)finalThreshold, 0.2);
    }
}

// Convenience overload that discards the per-hit weights.
void HOGDescriptor::detectMultiScale(const Mat& img, vector<Rect>& foundLocations,
                                     double hitThreshold, Size winStride, Size padding,
                                     double scale0, double finalThreshold, bool useMeanshiftGrouping) const
{
    vector<double> foundWeights;
    detectMultiScale(img, foundLocations, foundWeights, hitThreshold, winStride,
                     padding, scale0, finalThreshold, useMeanshiftGrouping);
}




1 0