360Lib:CPP-PSNR


CPP-PSNR is an objective quality metric for 360-degree video adopted in 360Lib: the pictures to be compared are projected to the CPP (Craster Parabolic Projection) format, and PSNR is computed in that format.
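Concretely, CPP maps a point on the sphere with longitude $\lambda \in [-\pi, \pi]$ and latitude $\phi \in [-\pi/2, \pi/2]$ to plane coordinates $(x, y)$. Written in the normalized form that can be read off the 360Lib code shown later, the mapping and its inverse are

$$x = \lambda\left(2\cos\frac{2\phi}{3} - 1\right), \qquad y = \pi\sin\frac{\phi}{3},$$

$$\phi = 3\arcsin\frac{y}{\pi}, \qquad \lambda = \frac{x}{2\cos\frac{2\phi}{3} - 1}.$$

The inverse form is exactly what xCalculateCPPPSNR evaluates per pixel to decide whether a CPP sample lies inside the valid image area.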

TCPPPSNRMetric

Below is the CPP-PSNR class TCPPPSNRMetric defined in 360Lib. Its key member function xCalculateCPPPSNR computes the CPP-PSNR.

class TCPPPSNRMetric        // CPP PSNR
{
private:
  Bool          m_bCPPPSNREnabled;
  Double        m_dCPPPSNR[3];
  CPos2D*       m_pCart2D;
  IPos2D*       m_fpTable;
  Int           m_iSphNumPoints;
  Int           m_cppWidth;                                      // CPP width
  Int           m_cppHeight;                                     // CPP height
  Int           m_outputBitDepth[MAX_NUM_CHANNEL_TYPE];          ///< bit-depth of output file
  Int           m_referenceBitDepth[MAX_NUM_CHANNEL_TYPE];       ///< bit-depth of reference file
  ChromaFormat  m_chromaFormatIDC;                               // chroma format
#if SVIDEO_CPP_FIX
  InputGeoParam m_refGeoParam;
  InputGeoParam m_outGeoParam;
#else
  InputGeoParam m_cppGeoParam;
#endif
  SVideoInfo    m_cppCodingVideoInfo;
  SVideoInfo    m_cppRefVideoInfo;
  SVideoInfo    m_cppVideoInfo;
  Void          setDefaultFramePackingParam(SVideoInfo& sVideoInfo);                          // set default frame-packing parameters
  Void          fillSourceSVideoInfo(SVideoInfo& sVidInfo, Int inputWidth, Int inputHeight);  // fill source video info according to the projection format
  TGeometry     *m_pcOutputGeomtry;
  TGeometry     *m_pcReferenceGeomtry;
  TGeometry     *m_pcOutputCPPGeomtry;
  TGeometry     *m_pcRefCPPGeomtry;

public:
  TCPPPSNRMetric();
  virtual ~TCPPPSNRMetric();

  Bool    getCPPPSNREnabled()  { return m_bCPPPSNREnabled; }                               // get m_bCPPPSNREnabled
  Void    setCPPPSNREnabledFlag(Bool bEnabledFlag)  { m_bCPPPSNREnabled = bEnabledFlag; }  // set m_bCPPPSNREnabled
  Void    setOutputBitDepth(Int iOutputBitDepth[MAX_NUM_CHANNEL_TYPE]);                    // set output bit depth
  Void    setReferenceBitDepth(Int iReferenceBitDepth[MAX_NUM_CHANNEL_TYPE]);              // set reference bit depth
  Void    setCPPWidth(Int iCPPWidth);                                                      // set CPP width
  Void    setCPPHeight(Int iCPPheight);                                                    // set CPP height
  Void    setChromaFormatIDC(ChromaFormat iChromaFormatIDC);                               // set chroma format
#if SVIDEO_CPP_FIX
  Void    setGeoParam(InputGeoParam iCPPGeoParam);
#else
  Void    setCPPGeoParam(InputGeoParam iCPPGeoParam);
#endif
  Void    setCPPVideoInfo(SVideoInfo iCppCodingVdideoInfo, SVideoInfo iCppRefVdideoInfo);  // set coding and reference video info for CPP
  Double* getCPPPSNR() { return m_dCPPPSNR; }                                              // get CPP-PSNR
  //Void    sphSampoints(Char* cSphDataFile);
  Void    sphToCart(CPos2D*, CPos3D*);                                                     // 2D to 3D
  Void    xCalculateCPPPSNR( TComPicYuv* pcOrgPicYuv, TComPicYuv* pcPicD );                // compute CPP-PSNR
  Void    initCPPPSNR(InputGeoParam inputGeoParam, Int cppWidth, Int cppHeight, SVideoInfo codingvideoInfo, SVideoInfo referenceVideoInfo);  // initialize CPP-PSNR
};
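Based on this interface, a typical driver looks roughly as follows. This is only a sketch: it assumes the 360Lib/HM headers, all lower-case names (geoParam, codingVideoInfo, referenceVideoInfo, outputBitDepth, referenceBitDepth, cppWidth, cppHeight, pcRefPicYuv, pcRecPicYuv) are placeholders for objects that the encoder sets up elsewhere, and the exact call order in 360Lib may differ.

// Sketch only: all lower-case identifiers below are placeholders.
TCPPPSNRMetric cppPsnr;
cppPsnr.setCPPPSNREnabledFlag(true);
cppPsnr.setOutputBitDepth(outputBitDepth);        // Int[MAX_NUM_CHANNEL_TYPE]
cppPsnr.setReferenceBitDepth(referenceBitDepth);  // Int[MAX_NUM_CHANNEL_TYPE]
cppPsnr.setCPPWidth(cppWidth);
cppPsnr.setCPPHeight(cppHeight);
cppPsnr.setChromaFormatIDC(CHROMA_420);
cppPsnr.initCPPPSNR(geoParam, cppWidth, cppHeight, codingVideoInfo, referenceVideoInfo);

// per frame: reference (original) picture vs. decoded (output) picture
cppPsnr.xCalculateCPPPSNR(pcRefPicYuv, pcRecPicYuv);
Double* psnr = cppPsnr.getCPPPSNR();              // psnr[0..2]: CPP-PSNR of Y, Cb, Cr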

xCalculateCPPPSNR

Now let us look at xCalculateCPPPSNR. It can be divided into two parts:
1. Convert the reference picture and the output picture to the CPP format.
2. Compute the PSNR between the two CPP-format pictures; this is the CPP-PSNR.
A CPP picture contains invalid regions outside the parabolic footprint, so before accumulating the error the function checks, for each pixel, whether its coordinates lie in the valid area; the standalone sketch below isolates that validity test, and the full function follows it.
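The following is an illustration only: 360Lib's S_PI, S_PI_2, sasin and scos are replaced by a local constant and <cmath>, but the arithmetic and the rounding mirror the per-pixel test in the function below.

#include <cmath>

// Returns true if CPP pixel (x, y) of a width x height picture maps back into
// the picture after the inverse CPP projection, i.e. lies inside the valid
// (parabolic) image area.
static bool isValidCppSample(int x, int y, int width, int height)
{
  const double PI = 3.14159265358979323846;
  double u   = ((double)x / width)  * 2.0 * PI - PI;            // normalized x in [-pi, pi)
  double v   = ((double)y / height) * PI - PI / 2.0;            // normalized y in [-pi/2, pi/2)
  double phi = 3.0 * std::asin(v / PI);                         // inverse CPP: latitude
  double lambda = u / (2.0 * std::cos(2.0 * phi / 3.0) - 1.0);  // inverse CPP: longitude
  double px = (lambda + PI) / (2.0 * PI) * width;               // back to pixel coordinates
  double py = (phi + PI / 2.0) / PI * height;
  int ix = (int)((px < 0) ? px - 0.5 : px + 0.5);               // round to nearest integer
  int iy = (int)((py < 0) ? py - 0.5 : py + 0.5);
  return ix >= 0 && iy >= 0 && ix < width && iy < height;
}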

// compute CPP-PSNR
Void TCPPPSNRMetric::xCalculateCPPPSNR( TComPicYuv* pcOrgPicYuv, TComPicYuv* pcPicD)
{
  Int iBitDepthForPSNRCalc[MAX_NUM_CHANNEL_TYPE];       // bit depth used for the PSNR calculation
  Int iReferenceBitShift[MAX_NUM_CHANNEL_TYPE];         // bit shift for the reference picture
  Int iOutputBitShift[MAX_NUM_CHANNEL_TYPE];            // bit shift for the output picture
  TComPicYuv *TPicYUVRefCPP;                            // reference picture in CPP format
  TComPicYuv *TPicYUVOutCPP;                            // output picture in CPP format

  // the calculation bit depth is the maximum of the output and reference bit depths
  iBitDepthForPSNRCalc[CHANNEL_TYPE_LUMA] = std::max(m_outputBitDepth[CHANNEL_TYPE_LUMA], m_referenceBitDepth[CHANNEL_TYPE_LUMA]);
  iBitDepthForPSNRCalc[CHANNEL_TYPE_CHROMA] = std::max(m_outputBitDepth[CHANNEL_TYPE_CHROMA], m_referenceBitDepth[CHANNEL_TYPE_CHROMA]);
  // reference bit shift = calculation bit depth - reference bit depth
  iReferenceBitShift[CHANNEL_TYPE_LUMA] = iBitDepthForPSNRCalc[CHANNEL_TYPE_LUMA] - m_referenceBitDepth[CHANNEL_TYPE_LUMA];
  iReferenceBitShift[CHANNEL_TYPE_CHROMA] = iBitDepthForPSNRCalc[CHANNEL_TYPE_CHROMA] - m_referenceBitDepth[CHANNEL_TYPE_CHROMA];
  // output bit shift = calculation bit depth - output bit depth
  iOutputBitShift[CHANNEL_TYPE_LUMA] = iBitDepthForPSNRCalc[CHANNEL_TYPE_LUMA] - m_outputBitDepth[CHANNEL_TYPE_LUMA];
  iOutputBitShift[CHANNEL_TYPE_CHROMA] = iBitDepthForPSNRCalc[CHANNEL_TYPE_CHROMA] - m_outputBitDepth[CHANNEL_TYPE_CHROMA];

  memset(m_dCPPPSNR, 0, sizeof(Double)*3);
  Double SCPPDspsnr[3]={0, 0 ,0};

  // Convert Output and Ref to CPP_Projection
  // allocate the CPP-format reference picture
  TPicYUVRefCPP = new TComPicYuv;
  TPicYUVRefCPP->createWithoutCUInfo( m_cppWidth, m_cppHeight, m_chromaFormatIDC, true );
  // allocate the CPP-format output picture
  TPicYUVOutCPP = new TComPicYuv;
  TPicYUVOutCPP->createWithoutCUInfo( m_cppWidth, m_cppHeight, m_chromaFormatIDC, true );

  // Converting Reference to CPP
  if ((m_pcReferenceGeomtry->getSVideoInfo()->geoType == SVIDEO_OCTAHEDRON || m_pcReferenceGeomtry->getSVideoInfo()->geoType == SVIDEO_ICOSAHEDRON) && m_pcReferenceGeomtry->getSVideoInfo()->iCompactFPStructure)
  {
    m_pcReferenceGeomtry->compactFramePackConvertYuv(pcOrgPicYuv);
  }
  else
  {
    m_pcReferenceGeomtry->convertYuv(pcOrgPicYuv);
  }
  m_pcReferenceGeomtry->geoConvert(m_pcRefCPPGeomtry);      // projection format conversion
  m_pcRefCPPGeomtry->framePack(TPicYUVRefCPP);

  // Converting Output to CPP
  if ((m_pcOutputGeomtry->getSVideoInfo()->geoType == SVIDEO_OCTAHEDRON || m_pcOutputGeomtry->getSVideoInfo()->geoType == SVIDEO_ICOSAHEDRON) && m_pcOutputGeomtry->getSVideoInfo()->iCompactFPStructure)
  {
    m_pcOutputGeomtry->compactFramePackConvertYuv(pcPicD);
  }
  else
  {
    m_pcOutputGeomtry->convertYuv(pcPicD);
  }
  m_pcOutputGeomtry->geoConvert(m_pcOutputCPPGeomtry);      // projection format conversion
  m_pcOutputCPPGeomtry->framePack(TPicYUVOutCPP);

  for(Int chan=0; chan<pcPicD->getNumberValidComponents(); chan++)
  {
    // per-component pointers, strides and dimensions
    const ComponentID ch=ComponentID(chan);
    const Pel*  pOrg       = TPicYUVOutCPP->getAddr(ch);        // output picture in CPP, base address
    const Int   iOrgStride = TPicYUVOutCPP->getStride(ch);      // output picture stride
    const Pel*  pRec       = TPicYUVRefCPP->getAddr(ch);        // reference picture in CPP, base address
    const Int   iRecStride = TPicYUVRefCPP->getStride(ch);      // reference picture stride
    const Int   iWidth     = TPicYUVRefCPP->getWidth (ch);      // picture width
    const Int   iHeight    = TPicYUVRefCPP->getHeight(ch);      // picture height
    Int   iSize            = 0;
    double fPhi, fLambda;
    double fIdxX, fIdxY;
    double fLamdaX, fLamdaY;

    // accumulate the squared error over the valid CPP image area only
    for(Int y=0;y<iHeight;y++)
    {
      for(Int x=0;x<iWidth;x++)
      {
        fLamdaX = ((double)x / (iWidth)) * (2 * S_PI) - S_PI;
        fLamdaY = ((double)y / (iHeight)) * S_PI - (S_PI_2);
        fPhi = 3 * sasin(fLamdaY / S_PI);
        fLambda = fLamdaX / (2 * scos(2 * fPhi / 3) - 1);
        fLamdaX = (fLambda + S_PI) / 2 / S_PI * (iWidth);
        fLamdaY = (fPhi + (S_PI / 2)) / S_PI *  (iHeight);
        fIdxX = (int)((fLamdaX < 0) ? fLamdaX - 0.5 : fLamdaX + 0.5);
        fIdxY = (int)((fLamdaY < 0) ? fLamdaY - 0.5 : fLamdaY + 0.5);
        if(fIdxY >= 0 && fIdxX >= 0 && fIdxX < iWidth && fIdxY < iHeight)       // inside the valid area?
        {
#if SVIDEO_CPP_FIX
          Intermediate_Int iDifflp = (Intermediate_Int)((pOrg[x] << iOutputBitShift[toChannelType(ch)]) - (pRec[x] << iReferenceBitShift[toChannelType(ch)]));
#else
          Intermediate_Int iDifflp  = (Intermediate_Int)((pOrg[x]<<iReferenceBitShift[toChannelType(ch)]) - (pRec[x]<<iOutputBitShift[toChannelType(ch)]) );
#endif
          SCPPDspsnr[chan]         += iDifflp * iDifflp;        // accumulate the squared difference
          iSize++;                                              // count the accumulated samples
        }
      }
      pOrg += iOrgStride;
      pRec += iRecStride;
    }
    SCPPDspsnr[chan] /= iSize;      // MSE
  }

  for (Int ch_indx = 0; ch_indx < pcPicD->getNumberValidComponents(); ch_indx++)
  {
    const ComponentID ch=ComponentID(ch_indx);
    const Int maxval = 255<<(iBitDepthForPSNRCalc[toChannelType(ch)]-8);
    Double fReflpsnr = maxval*maxval;
    // CPP-PSNR = 10*log10(maxval^2 / MSE)
    m_dCPPPSNR[ch_indx] = ( SCPPDspsnr[ch_indx] ? 10.0 * log10( fReflpsnr / (Double)SCPPDspsnr[ch_indx] ) : 999.99 );
  }

  // free the temporary CPP pictures
  if(TPicYUVRefCPP)
  {
    TPicYUVRefCPP->destroy();
    delete TPicYUVRefCPP;
    TPicYUVRefCPP = NULL;
  }
  if(TPicYUVOutCPP)
  {
    TPicYUVOutCPP->destroy();
    delete TPicYUVOutCPP;
    TPicYUVOutCPP = NULL;
  }
}
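In summary, for each colour component $c$ the function computes

$$\text{CPP-PSNR}_c = 10\log_{10}\frac{\left(255\cdot 2^{\,B-8}\right)^2}{\text{MSE}_c},$$

where $\text{MSE}_c$ is the mean squared error accumulated only over the valid CPP samples and $B$ is the bit depth used for the calculation (the maximum of the output and reference bit depths).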

Looking at the code, the CPP-PSNR computation rounds the coordinates fIdxX and fIdxY to the nearest integer, which introduces rounding error. In addition, the projection format conversions use interpolation filters, which introduce further distortion.

ERP-to-CPP conversion

In 360Lib, the 360-degree video that is read in is represented in 2D form, and in xCalculateCPPPSNR the CPP pictures are obtained by converting that 2D source. Taking the reference picture as an example, and considering only the case where the input format is ERP, the executed code is:

  // projection format conversion
  m_pcReferenceGeomtry->convertYuv(pcOrgPicYuv);
  m_pcReferenceGeomtry->geoConvert(m_pcRefCPPGeomtry);
  m_pcRefCPPGeomtry->framePack(TPicYUVRefCPP);

The conversion consists of three steps:
1. Expansion onto the 360Lib 2D plane: convertYuv pads and copies the ERP picture onto 360Lib's internal 2D coordinate plane.
2. Geometry conversion: geoConvert converts the geometry information from the source projection to the target one.
3. YUV conversion: framePack writes the converted YUV samples into the target picture.

The actual projection format conversion is performed by geoConvert; the conversion requires interpolation, which introduces distortion. A conceptual sketch of such a conversion is given below.
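The sketch is not 360Lib's actual implementation: it loops over the destination samples, maps each one destination pixel → sphere → source pixel, and then interpolates the source picture. Bilinear interpolation is used here for brevity, whereas 360Lib uses its own configurable interpolation filters; this resampling is where the interpolation distortion comes from.

#include <algorithm>
#include <cmath>
#include <vector>

// Conceptual ERP -> CPP resampling of a single plane (a sketch, not 360Lib code).
// src is an ERP plane of size srcW x srcH; the result is a CPP plane of size
// dstW x dstH, with samples outside the valid CPP area left at 0.
static std::vector<double> erpToCpp(const std::vector<double>& src, int srcW, int srcH,
                                    int dstW, int dstH)
{
  const double PI = 3.14159265358979323846;
  std::vector<double> dst(dstW * dstH, 0.0);
  for (int y = 0; y < dstH; y++)
  {
    for (int x = 0; x < dstW; x++)
    {
      // destination (CPP) pixel -> sphere, via the inverse CPP mapping
      double u = ((double)x / dstW) * 2.0 * PI - PI;
      double v = ((double)y / dstH) * PI - PI / 2.0;
      double phi   = 3.0 * std::asin(v / PI);                     // latitude
      double denom = 2.0 * std::cos(2.0 * phi / 3.0) - 1.0;
      if (denom < 1e-9) continue;                                 // degenerate pole row
      double lambda = u / denom;                                  // longitude
      if (lambda < -PI || lambda > PI) continue;                  // outside the valid CPP area

      // sphere -> source (ERP) pixel: equirectangular is linear in (lambda, phi)
      double sx = (lambda + PI) / (2.0 * PI) * srcW - 0.5;
      double sy = (PI / 2.0 - phi) / PI * srcH - 0.5;

      // bilinear interpolation, clamping at the picture borders
      int x0 = (int)std::floor(sx), y0 = (int)std::floor(sy);
      double fx = sx - x0, fy = sy - y0;
      auto at = [&](int xi, int yi) {
        xi = std::min(std::max(xi, 0), srcW - 1);
        yi = std::min(std::max(yi, 0), srcH - 1);
        return src[yi * srcW + xi];
      };
      dst[y * dstW + x] = (1 - fy) * ((1 - fx) * at(x0, y0)     + fx * at(x0 + 1, y0))
                        +      fy  * ((1 - fx) * at(x0, y0 + 1) + fx * at(x0 + 1, y0 + 1));
    }
  }
  return dst;
}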
