混合高斯模型 opencv
来源:互联网 发布:手机看报软件 编辑:程序博客网 时间:2024/05/16 06:02
一、GMM发展历史及现状
背景建模方法有很多种,如中值法、均值法、卡尔曼滤波器模型、码本背景模型等,其中混合高斯模型是最经典的算法。GMM最早是由Chris Stauffer等在[1]中提出的,该方法是按照高斯分布对每个像素建立模型, 并通过基于回归滤波的在线 EM 近似方法对模型参数进行更新,它能鲁棒地克服光照变化、 树枝摇动等造成的影响,但该方法也存在一些问题:1)该方法对运动物体在场景中停止不动或者长时间停止时检测失效,而且初始学习速度慢,在线更新费时、计算量大;2)无法完整准确地检测大并且运动缓慢的运动目标,运动目标的像素点不集中,只能检测到运动目标的部分轮廓,无法提取出目标对象的完整区域;3)无法将背景显露区域与运动目标区域很好地区分开;4)当运动目标由静止缓慢转化为运动时,易将背景显露区检测为前景,出现“影子”现象。
二、GMM缺点及改进方法
针对上述问题,一些科学研究者又在GMM算法的基础上做了很多的改进:张、白等人[2]引入分块思想,把图像分为L*L块;黄、胡等人[3]也引入了分块的思想,但是他们的分块理念是以当前像素点的8邻域作为一块;华、刘[4]把GMM与改进的帧差法(相邻两帧图像对应像素点8邻域像素值相减之和)相结合,提高了计算效率;Suo等人[5]将混合高斯模型中的模型个数改进为自适应的;刘等人[6]融合帧间差分法,检测背景显露区域和运动区域,很好地解决了问题4。除此之外,还有基于纹理的混合高斯模型。
三、GMM算法流程
四、代码实现
代码部分参考了下面网页的代码:http://blog.csdn.net/xw20084898/article/details/41826445 。自己在阅读的时候因为变量太多有些分不清;参数更新部分和自己阅读的论文有些不同,注释部分是原作的参数更新算法:
#include<opencv.hpp>
#include<highgui.h>
#include<cv.h>
#include<cstdlib>
#include<cmath>
#include<ctime>
using namespace cv;
using namespace std;

#define COMPONET 5       // number of Gaussian components per pixel
#define ALPHA 0.03       // learning rate
#define SD_INIT 6        // initial standard deviation for new/initial components
#define THRESHOLD 0.25   // minimum weight for a component to count as background
#define D 2.5            // match if |pixel - mean| <= D * sigma

// Per-pixel mixture-of-Gaussians background subtraction (Stauffer & Grimson).
// Reads a video file, models each gray pixel with COMPONET Gaussians, and
// displays the estimated background image and the extracted foreground mask.
int main()
{
    CvCapture *capture = cvCreateFileCapture("E:\\project2\\videos\\video.avi");
    if (capture == NULL) {
        // FIX: the original dereferenced a NULL capture when the file was missing.
        return -1;
    }

    IplImage *frame = cvQueryFrame(capture);
    if (frame == NULL) {
        cvReleaseCapture(&capture);
        return -1;
    }

    IplImage *grayFrame  = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    IplImage *foreground = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    IplImage *background = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);

    const int height = grayFrame->height;
    // NOTE(review): widthStep (row stride in bytes, possibly including padding)
    // is used as the image "width" everywhere below; kept from the original so
    // all index arithmetic stays consistent with imageData addressing, at the
    // cost of also modeling any padding bytes.
    const int width = grayFrame->widthStep;

    int    *backg      = (int *)malloc(sizeof(int) * width * height);      // per-pixel background estimate
    double *rank       = (double *)malloc(sizeof(double) * COMPONET);      // fitness weight/sigma per component
    int    *rank_index = (int *)malloc(sizeof(int) * COMPONET);            // FIX: allocated once, not per frame
    double *weight     = (double *)malloc(sizeof(double) * width * height * COMPONET);
    double *mean       = (double *)malloc(sizeof(double) * width * height * COMPONET);
    double *sigma      = (double *)malloc(sizeof(double) * width * height * COMPONET);
    double *u_diff     = (double *)malloc(sizeof(double) * width * height * COMPONET); // |pixel - mean|

    // FIX: the original called cvRandReal on an uninitialized CvRNG.
    CvRNG state = cvRNG((int64)time(NULL));

    // Initialize means randomly, deviations to SD_INIT, weights uniformly.
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            const int base = (i * width + j) * COMPONET;
            for (int k = 0; k < COMPONET; k++) {
                mean[base + k]   = cvRandReal(&state) * 255;
                sigma[base + k]  = SD_INIT;
                weight[base + k] = 1.0 / COMPONET;
            }
        }
    }

    while (1) {
        cvCvtColor(frame, grayFrame, CV_BGR2GRAY);

        // Distance of the current pixel value from every component mean.
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < width; j++) {
                const int base = (i * width + j) * COMPONET;
                const double pixel = (uchar)grayFrame->imageData[i * width + j];
                for (int k = 0; k < COMPONET; k++) {
                    u_diff[base + k] = fabs(pixel - mean[base + k]);
                }
            }
        }

        // Update the mixture for each pixel and classify it.
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < width; j++) {
                const int pix  = i * width + j;
                const int base = pix * COMPONET;
                const double pixel = (uchar)grayFrame->imageData[pix];

                int match = 0;
                double sum_weight = 0;
                for (int k = 0; k < COMPONET; k++) {
                    if (u_diff[base + k] <= D * sigma[base + k]) { // pixel matches component k
                        match = 1;
                        // Online update of weight, then mean, then deviation
                        // (sigma intentionally uses the freshly updated mean,
                        // as in the original).
                        // The paper's update would use p = ALPHA / weight as
                        // the per-component learning rate; this code uses
                        // ALPHA directly, matching the original author's note.
                        weight[base + k] = (1 - ALPHA) * weight[base + k] + ALPHA;
                        mean[base + k]   = (1 - ALPHA) * mean[base + k] + ALPHA * pixel;
                        sigma[base + k]  = sqrt((1 - ALPHA) * sigma[base + k] * sigma[base + k]
                                                + ALPHA * pow(pixel - mean[base + k], 2));
                    }
                    sum_weight += weight[base + k];
                }

                // Normalize weights so they sum to 1.
                for (int k = 0; k < COMPONET; k++) {
                    weight[base + k] = weight[base + k] / sum_weight;
                }

                // Locate the least-probable component and accumulate the
                // weighted-mean background estimate for this pixel.
                double min_weight = weight[base];
                int min_index = 0;
                backg[pix] = 0;
                for (int k = 0; k < COMPONET; k++) {
                    backg[pix] = (int)(backg[pix] + mean[base + k] * weight[base + k]);
                    if (weight[base + k] < min_weight) {
                        min_index = k;
                        min_weight = weight[base + k];
                    }
                    rank_index[k] = k;
                }
                background->imageData[pix] = (uchar)backg[pix];

                // If nothing matched, replace the weakest component with a new
                // one centered on the current pixel.
                if (match == 0) {
                    mean[base + min_index]  = pixel;
                    sigma[base + min_index] = SD_INIT;
                    // FIX: the original wrote "1 / COMPONET" — integer
                    // division == 0, so replacement components were created
                    // with zero weight and could never become background.
                    weight[base + min_index] = 1.0 / COMPONET;
                }

                // Rank components by fitness (weight / sigma), descending.
                for (int k = 0; k < COMPONET; k++) {
                    rank[k] = weight[base + k] / sigma[base + k];
                }
                for (int k = 1; k < COMPONET; k++) {
                    for (int m = 0; m < k; m++) {
                        if (rank[k] > rank[m]) {
                            double rank_tmp = rank[m];
                            rank[m] = rank[k];
                            rank[k] = rank_tmp;
                            int index_tmp = rank_index[m];
                            rank_index[m] = rank_index[k];
                            rank_index[k] = index_tmp;
                        }
                    }
                }

                // Classify: the highest-ranked components with weight above
                // THRESHOLD form the background model.
                // FIX: default to "foreground" first so a stale byte from the
                // previous frame never survives when no component qualifies.
                foreground->imageData[pix] = (uchar)pixel;
                match = 0;
                int b = 0;
                while (match == 0 && b < COMPONET) {
                    const int c = rank_index[b];
                    if (weight[base + c] >= THRESHOLD) {
                        if (fabs(u_diff[base + c]) <= D * sigma[base + c]) {
                            foreground->imageData[pix] = 0; // background pixel
                            match = 1;
                        }
                        else {
                            foreground->imageData[pix] = (uchar)pixel; // foreground pixel
                        }
                    }
                    b++;
                }
            }
        }

        cvShowImage("fore", foreground);
        cvShowImage("back", background);

        frame = cvQueryFrame(capture);
        if (frame == NULL) {
            // FIX: the original passed a NULL frame to cvShowImage/cvCvtColor
            // when the video reached its end.
            break;
        }
        cvShowImage("frame", frame);

        char s = cvWaitKey(33);
        if (s == 27) {
            break;
        }
    }

    // FIX: the original leaked every malloc'd buffer, all three images and
    // the capture (and malloc'd an unused "foreg" buffer, removed here).
    free(backg);
    free(rank);
    free(rank_index);
    free(weight);
    free(mean);
    free(sigma);
    free(u_diff);
    cvReleaseImage(&grayFrame);
    cvReleaseImage(&foreground);
    cvReleaseImage(&background);
    cvReleaseCapture(&capture);
    cvDestroyAllWindows();
    return 0;
}
五、参考文献
[1]Chris Stauffer,W.E.L Grimson.Adaptive background mixture models for real-time tracking
[2]张燕平、白云球.应用改进混合高斯模型的运动目标检测
[3]黄大卫、胡文翔.改进单高斯模型的视频前景提取与破碎目标合并算法
[4]华媛蕾、刘万军.改进混合高斯模型的运动目标检测算法
[5]Peng Suo, Yanjiang Wang.Improved Adaptive Gaussian Mixture Model for Background Subtraction
[6]刘鑫、刘辉.混合高斯模型和帧间差分相融合的自适应背景模型
- opencv高斯混合模型
- opencv混合高斯模型
- 混合高斯模型 opencv
- OpenCv的混合高斯模型
- OpenCV混合高斯模型前景分离
- OpenCV混合高斯模型前景分离
- OPENCV混合高斯模型原理
- OpenCV 中的GMM模型 高斯混合模型
- OPENCV中混合高斯背景模型的实现
- OPENCV中混合高斯背景模型的实现
- Opencv学习笔记(十)高斯混合模型
- OPENCV中混合高斯背景模型的实现
- OpenCV中混合高斯模型的实现
- OpenCV混合高斯模型函数注释说明
- OpenCV混合高斯模型代码分析与完善
- 混合高斯背景模型及opencv实现
- OpenCv中混合高斯模型的实现
- 高斯混合模型在opencv中的源码详解
- 反转有序链单链表
- 直播平台的高并发架构设计3.1-推流端
- Java中的HashMap和HashTable到底哪不同?
- 无法解析的外部符号 __imp__waveOutOpen解决方法
- 17.5节练习
- 混合高斯模型 opencv
- HandlerInterceptorAdapter 学习
- LeetCode 191,Number of 1 Bits
- 设置样式“height: 100%”
- Android笔记--handler的使用的一个例子
- Uva12096 集合栈计算机
- 景阳冈
- 内外网无缝切换
- (3) tcpdump -i veth_1 > veth_1.txt