简单的Kinect前景抠图
来源:互联网 发布:python开发应用 编辑:程序博客网 时间:2024/06/08 05:30
这个是用Kinect做三维扫描时的第一步,就是将感兴趣的前景部分取下来。大家都知道,Kinect的最大特点就是可以采集到深度数据,利用深度数据就可以将前景和背景区分开来。
长话短说,先上效果图吧。
再上源代码:
/****************************************
 * Description: Grabs RGB + depth frames from a Kinect (via OpenNI) and
 *              extracts the foreground: pixels whose depth falls inside
 *              [MIN_DEPTH_MM, MAX_DEPTH_MM] millimetres are kept and
 *              everything else is discarded. Two snapshots taken at
 *              different moments are painted onto black canvases and
 *              shown side by side as a visual check.
 *
 * Methods of application:
 *
 * Author: Lixam
 * Time:   2012-11
 ****************************************/
#include "stdafx.h"
#include <XnCppWrapper.h>
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <vector>

using namespace cv;
using namespace xn;
using namespace std;

#define IMAGEWIDTH  640
#define IMAGEHEIGTH 480 /* original (misspelled) name kept so existing references still compile */

/* Depth window (millimetres) that counts as "foreground".
 * Was hard-coded as 1000/1500 inside GetROIdata; lifted out so it can be tuned. */
static const int MIN_DEPTH_MM = 1000;
static const int MAX_DEPTH_MM = 1500;

typedef struct {
    unsigned char R;
    unsigned char G;
    unsigned char B;
} RGBPIXEL;

typedef struct {
    unsigned char data_L; /* low byte of the 16-bit depth value  */
    unsigned char data_H; /* high byte of the 16-bit depth value */
} DEPTHPIXEL;

/* OpenNI generators/context (file-scope globals, as in the original design). */
ImageGenerator m_imageGenerator;
DepthGenerator m_depthGenerator;
Context        m_context;

/*************************************************************************
 *
 * GetROIdata()
 *
 * Parameters:
 *
 *   IplImage* image                  - source colour image (8-bit, 3 ch, RGB order)
 *   IplImage* depth                  - source depth image (16-bit, 1 ch, little-endian mm)
 *   vector<RGBPIXEL>&  ROIimage      - out: colour of every foreground pixel
 *   vector<DEPTHPIXEL>& ROIdepth     - out: raw depth bytes of every foreground pixel
 *   vector<CvPoint>& ROIimagePoint   - out: (x, y) coordinate of every foreground pixel
 *
 * Return:
 *
 *   void
 *
 * Description:
 *
 *   Scans the whole frame and collects every pixel whose depth lies in
 *   [MIN_DEPTH_MM, MAX_DEPTH_MM]. The three output vectors stay in
 *   lock-step: element k of each describes the same pixel. Results are
 *   appended; the caller is expected to clear the vectors between frames.
 *
 ************************************************************************/
void GetROIdata(IplImage* image, IplImage* depth,
                vector<RGBPIXEL>& ROIimage,
                vector<DEPTHPIXEL>& ROIdepth,
                vector<CvPoint>& ROIimagePoint)
{
    for (int j = 0; j < IMAGEHEIGTH; j++) {
        for (int i = 0; i < IMAGEWIDTH; i++) {
            /* Read the two depth bytes once; the original re-read them with
             * four CV_IMAGE_ELEM lookups per pixel inside the range test. */
            const unsigned char lo = CV_IMAGE_ELEM(depth, unsigned char, j, i * 2);
            const unsigned char hi = CV_IMAGE_ELEM(depth, unsigned char, j, i * 2 + 1);
            const int depthMM = lo + (hi << 8);

            if (depthMM < MIN_DEPTH_MM || depthMM > MAX_DEPTH_MM)
                continue; /* background pixel — skip */

            RGBPIXEL rgbData;
            rgbData.R = CV_IMAGE_ELEM(image, unsigned char, j, i * 3);
            rgbData.G = CV_IMAGE_ELEM(image, unsigned char, j, i * 3 + 1);
            rgbData.B = CV_IMAGE_ELEM(image, unsigned char, j, i * 3 + 2);

            DEPTHPIXEL depthData;
            depthData.data_L = lo;
            depthData.data_H = hi;

            CvPoint point;
            point.x = i;
            point.y = j;

            ROIimagePoint.push_back(point);
            ROIimage.push_back(rgbData);
            ROIdepth.push_back(depthData);
        }
    }
}

/**********Test function**************/
/* Paints the foreground pixels of two snapshots onto black canvases and
 * shows them in the "image1"/"image2" windows.
 *
 * Fixes vs. the original:
 *   - the two temporary IplImages are now released (they leaked on every call);
 *   - the iterator-pair loop conditions use && instead of the comma operator,
 *     which silently discarded the first test;
 *   - the vectors are taken by const reference instead of by value (the
 *     call sites are unchanged). */
void ShowROIimage(const vector<RGBPIXEL>& ROIimage1, const vector<RGBPIXEL>& ROIimage2,
                  const vector<CvPoint>& ROIimagePoint1, const vector<CvPoint>& ROIimagePoint2)
{
    IplImage* image1 = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGTH), IPL_DEPTH_8U, 3);
    IplImage* image2 = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGTH), IPL_DEPTH_8U, 3);
    cvZero(image1);
    cvZero(image2);

    if (!ROIimagePoint1.empty() && !ROIimagePoint2.empty()) {
        vector<CvPoint>::const_iterator  pointItr;
        vector<RGBPIXEL>::const_iterator rgbItr;

        /***Process first picture****/
        for (pointItr = ROIimagePoint1.begin(), rgbItr = ROIimage1.begin();
             pointItr != ROIimagePoint1.end() && rgbItr != ROIimage1.end();
             ++pointItr, ++rgbItr) {
            CV_IMAGE_ELEM(image1, unsigned char, pointItr->y, pointItr->x * 3)     = rgbItr->R;
            CV_IMAGE_ELEM(image1, unsigned char, pointItr->y, pointItr->x * 3 + 1) = rgbItr->G;
            CV_IMAGE_ELEM(image1, unsigned char, pointItr->y, pointItr->x * 3 + 2) = rgbItr->B;
        } // end for

        /***Process second picture****/
        for (pointItr = ROIimagePoint2.begin(), rgbItr = ROIimage2.begin();
             pointItr != ROIimagePoint2.end() && rgbItr != ROIimage2.end();
             ++pointItr, ++rgbItr) {
            CV_IMAGE_ELEM(image2, unsigned char, pointItr->y, pointItr->x * 3)     = rgbItr->R;
            CV_IMAGE_ELEM(image2, unsigned char, pointItr->y, pointItr->x * 3 + 1) = rgbItr->G;
            CV_IMAGE_ELEM(image2, unsigned char, pointItr->y, pointItr->x * 3 + 2) = rgbItr->B;
        } // end for
    } // end if

    /********show images********/
    /* Pixels were stored in RGB order; HighGUI expects BGR. */
    cvCvtColor(image1, image1, CV_RGB2BGR);
    cvCvtColor(image2, image2, CV_RGB2BGR);
    cvShowImage("image1", image1);
    cvShowImage("image2", image2);

    /* cvShowImage keeps its own copy of the pixel data, so the canvases
     * can be freed immediately (the original leaked both every call). */
    cvReleaseImage(&image1);
    cvReleaseImage(&image2);
}

int main(int argc, _TCHAR* argv[])
{
    XnStatus res;
    vector<RGBPIXEL>   ROIimage1, ROIimage2;
    vector<DEPTHPIXEL> ROIdepth1, ROIdepth2;
    vector<CvPoint>    ROIimagePoint1, ROIimagePoint2;

    /**********Initiate OpenNI********/
    res = m_context.Init();
    res = m_imageGenerator.Create(m_context);
    res = m_depthGenerator.Create(m_context);

    ImageMetaData ImageMap;
    DepthMetaData DepthMap;
    IplImage* depthImg = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGTH), IPL_DEPTH_16U, 1);
    IplImage* image    = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGTH), IPL_DEPTH_8U, 3);

    // Set output mode
    XnMapOutputMode mapMode;
    mapMode.nXRes = IMAGEWIDTH;
    mapMode.nYRes = IMAGEHEIGTH;
    mapMode.nFPS  = 30;
    res = m_imageGenerator.SetMapOutputMode(mapMode);
    res = m_depthGenerator.SetMapOutputMode(mapMode);

    /*******Test codes*************/
    cvNamedWindow("image1", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("image2", CV_WINDOW_AUTOSIZE);
    /*********/

    /* Register the depth map onto the colour camera's viewpoint so the two
     * images line up pixel-for-pixel. */
    m_depthGenerator.GetAlternativeViewPointCap().SetViewPoint(m_imageGenerator);

    // Generate data
    m_context.StartGeneratingAll();

    int imageCount = 0; // which of the two snapshots we are filling (0 or 1)

    /* WaitAndUpdateAll() returns XN_STATUS_OK (0) on success. The original
     * also called it twice more per iteration (once before the loop body
     * ran and once inside it), silently throwing frames away; one call per
     * iteration is enough. */
    while (m_context.WaitAndUpdateAll() == XN_STATUS_OK) {
        m_imageGenerator.GetMetaData(ImageMap);
        m_depthGenerator.GetMetaData(DepthMap);
        memcpy(image->imageData,    ImageMap.Data(), IMAGEWIDTH * IMAGEHEIGTH * 3);
        memcpy(depthImg->imageData, DepthMap.Data(), IMAGEWIDTH * IMAGEHEIGTH * 2);

        /*************Get two pictures from different times**********/
        if (0 == imageCount) {
            /*************Get interested rect*************/
            GetROIdata(image, depthImg, ROIimage1, ROIdepth1, ROIimagePoint1);
            imageCount++;
        } else if (1 == imageCount) {
            GetROIdata(image, depthImg, ROIimage2, ROIdepth2, ROIimagePoint2);
            imageCount = 0;
            /* NOTE: the original was missing the ';' after this call and
             * would not have compiled as posted. */
            ShowROIimage(ROIimage1, ROIimage2, ROIimagePoint1, ROIimagePoint2);

            /***********Clear all vectors*******************/
            ROIimage1.clear();
            ROIimage2.clear();
            ROIdepth1.clear();
            ROIdepth2.clear();
            ROIimagePoint1.clear();
            ROIimagePoint2.clear();
        } else {
            imageCount++;
        }
        cvWaitKey(20);
    } // end while

    m_context.StopGeneratingAll();
    m_context.Shutdown();
    cvReleaseImage(&depthImg);
    cvReleaseImage(&image);
    return 0;
}
以上代码运行结果跟我贴的图有所区别,这是我修改过的,没有边缘检测和显示部分,最后显示的是不同时刻采集到的两幅RGB图像。当物体距离传感器1米到1.5米范围内会被显示。
程序代码就不解释了,比较基础,大家自己看吧。。。适合给刚接触Kinect的朋友们参考。
- 简单的Kinect前景抠图
- Kinect实现简单的三维重建
- Kinect实现简单的三维重建
- Kinect实现简单的三维重建
- Hello Kinect -- 一个简单的kinect 人体3D追踪程序
- 简单谈谈3D打印培训的发展和前景
- Processing 结合OpenNI开发kinect的简单笔记
- kinect 2.0:利用深度进行前景分割(c++实现)
- Kinect v2.0原理介绍之六:Kinect深度图与彩色图的坐标校准
- Kinect v1和Kinect v2的比较
- 看不到程序员的前景
- XForms的前景蓝图
- .net的前景如何?
- 股市的前景美好?
- web开发的前景
- JSF的前景如何?
- java 嵌入的前景
- 信息技术发展的前景
- query.list();卡死
- 利用VBA创建Excel新菜单
- 使用/proc测试
- 常用ajax封装的方法
- Struts 2 methodfilterinterceptor--拦截方法的拦截器
- 简单的Kinect前景抠图
- C++ 实现反射
- 内存分配函数总结
- 内存分配方式
- Windows & Linux 文件格式之迷 < VI下删除文本中的^M>
- 【openstack】Nova(Folsom)虚拟化层Driver分析
- 执行外部程序
- 布局中的Padding与Margin的区别
- 单链表