// Kinect 2.0: capture RGB-D images at matching display resolution, save them,
// and draw the tracked human skeleton. (Reposted from a programming blog.)
#include "opencv2/opencv.hpp"
#include <iostream>
#include <opencv2\imgproc.hpp>
#include <opencv2\calib3d.hpp>
#include <opencv2\highgui.hpp>
#include <Kinect.h>
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <string>
#include <strstream>
#include <stdlib.h>
#include <WinUser.h>
#include <windows.h>
#include <conio.h>
using namespace cv;
using namespace std;
int flag_color = -1; // >= 0 arms the one-shot head-ROI text dump for the colour path; a console key press in main() sets it to 1
int flag_depth = -1; // same one-shot trigger for the depth path
int fps = 0;         // console capture counter printed in main(); wraps after 498
int rgbnum = 0;      // running index for saved ./rgbsave/<n>.jpg colour frames
int dnum = 0;        // running index for saved ./dsave/<n>.jpg depth frames
static int flag = 1; // unused in the visible code
static int maxx = 0; // unused in the visible code
int AviColor = 1;    // unused in the visible code
int AviForamt = -1;  // unused in the visible code (note: typo for "Format")
VideoWriter rgbwriter, dwriter; // opened in main(); the write() calls are commented out below
// Head bounding-box corners in depth-image coordinates (only assigned by commented-out code in draw_depth)
Point Head_leftup_depth, Head_leftdown_depth, Head_rightup_depth, Head_rightdown_depth;
// Head bounding-box corners in colour-image coordinates (only assigned by commented-out code in draw_color)
Point Head_leftup_color, Head_leftdown_color, Head_rightup_color, Head_rightdown_color;
// Copy a raw 16-bit Kinect depth buffer into a freshly allocated cv::Mat.
// @param pBuffer  pointer to nWidth*nHeight depth samples.
// @param nWidth   frame width in pixels.
// @param nHeight  frame height in pixels.
// @return CV_16UC1 Mat holding an unscaled copy of the samples.
Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight)
{
	Mat img(nHeight, nWidth, CV_16UC1);
	ushort* p_mat = (ushort*)img.data; // destination write cursor
	const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight); // one past the last sample
	// Straight element-wise copy; no scaling happens here.
	// (Removed: an unused pointer copy and a `const int init = (int)ptr;`
	// that truncated a 64-bit pointer to int, plus a pointless int->double->
	// ushort round-trip per pixel.)
	while (pBuffer < pBufferEnd)
	{
		*p_mat++ = *pBuffer++;
	}
	return img;
}
// Release a COM-style interface pointer and null it out; safe on null input.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease == nullptr)
		return;
	pInterfaceToRelease->Release();
	pInterfaceToRelease = nullptr;
}
// Kinect helper class: wraps sensor initialisation, per-frame depth/colour
// acquisition, skeleton drawing, and frame saving.
class Kinect
{
public:
static const int cDepthWidth = 512; // depth stream size
static const int cDepthHeight = 424;
static const int cColorWidth = 1920; // colour stream size
static const int cColorHeight = 1080;
int myBodyCount = 0; // number of body slots reported by the sensor (set in InitKinect)
Mat showImageDepth; // unused in the visible code
HRESULT InitKinect();// open the default sensor and all frame readers
void UpdateDepth();// acquire the latest depth frame and hand it to ProcessDepth
void UpdateColor();// acquire the latest colour frame and hand it to ProcessColor
void ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth); // draw the skeleton on the depth image, show and save it
void ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight); // draw the skeleton on the colour image, show and save it
void draw_color(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper); // draw one bone mapped into colour-image space
void draw_depth(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper); // draw one bone mapped into depth-image space
Kinect(); // constructor: allocates the pixel staging buffers
~Kinect(); // destructor: frees buffers and releases COM interfaces
private:
IKinectSensor* m_pKinectSensor;// current Kinect sensor
IDepthFrameReader* m_pDepthFrameReader;// depth frame reader
RGBQUAD* m_pDepthRGBX; // depth staging buffer (allocated in ctor; only null-checked afterwards)
IColorFrameReader* m_pColorFrameReader;// colour frame reader
RGBQUAD* m_pColorRGBX; // staging buffer for colour-format conversion
IColorFrame * myColorFrame = nullptr; // most recently acquired colour frame
IFrameDescription * myDescription = nullptr; // never assigned in the visible code
IColorFrameSource * myColorSource = nullptr; // never assigned in the visible code
IBodyFrameSource * myBodySource = nullptr;
IBodyFrameReader * myBodyReader = nullptr;
IBodyFrame * myBodyFrame = nullptr; // most recently acquired body frame
ICoordinateMapper * myMapper = nullptr; // camera-space -> image-space mapper
int colorHeight = 0, colorWidth = 0; // unused in the visible code
// Head ROI corners; NOTE these members shadow the file-scope globals of the same names.
Point Head_leftup_color, Head_leftdown_color, Head_rightup_color, Head_rightdown_color;
Point Head_leftup_depth, Head_leftdown_depth, Head_rightup_depth, Head_rightdown_depth;
};
int main()
{
Kinect kinect;
Mat showImageColor;
kinect.InitKinect();
rgbwriter.open("D:\\RGBVideo.avi", CV_FOURCC('X', 'V', 'I', 'D'), 500.0, Size(538, 702), 1);
dwriter.open("D:\\DVideo.avi", CV_FOURCC('X', 'V', 'I', 'D'), 500.0, Size(538, 702), 1);
while (1)
{
kinect.UpdateColor(); //程序的中心内容,获取数据并显示
kinect.UpdateDepth();
if (waitKey(1) >= 0)
{
break;
}
while (_kbhit())
{
_getch();
flag_color = 1;
flag_depth = 1;
if (fps > 498)fps = 1;
fps++;
cout << "已截取第" << fps << "帧!" << endl;
break;
}
}
return 0;
}
// Constructor: no sensor yet (readers are acquired later in InitKinect);
// pre-allocate the pixel staging buffers for both streams.
Kinect::Kinect()
{
	m_pKinectSensor = nullptr;
	m_pColorFrameReader = nullptr;
	m_pDepthFrameReader = nullptr;
	// Heap storage for pixel data in RGBX format, sized to the full streams.
	m_pDepthRGBX = new RGBQUAD[cDepthWidth * cDepthHeight];
	m_pColorRGBX = new RGBQUAD[cColorWidth * cColorHeight];
}
// Destructor: free the staging buffers, close the sensor, and release every
// COM interface this object may hold.
Kinect::~Kinect()
{
	if (m_pDepthRGBX)
	{
		delete[] m_pDepthRGBX; // free the depth staging buffer
		m_pDepthRGBX = NULL;
	}
	SafeRelease(m_pDepthFrameReader);// done with depth frame reader
	if (m_pColorRGBX)
	{
		delete[] m_pColorRGBX;
		m_pColorRGBX = NULL;
	}
	SafeRelease(m_pColorFrameReader);// done with color frame reader
	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();// close the Kinect Sensor
	}
	SafeRelease(m_pKinectSensor);
	// Bug fix: these were released unconditionally with ->Release(), which
	// dereferences null when InitKinect() failed or never ran (myDescription
	// and myColorSource are in fact never assigned anywhere in this file).
	SafeRelease(myBodyReader);
	SafeRelease(myBodySource);
	SafeRelease(myMapper);
	SafeRelease(myDescription);
	SafeRelease(myColorSource);
}
// Open the default Kinect sensor and create colour/depth/body readers plus
// the coordinate mapper. Returns a failing HRESULT if anything went wrong.
HRESULT Kinect::InitKinect()
{
	HRESULT hr = GetDefaultKinectSensor(&m_pKinectSensor); // there is normally exactly one sensor
	if (FAILED(hr))
	{
		return hr;
	}
	if (m_pKinectSensor)
	{
		IDepthFrameSource* pDepthFrameSource = NULL;
		IColorFrameSource* pColorFrameSource = NULL;
		hr = m_pKinectSensor->Open();
		// Bug fix: each hr below used to be overwritten by the next call, so
		// an earlier failure (and its null source/reader) could slip through.
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_BodyFrameSource(&myBodySource);
		if (SUCCEEDED(hr))
			hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
		if (SUCCEEDED(hr))
			hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
		if (SUCCEEDED(hr))
			hr = myBodySource->OpenReader(&myBodyReader);
		// Frame sources are no longer needed once the readers exist.
		SafeRelease(pColorFrameSource);
		SafeRelease(pDepthFrameSource);
	}
	if (!m_pKinectSensor || FAILED(hr))
	{
		printf("No ready Kinect found! \n");
		return E_FAIL;
	}
	myBodySource->get_BodyCount(&myBodyCount);
	m_pKinectSensor->get_CoordinateMapper(&myMapper);
	return hr;
}
void Kinect::UpdateDepth()
{
if (!m_pDepthFrameReader)
{
return;
}
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxDistance = 0;
UINT nBufferSize = 0;
UINT16 *pBuffer = NULL;
//Mat temp(height, width, CV_16UC1);
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
// In order to see the full range of depth (including the less reliable far field depth)
// we are setting nDepthMaxDistance to the extreme potential depth threshold
nDepthMaxDistance = USHRT_MAX;
// Note: If you wish to filter by reliable depth distance, uncomment the following line.
// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
}
if (SUCCEEDED(hr))
{
ProcessDepth(pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
}
SafeRelease(pFrameDescription);
}
SafeRelease(pDepthFrame);
}
void Kinect::UpdateColor()
{
if (!m_pColorFrameReader)
{
return;
}
HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&myColorFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
ColorImageFormat imageFormat = ColorImageFormat_None;
UINT nBufferSize = 0;
RGBQUAD *pBuffer = NULL;
if (SUCCEEDED(hr))
{
hr = myColorFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = myColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr))
{
if (imageFormat == ColorImageFormat_Bgra)
{
hr = myColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
}
else if (m_pColorRGBX)
{
pBuffer = m_pColorRGBX;
nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
hr = myColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);
}
else
{
hr = E_FAIL;
}
}
if (SUCCEEDED(hr))
{
ProcessColor(pBuffer, nWidth, nHeight);
}
SafeRelease(pFrameDescription);
}
SafeRelease(myColorFrame);
}
// Render the depth frame as 8-bit grayscale, overlay the tracked skeleton,
// display it, save it to ./dsave/<n>.jpg, and (when armed via flag_depth)
// dump the head ROI to a text file once.
void Kinect::ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth)
{
	// Make sure we've received valid data.
	if (m_pDepthRGBX && pBuffer && (nWidth == cDepthWidth) && (nHeight == cDepthHeight))
	{
		// Raw 16-bit depth -> 8-bit grayscale for display (scaled by 255/4500).
		Mat DepthImager = ConvertMat(pBuffer, nWidth, nHeight);
		Mat DepthImage(nHeight, nWidth, CV_8UC1);
		DepthImager.convertTo(DepthImage, CV_8UC1, 255.0 / 4500);
		// Busy-wait until a body frame is available so the skeleton matches
		// this depth frame. NOTE(review): this spins forever if tracking stops.
		while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);
		IBody ** myBodyArr = new IBody *[myBodyCount]; // body slots to refresh
		Mat show = DepthImage.clone();
		// Scale 512x424 up by ~1.436 and crop to 702x538 so the depth view
		// matches the cropped colour view.
		resize(DepthImage, show, Size(nWidth*1.436, nHeight*1.436));
		Rect rect(0, 15, 702, 538);
		Mat image = show(rect);
		Mat copy = image.clone();
		for (int i = 0; i < myBodyCount; i++)
		{
			myBodyArr[i] = nullptr;
		}
		if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) // fetch body data
			for (int i = 0; i < myBodyCount; i++)
			{
				BOOLEAN result = false;
				if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) // only draw tracked bodies
				{
					Joint myJointArr[JointType_Count];
					if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK) // draw each bone joint-to-joint
					{
						draw_depth(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper);
						draw_depth(copy, myJointArr[JointType_Neck], myJointArr[JointType_SpineShoulder], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_SpineMid], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ShoulderLeft], myJointArr[JointType_ElbowLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineMid], myJointArr[JointType_SpineBase], myMapper);
						draw_depth(copy, myJointArr[JointType_ShoulderRight], myJointArr[JointType_ElbowRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ElbowLeft], myJointArr[JointType_WristLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ElbowRight], myJointArr[JointType_WristRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_ThumbLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_HandLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_HipLeft], myJointArr[JointType_KneeLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_HipRight], myJointArr[JointType_KneeRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristRight], myJointArr[JointType_ThumbRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristRight], myJointArr[JointType_HandRight], myMapper);
						draw_depth(copy, myJointArr[JointType_HandLeft], myJointArr[JointType_HandTipLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_KneeLeft], myJointArr[JointType_AnkleLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_AnkleLeft], myJointArr[JointType_FootLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_KneeRight], myJointArr[JointType_AnkleRight], myMapper);
						draw_depth(copy, myJointArr[JointType_AnkleRight], myJointArr[JointType_FootRight], myMapper);
						draw_depth(copy, myJointArr[JointType_HandRight], myJointArr[JointType_HandTipRight], myMapper);
					}
				}
			}
		// Bug fix: release each IBody interface; the original only freed the
		// pointer array and leaked every body object once per frame.
		for (int i = 0; i < myBodyCount; i++)
		{
			SafeRelease(myBodyArr[i]);
		}
		delete[] myBodyArr;
		myBodyFrame->Release();
		// Save the annotated frame as ./dsave/<dnum>.jpg via the legacy C API.
		IplImage qImg;
		char xn[50];
		snprintf(xn, sizeof(xn), "%d", dnum); // snprintf: bounded, no overflow
		dnum++;
		char address[50] = "./dsave/";
		strcat(address, xn);
		strcat(address, ".jpg");
		qImg = IplImage(copy); // cv::Mat -> IplImage
		cvSaveImage(address, &qImg);
		imshow("Depth_Image", copy);
		if (flag_depth == 0)
		{
			// One-shot dump of the head ROI (bounds in Head_*_depth) to text.
			const char *cD = "C://Users//GS//Desktop//pig//Depth.txt";
			FILE *fp_Depth = fopen(cD, "w");
			if (fp_Depth != NULL) // bug fix: fprintf on a failed fopen crashed
			{
				for (int i = 0; i < copy.rows; i++)
				{
					int huanhang = 0; // counts skipped pixels; suppresses empty lines
					uchar *ptr = copy.ptr<uchar>(i);
					for (int j = 0; j < copy.cols; j++)
					{
						if (i < Head_leftup_depth.y || i > Head_rightdown_depth.y || j < Head_leftup_depth.x || j > Head_rightup_depth.x)huanhang++;
						else
						{
							// Bug fix: this image is CV_8UC1 (one byte per
							// pixel); the original indexed ptr[4*j] as if it
							// had 4 channels and read past the end of the row.
							fprintf(fp_Depth, "%d ", ptr[j]);
						}
					}
					if (huanhang != copy.cols)
						fprintf(fp_Depth, "\n");
				}
				fclose(fp_Depth);
			}
			flag_depth--;
		}
	}
}
// Overlay the tracked skeleton on the colour frame, display it, save it to
// ./rgbsave/<n>.jpg, and (when armed via flag_color) dump the head ROI's
// B/G/R channels to text files once.
void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{
	// Make sure we've received valid data.
	if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
	{
		Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer); // wraps the BGRA buffer, no copy
		// Busy-wait until a body frame is available so the skeleton matches
		// this colour frame. NOTE(review): this spins forever if tracking stops.
		while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);
		IBody ** myBodyArr = new IBody *[myBodyCount]; // body slots to refresh
		Mat showImage = ColorImage.clone();
		// Halve 1920x1080 to 960x540, then crop a 702x538 window at x=150.
		resize(ColorImage, showImage, Size(nWidth / 2, nHeight / 2));
		Rect rect(150, 0, 702, 538);
		Mat image_roi = showImage(rect);
		Mat copy = image_roi.clone();
		for (int i = 0; i < myBodyCount; i++)
		{
			myBodyArr[i] = nullptr;
		}
		if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) // fetch body data
			for (int i = 0; i < myBodyCount; i++)
			{
				BOOLEAN result = false;
				if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) // only draw tracked bodies
				{
					Joint myJointArr[JointType_Count];
					if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK)
					{
						// Bug fix: these previously called draw_depth, which maps
						// joints into DEPTH space (x1.436, y-15) — wrong for this
						// colour image. draw_color maps into colour space
						// (x/2 - 150, y/2), matching the resize/crop above.
						draw_color(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper);
						draw_color(copy, myJointArr[JointType_Neck], myJointArr[JointType_SpineShoulder], myMapper);
						draw_color(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderLeft], myMapper);
						draw_color(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_SpineMid], myMapper);
						draw_color(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderRight], myMapper);
						draw_color(copy, myJointArr[JointType_ShoulderLeft], myJointArr[JointType_ElbowLeft], myMapper);
						draw_color(copy, myJointArr[JointType_SpineMid], myJointArr[JointType_SpineBase], myMapper);
						draw_color(copy, myJointArr[JointType_ShoulderRight], myJointArr[JointType_ElbowRight], myMapper);
						draw_color(copy, myJointArr[JointType_ElbowLeft], myJointArr[JointType_WristLeft], myMapper);
						draw_color(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipLeft], myMapper);
						draw_color(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipRight], myMapper);
						draw_color(copy, myJointArr[JointType_ElbowRight], myJointArr[JointType_WristRight], myMapper);
						draw_color(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_ThumbLeft], myMapper);
						draw_color(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_HandLeft], myMapper);
						draw_color(copy, myJointArr[JointType_HipLeft], myJointArr[JointType_KneeLeft], myMapper);
						draw_color(copy, myJointArr[JointType_HipRight], myJointArr[JointType_KneeRight], myMapper);
						draw_color(copy, myJointArr[JointType_WristRight], myJointArr[JointType_ThumbRight], myMapper);
						draw_color(copy, myJointArr[JointType_WristRight], myJointArr[JointType_HandRight], myMapper);
						draw_color(copy, myJointArr[JointType_HandLeft], myJointArr[JointType_HandTipLeft], myMapper);
						draw_color(copy, myJointArr[JointType_KneeLeft], myJointArr[JointType_AnkleLeft], myMapper);
						draw_color(copy, myJointArr[JointType_AnkleLeft], myJointArr[JointType_FootLeft], myMapper);
						draw_color(copy, myJointArr[JointType_KneeRight], myJointArr[JointType_AnkleRight], myMapper);
						draw_color(copy, myJointArr[JointType_AnkleRight], myJointArr[JointType_FootRight], myMapper);
						draw_color(copy, myJointArr[JointType_HandRight], myJointArr[JointType_HandTipRight], myMapper);
					}
				}
			}
		// Bug fix: release each IBody interface; the original only freed the
		// pointer array and leaked every body object once per frame.
		for (int i = 0; i < myBodyCount; i++)
		{
			SafeRelease(myBodyArr[i]);
		}
		delete[] myBodyArr;
		myBodyFrame->Release();
		// Save the annotated frame as ./rgbsave/<rgbnum>.jpg via the legacy C API.
		IplImage qImg;
		char xn[50];
		snprintf(xn, sizeof(xn), "%d", rgbnum); // snprintf: bounded, no overflow
		rgbnum++;
		char address[50] = "./rgbsave/";
		strcat(address, xn);
		strcat(address, ".jpg");
		cout << xn << endl;
		qImg = IplImage(copy); // cv::Mat -> IplImage
		cvSaveImage(address, &qImg);
		imshow("Color_Image", copy);
		if (flag_color == 0)
		{
			// One-shot dump of the head ROI's channels (bounds in Head_*_color).
			const char *cR = "C://Users//GS//Desktop//pig//Color_R.txt";
			const char *cG = "C://Users//GS//Desktop//pig//Color_G.txt";
			const char *cB = "C://Users//GS//Desktop//pig//Color_B.txt";
			FILE *fp_R = fopen(cR, "w");
			FILE *fp_G = fopen(cG, "w");
			FILE *fp_B = fopen(cB, "w");
			if (fp_R && fp_G && fp_B) // bug fix: fprintf on a failed fopen crashed
			{
				for (int i = 0; i < copy.rows; i++)
				{
					int huanhang = 0; // counts skipped pixels; suppresses empty lines
					uchar *ptr = copy.ptr<uchar>(i);
					for (int j = 0; j < copy.cols; j++)
					{
						if (i < Head_leftup_color.y || i > Head_rightdown_color.y || j < Head_leftup_color.x || j > Head_rightup_color.x)huanhang++;
						else
						{
							// CV_8UC4: pixel j's bytes are B,G,R,A.
							fprintf(fp_B, "%d ", ptr[4 * j + 0]);
							fprintf(fp_G, "%d ", ptr[4 * j + 1]);
							fprintf(fp_R, "%d ", ptr[4 * j + 2]);
						}
					}
					if (huanhang != copy.cols)
					{
						fprintf(fp_B, "\n");
						fprintf(fp_G, "\n");
						fprintf(fp_R, "\n");
					}
				}
			}
			if (fp_R) fclose(fp_R);
			if (fp_G) fclose(fp_G);
			if (fp_B) fclose(fp_B);
			flag_color--;
		}
	}
}
// Draw one skeleton bone (r_1 -> r_2) onto the cropped colour image.
// Joints are mapped from camera space into colour space and then adjusted
// for the 1/2 downscale and 150-pixel left crop applied in ProcessColor.
// Only draws when BOTH joints are confidently tracked.
void Kinect::draw_color(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
	// Filter out inferred / not-tracked joints.
	if (r_1.TrackingState != TrackingState_Tracked || r_2.TrackingState != TrackingState_Tracked)
		return;
	ColorSpacePoint mapped;
	Point start, finish;
	myMapper->MapCameraPointToColorSpace(r_1.Position, &mapped);
	start.x = mapped.X / 2 - 150;
	start.y = mapped.Y / 2;
	myMapper->MapCameraPointToColorSpace(r_2.Position, &mapped);
	finish.x = mapped.X / 2 - 150;
	finish.y = mapped.Y / 2;
	line(img, start, finish, cvScalar(255, 0, 0), 8, 8);
	flag_color--; // counts down the one-shot ROI-dump trigger
}
// Draw one skeleton bone (r_1 -> r_2) onto the cropped depth image.
// Joints are mapped from camera space into depth space and then adjusted
// for the ~1.436 upscale and 15-pixel top crop applied in ProcessDepth.
// Only draws when BOTH joints are confidently tracked.
void Kinect::draw_depth(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
	// Filter out inferred / not-tracked joints.
	if (r_1.TrackingState != TrackingState_Tracked || r_2.TrackingState != TrackingState_Tracked)
		return;
	DepthSpacePoint mapped;
	Point start, finish;
	myMapper->MapCameraPointToDepthSpace(r_1.Position, &mapped);
	start.x = mapped.X * 1.436;
	start.y = mapped.Y * 1.436 - 15;
	myMapper->MapCameraPointToDepthSpace(r_2.Position, &mapped);
	finish.x = mapped.X * 1.436;
	finish.y = mapped.Y * 1.436 - 15;
	// 65534 saturates to white on the 8-bit display image.
	line(img, start, finish, cvScalar(65534), 8, 8);
	flag_depth--; // counts down the one-shot ROI-dump trigger
}
#include <iostream>
#include <opencv2\imgproc.hpp>
#include <opencv2\calib3d.hpp>
#include <opencv2\highgui.hpp>
#include <Kinect.h>
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <string>
#include <strstream>
#include <stdlib.h>
#include <WinUser.h>
#include <windows.h>
#include <conio.h>
using namespace cv;
using namespace std;
int flag_color = -1; // >= 0 arms the one-shot head-ROI text dump for the colour path; a console key press in main() sets it to 1
int flag_depth = -1; // same one-shot trigger for the depth path
int fps = 0;         // console capture counter printed in main(); wraps after 498
int rgbnum = 0;      // running index for saved ./rgbsave/<n>.jpg colour frames
int dnum = 0;        // running index for saved ./dsave/<n>.jpg depth frames
static int flag = 1; // unused in the visible code
static int maxx = 0; // unused in the visible code
int AviColor = 1;    // unused in the visible code
int AviForamt = -1;  // unused in the visible code (note: typo for "Format")
VideoWriter rgbwriter, dwriter; // opened in main(); the write() calls are commented out below
// Head bounding-box corners in depth-image coordinates (only assigned by commented-out code in draw_depth)
Point Head_leftup_depth, Head_leftdown_depth, Head_rightup_depth, Head_rightdown_depth;
// Head bounding-box corners in colour-image coordinates (only assigned by commented-out code in draw_color)
Point Head_leftup_color, Head_leftdown_color, Head_rightup_color, Head_rightdown_color;
// Copy a raw 16-bit Kinect depth buffer into a freshly allocated cv::Mat.
// @param pBuffer  pointer to nWidth*nHeight depth samples.
// @param nWidth   frame width in pixels.
// @param nHeight  frame height in pixels.
// @return CV_16UC1 Mat holding an unscaled copy of the samples.
Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight)
{
	Mat img(nHeight, nWidth, CV_16UC1);
	ushort* p_mat = (ushort*)img.data; // destination write cursor
	const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight); // one past the last sample
	// Straight element-wise copy; no scaling happens here.
	// (Removed: an unused pointer copy and a `const int init = (int)ptr;`
	// that truncated a 64-bit pointer to int, plus a pointless int->double->
	// ushort round-trip per pixel.)
	while (pBuffer < pBufferEnd)
	{
		*p_mat++ = *pBuffer++;
	}
	return img;
}
// Release a COM-style interface pointer and null it out; safe on null input.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease == nullptr)
		return;
	pInterfaceToRelease->Release();
	pInterfaceToRelease = nullptr;
}
// Kinect helper class: wraps sensor initialisation, per-frame depth/colour
// acquisition, skeleton drawing, and frame saving.
class Kinect
{
public:
static const int cDepthWidth = 512; // depth stream size
static const int cDepthHeight = 424;
static const int cColorWidth = 1920; // colour stream size
static const int cColorHeight = 1080;
int myBodyCount = 0; // number of body slots reported by the sensor (set in InitKinect)
Mat showImageDepth; // unused in the visible code
HRESULT InitKinect();// open the default sensor and all frame readers
void UpdateDepth();// acquire the latest depth frame and hand it to ProcessDepth
void UpdateColor();// acquire the latest colour frame and hand it to ProcessColor
void ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth); // draw the skeleton on the depth image, show and save it
void ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight); // draw the skeleton on the colour image, show and save it
void draw_color(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper); // draw one bone mapped into colour-image space
void draw_depth(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper); // draw one bone mapped into depth-image space
Kinect(); // constructor: allocates the pixel staging buffers
~Kinect(); // destructor: frees buffers and releases COM interfaces
private:
IKinectSensor* m_pKinectSensor;// current Kinect sensor
IDepthFrameReader* m_pDepthFrameReader;// depth frame reader
RGBQUAD* m_pDepthRGBX; // depth staging buffer (allocated in ctor; only null-checked afterwards)
IColorFrameReader* m_pColorFrameReader;// colour frame reader
RGBQUAD* m_pColorRGBX; // staging buffer for colour-format conversion
IColorFrame * myColorFrame = nullptr; // most recently acquired colour frame
IFrameDescription * myDescription = nullptr; // never assigned in the visible code
IColorFrameSource * myColorSource = nullptr; // never assigned in the visible code
IBodyFrameSource * myBodySource = nullptr;
IBodyFrameReader * myBodyReader = nullptr;
IBodyFrame * myBodyFrame = nullptr; // most recently acquired body frame
ICoordinateMapper * myMapper = nullptr; // camera-space -> image-space mapper
int colorHeight = 0, colorWidth = 0; // unused in the visible code
// Head ROI corners; NOTE these members shadow the file-scope globals of the same names.
Point Head_leftup_color, Head_leftdown_color, Head_rightup_color, Head_rightdown_color;
Point Head_leftup_depth, Head_leftdown_depth, Head_rightup_depth, Head_rightdown_depth;
};
int main()
{
Kinect kinect;
Mat showImageColor;
kinect.InitKinect();
rgbwriter.open("D:\\RGBVideo.avi", CV_FOURCC('X', 'V', 'I', 'D'), 500.0, Size(538, 702), 1);
dwriter.open("D:\\DVideo.avi", CV_FOURCC('X', 'V', 'I', 'D'), 500.0, Size(538, 702), 1);
while (1)
{
kinect.UpdateColor(); //程序的中心内容,获取数据并显示
kinect.UpdateDepth();
if (waitKey(1) >= 0)
{
break;
}
while (_kbhit())
{
_getch();
flag_color = 1;
flag_depth = 1;
if (fps > 498)fps = 1;
fps++;
cout << "已截取第" << fps << "帧!" << endl;
break;
}
}
return 0;
}
// Constructor: no sensor yet (readers are acquired later in InitKinect);
// pre-allocate the pixel staging buffers for both streams.
Kinect::Kinect()
{
	m_pKinectSensor = nullptr;
	m_pColorFrameReader = nullptr;
	m_pDepthFrameReader = nullptr;
	// Heap storage for pixel data in RGBX format, sized to the full streams.
	m_pDepthRGBX = new RGBQUAD[cDepthWidth * cDepthHeight];
	m_pColorRGBX = new RGBQUAD[cColorWidth * cColorHeight];
}
// Destructor: free the staging buffers, close the sensor, and release every
// COM interface this object may hold.
Kinect::~Kinect()
{
	if (m_pDepthRGBX)
	{
		delete[] m_pDepthRGBX; // free the depth staging buffer
		m_pDepthRGBX = NULL;
	}
	SafeRelease(m_pDepthFrameReader);// done with depth frame reader
	if (m_pColorRGBX)
	{
		delete[] m_pColorRGBX;
		m_pColorRGBX = NULL;
	}
	SafeRelease(m_pColorFrameReader);// done with color frame reader
	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();// close the Kinect Sensor
	}
	SafeRelease(m_pKinectSensor);
	// Bug fix: these were released unconditionally with ->Release(), which
	// dereferences null when InitKinect() failed or never ran (myDescription
	// and myColorSource are in fact never assigned anywhere in this file).
	SafeRelease(myBodyReader);
	SafeRelease(myBodySource);
	SafeRelease(myMapper);
	SafeRelease(myDescription);
	SafeRelease(myColorSource);
}
// Open the default Kinect sensor and create colour/depth/body readers plus
// the coordinate mapper. Returns a failing HRESULT if anything went wrong.
HRESULT Kinect::InitKinect()
{
	HRESULT hr = GetDefaultKinectSensor(&m_pKinectSensor); // there is normally exactly one sensor
	if (FAILED(hr))
	{
		return hr;
	}
	if (m_pKinectSensor)
	{
		IDepthFrameSource* pDepthFrameSource = NULL;
		IColorFrameSource* pColorFrameSource = NULL;
		hr = m_pKinectSensor->Open();
		// Bug fix: each hr below used to be overwritten by the next call, so
		// an earlier failure (and its null source/reader) could slip through.
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
		if (SUCCEEDED(hr))
			hr = m_pKinectSensor->get_BodyFrameSource(&myBodySource);
		if (SUCCEEDED(hr))
			hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
		if (SUCCEEDED(hr))
			hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
		if (SUCCEEDED(hr))
			hr = myBodySource->OpenReader(&myBodyReader);
		// Frame sources are no longer needed once the readers exist.
		SafeRelease(pColorFrameSource);
		SafeRelease(pDepthFrameSource);
	}
	if (!m_pKinectSensor || FAILED(hr))
	{
		printf("No ready Kinect found! \n");
		return E_FAIL;
	}
	myBodySource->get_BodyCount(&myBodyCount);
	m_pKinectSensor->get_CoordinateMapper(&myMapper);
	return hr;
}
void Kinect::UpdateDepth()
{
if (!m_pDepthFrameReader)
{
return;
}
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxDistance = 0;
UINT nBufferSize = 0;
UINT16 *pBuffer = NULL;
//Mat temp(height, width, CV_16UC1);
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
// In order to see the full range of depth (including the less reliable far field depth)
// we are setting nDepthMaxDistance to the extreme potential depth threshold
nDepthMaxDistance = USHRT_MAX;
// Note: If you wish to filter by reliable depth distance, uncomment the following line.
// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
}
if (SUCCEEDED(hr))
{
ProcessDepth(pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
}
SafeRelease(pFrameDescription);
}
SafeRelease(pDepthFrame);
}
void Kinect::UpdateColor()
{
if (!m_pColorFrameReader)
{
return;
}
HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&myColorFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
ColorImageFormat imageFormat = ColorImageFormat_None;
UINT nBufferSize = 0;
RGBQUAD *pBuffer = NULL;
if (SUCCEEDED(hr))
{
hr = myColorFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = myColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr))
{
if (imageFormat == ColorImageFormat_Bgra)
{
hr = myColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
}
else if (m_pColorRGBX)
{
pBuffer = m_pColorRGBX;
nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
hr = myColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);
}
else
{
hr = E_FAIL;
}
}
if (SUCCEEDED(hr))
{
ProcessColor(pBuffer, nWidth, nHeight);
}
SafeRelease(pFrameDescription);
}
SafeRelease(myColorFrame);
}
// Render the depth frame as 8-bit grayscale, overlay the tracked skeleton,
// display it, save it to ./dsave/<n>.jpg, and (when armed via flag_depth)
// dump the head ROI to a text file once.
void Kinect::ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth)
{
	// Make sure we've received valid data.
	if (m_pDepthRGBX && pBuffer && (nWidth == cDepthWidth) && (nHeight == cDepthHeight))
	{
		// Raw 16-bit depth -> 8-bit grayscale for display (scaled by 255/4500).
		Mat DepthImager = ConvertMat(pBuffer, nWidth, nHeight);
		Mat DepthImage(nHeight, nWidth, CV_8UC1);
		DepthImager.convertTo(DepthImage, CV_8UC1, 255.0 / 4500);
		// Busy-wait until a body frame is available so the skeleton matches
		// this depth frame. NOTE(review): this spins forever if tracking stops.
		while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);
		IBody ** myBodyArr = new IBody *[myBodyCount]; // body slots to refresh
		Mat show = DepthImage.clone();
		// Scale 512x424 up by ~1.436 and crop to 702x538 so the depth view
		// matches the cropped colour view.
		resize(DepthImage, show, Size(nWidth*1.436, nHeight*1.436));
		Rect rect(0, 15, 702, 538);
		Mat image = show(rect);
		Mat copy = image.clone();
		for (int i = 0; i < myBodyCount; i++)
		{
			myBodyArr[i] = nullptr;
		}
		if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) // fetch body data
			for (int i = 0; i < myBodyCount; i++)
			{
				BOOLEAN result = false;
				if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) // only draw tracked bodies
				{
					Joint myJointArr[JointType_Count];
					if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK) // draw each bone joint-to-joint
					{
						draw_depth(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper);
						draw_depth(copy, myJointArr[JointType_Neck], myJointArr[JointType_SpineShoulder], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_SpineMid], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ShoulderLeft], myJointArr[JointType_ElbowLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineMid], myJointArr[JointType_SpineBase], myMapper);
						draw_depth(copy, myJointArr[JointType_ShoulderRight], myJointArr[JointType_ElbowRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ElbowLeft], myJointArr[JointType_WristLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipRight], myMapper);
						draw_depth(copy, myJointArr[JointType_ElbowRight], myJointArr[JointType_WristRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_ThumbLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_HandLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_HipLeft], myJointArr[JointType_KneeLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_HipRight], myJointArr[JointType_KneeRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristRight], myJointArr[JointType_ThumbRight], myMapper);
						draw_depth(copy, myJointArr[JointType_WristRight], myJointArr[JointType_HandRight], myMapper);
						draw_depth(copy, myJointArr[JointType_HandLeft], myJointArr[JointType_HandTipLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_KneeLeft], myJointArr[JointType_AnkleLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_AnkleLeft], myJointArr[JointType_FootLeft], myMapper);
						draw_depth(copy, myJointArr[JointType_KneeRight], myJointArr[JointType_AnkleRight], myMapper);
						draw_depth(copy, myJointArr[JointType_AnkleRight], myJointArr[JointType_FootRight], myMapper);
						draw_depth(copy, myJointArr[JointType_HandRight], myJointArr[JointType_HandTipRight], myMapper);
					}
				}
			}
		// Bug fix: release each IBody interface; the original only freed the
		// pointer array and leaked every body object once per frame.
		for (int i = 0; i < myBodyCount; i++)
		{
			SafeRelease(myBodyArr[i]);
		}
		delete[] myBodyArr;
		myBodyFrame->Release();
		// Save the annotated frame as ./dsave/<dnum>.jpg via the legacy C API.
		IplImage qImg;
		char xn[50];
		snprintf(xn, sizeof(xn), "%d", dnum); // snprintf: bounded, no overflow
		dnum++;
		char address[50] = "./dsave/";
		strcat(address, xn);
		strcat(address, ".jpg");
		qImg = IplImage(copy); // cv::Mat -> IplImage
		cvSaveImage(address, &qImg);
		imshow("Depth_Image", copy);
		if (flag_depth == 0)
		{
			// One-shot dump of the head ROI (bounds in Head_*_depth) to text.
			const char *cD = "C://Users//GS//Desktop//pig//Depth.txt";
			FILE *fp_Depth = fopen(cD, "w");
			if (fp_Depth != NULL) // bug fix: fprintf on a failed fopen crashed
			{
				for (int i = 0; i < copy.rows; i++)
				{
					int huanhang = 0; // counts skipped pixels; suppresses empty lines
					uchar *ptr = copy.ptr<uchar>(i);
					for (int j = 0; j < copy.cols; j++)
					{
						if (i < Head_leftup_depth.y || i > Head_rightdown_depth.y || j < Head_leftup_depth.x || j > Head_rightup_depth.x)huanhang++;
						else
						{
							// Bug fix: this image is CV_8UC1 (one byte per
							// pixel); the original indexed ptr[4*j] as if it
							// had 4 channels and read past the end of the row.
							fprintf(fp_Depth, "%d ", ptr[j]);
						}
					}
					if (huanhang != copy.cols)
						fprintf(fp_Depth, "\n");
				}
				fclose(fp_Depth);
			}
			flag_depth--;
		}
	}
}
// Process one color frame: overlay the tracked skeleton, save the frame to
// ./rgbsave/<n>.jpg, display it, and (once, when flag_color hits 0) dump the
// per-channel pixel values of the head region to text files.
//
// pBuffer  - BGRA pixel buffer from the Kinect color stream (1920x1080)
// nWidth   - buffer width; must equal cColorWidth or the frame is ignored
// nHeight  - buffer height; must equal cColorHeight or the frame is ignored
void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{
    // Make sure we've received valid data
    if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
    {
        // Wrap the raw BGRA buffer without copying.
        Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer);
        // Busy-wait until a body frame is available.
        // NOTE(review): this spins at 100% CPU; an event-based wait would be kinder.
        while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);
        IBody** myBodyArr = new IBody *[myBodyCount]; // per-body interface slots
        for (int i = 0; i < myBodyCount; i++)
        {
            myBodyArr[i] = nullptr;
        }
        // Downscale to half resolution, then crop to the 702x538 region that
        // matches the depth view ("same resolution" capture).
        Mat showImage;
        resize(ColorImage, showImage, Size(nWidth / 2, nHeight / 2));
        Rect rect(150, 0, 702, 538);
        Mat copy = showImage(rect).clone();
        if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) // fill the body array
        {
            for (int i = 0; i < myBodyCount; i++)
            {
                BOOLEAN result = false;
                if (myBodyArr[i] && myBodyArr[i]->get_IsTracked(&result) == S_OK && result) // only tracked bodies
                {
                    Joint myJointArr[JointType_Count];
                    if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK)
                    {
                        // Bone list (pairs of joints) for the full skeleton.
                        // NOTE(review): draw_depth is used on a *color* image here,
                        // mirroring the original code — draw_color may have been
                        // intended; confirm alignment on hardware before changing.
                        static const JointType bones[][2] = {
                            { JointType_Head,          JointType_Neck },
                            { JointType_Neck,          JointType_SpineShoulder },
                            { JointType_SpineShoulder, JointType_ShoulderLeft },
                            { JointType_SpineShoulder, JointType_SpineMid },
                            { JointType_SpineShoulder, JointType_ShoulderRight },
                            { JointType_ShoulderLeft,  JointType_ElbowLeft },
                            { JointType_SpineMid,      JointType_SpineBase },
                            { JointType_ShoulderRight, JointType_ElbowRight },
                            { JointType_ElbowLeft,     JointType_WristLeft },
                            { JointType_SpineBase,     JointType_HipLeft },
                            { JointType_SpineBase,     JointType_HipRight },
                            { JointType_ElbowRight,    JointType_WristRight },
                            { JointType_WristLeft,     JointType_ThumbLeft },
                            { JointType_WristLeft,     JointType_HandLeft },
                            { JointType_HipLeft,       JointType_KneeLeft },
                            { JointType_HipRight,      JointType_KneeRight },
                            { JointType_WristRight,    JointType_ThumbRight },
                            { JointType_WristRight,    JointType_HandRight },
                            { JointType_HandLeft,      JointType_HandTipLeft },
                            { JointType_KneeLeft,      JointType_AnkleLeft },
                            { JointType_AnkleLeft,     JointType_FootLeft },
                            { JointType_KneeRight,     JointType_AnkleRight },
                            { JointType_AnkleRight,    JointType_FootRight },
                            { JointType_HandRight,     JointType_HandTipRight },
                        };
                        for (const auto & b : bones)
                            draw_depth(copy, myJointArr[b[0]], myJointArr[b[1]], myMapper);
                    }
                }
            }
        }
        // FIX: release each IBody COM interface before freeing the array —
        // the original only delete[]'d the array and leaked one reference
        // per tracked body per frame.
        for (int i = 0; i < myBodyCount; i++)
        {
            SafeRelease(myBodyArr[i]);
        }
        delete[] myBodyArr;
        myBodyFrame->Release();
        // Save the annotated frame as ./rgbsave/<n>.jpg.
        // FIX: snprintf with an explicit bound instead of sprintf+strcat into
        // a fixed char[50] with no overflow check.
        char address[64];
        snprintf(address, sizeof(address), "./rgbsave/%d.jpg", rgbnum);
        cout << rgbnum << endl;
        rgbnum++;
        imwrite(address, copy); // modern C++ API replaces IplImage/cvSaveImage
        imshow("Color_Image", copy);
        //rgbwriter.write(copy);
        // One-shot dump of the head region's R/G/B values to text files,
        // triggered when flag_color counts down to exactly 0.
        if (flag_color == 0)
        {
            const char *cR = "C://Users//GS//Desktop//pig//Color_R.txt";
            const char *cG = "C://Users//GS//Desktop//pig//Color_G.txt";
            const char *cB = "C://Users//GS//Desktop//pig//Color_B.txt";
            FILE *fp_R = fopen(cR, "w");
            FILE *fp_G = fopen(cG, "w");
            FILE *fp_B = fopen(cB, "w");
            // FIX: guard against fopen failure (original dereferenced NULL).
            if (fp_R && fp_G && fp_B)
            {
                for (int i = 0; i < copy.rows; i++)
                {
                    int huanhang = 0; // count of pixels outside the head box on this row
                    uchar *ptr = copy.ptr<uchar>(i);
                    for (int j = 0; j < copy.cols; j++)
                    {
                        // Only dump pixels inside the head bounding box.
                        if (i < Head_leftup_color.y || i > Head_rightdown_color.y || j < Head_leftup_color.x || j > Head_rightup_color.x)huanhang++;
                        else
                        {
                            // BGRA layout: channel order is B, G, R.
                            fprintf(fp_B, "%d ", ptr[4 * j + 0]);
                            fprintf(fp_G, "%d ", ptr[4 * j + 1]);
                            fprintf(fp_R, "%d ", ptr[4 * j + 2]);
                        }
                    }
                    // Newline only for rows that intersected the head box.
                    if (huanhang != copy.cols)
                    {
                        fprintf(fp_B, "\n");
                        fprintf(fp_G, "\n");
                        fprintf(fp_R, "\n");
                    }
                }
            }
            if (fp_R) fclose(fp_R);
            if (fp_G) fclose(fp_G);
            if (fp_B) fclose(fp_B);
            flag_color--;
        }
    }
}
// Draw one skeleton bone (a line between two joints) on the half-size,
// cropped color image. The segment is skipped unless BOTH joints are
// fully tracked. Side effect: decrements the global flag_color counter
// each time a bone is actually drawn.
void Kinect::draw_color(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
    // State filter: require both endpoints to be tracked.
    if (r_1.TrackingState != TrackingState_Tracked || r_2.TrackingState != TrackingState_Tracked)
        return;
    // Map a camera-space joint into pixel coordinates of the cropped view:
    // full-HD color coordinates are halved (image was resized to 960x540)
    // and shifted left by the 150-px crop offset.
    auto toColorPixel = [myMapper](const Joint & joint) {
        ColorSpacePoint cp;
        myMapper->MapCameraPointToColorSpace(joint.Position, &cp);
        Point px;
        px.x = cp.X / 2 - 150;
        px.y = cp.Y / 2;
        return px;
    };
    const Point start = toColorPixel(r_1);
    const Point end   = toColorPixel(r_2);
    line(img, start, end, Scalar(255, 0, 0), 8, 8); // blue in BGRA, thickness 8
    flag_color--;
}
// Draw one skeleton bone (a line between two joints) using depth-space
// coordinates. The segment is skipped unless BOTH joints are fully
// tracked. Side effect: decrements the global flag_depth counter each
// time a bone is actually drawn.
void Kinect::draw_depth(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
    // State filter: require both endpoints to be tracked.
    if (r_1.TrackingState != TrackingState_Tracked || r_2.TrackingState != TrackingState_Tracked)
        return;
    // Map a camera-space joint into depth space, then scale and offset so
    // the 512x424 depth coordinates line up with the cropped color view.
    // NOTE(review): 1.436 and -15 look like empirically tuned alignment
    // constants — confirm on hardware before changing.
    auto toDepthPixel = [myMapper](const Joint & joint) {
        DepthSpacePoint dp;
        myMapper->MapCameraPointToDepthSpace(joint.Position, &dp);
        Point px;
        px.x = dp.X * 1.436;
        px.y = dp.Y * 1.436 - 15;
        return px;
    };
    const Point start = toDepthPixel(r_1);
    const Point end   = toDepthPixel(r_2);
    line(img, start, end, Scalar(65534), 8, 8); // near-max intensity, thickness 8
    flag_depth--;
}
0 0
- kinect2.0同分辨率采集RGB-D图像并保存,并显示人体骨架
- matlab显示并保存RGB图像的单个通道图像
- ROS下订阅topic,显示并保存Kinect(Xtion pro live )深度摄像机的RGB图像
- Python OpenCV显示图像并保存图像
- opencv2.x遍历文件夹改变图像分辨率并保存
- 利用OpenCV读入,显示并保存图像
- Vtk读取并显示保存图像
- Qt环境下利用OpenCV采集摄像头图像并保存
- 使用opencv提取RGB图像的三个通道,并显示
- android 采集摄像头帧图像数据并显示
- PHP采集图片并保存
- PHP采集图片并保存
- 彩色(RGB/HSV)&灰度 图像像素值读取并保存到txt文件
- 利用opencv将录制的rgb图像转化为yuv文件并保存。
- OpenCV读取RGB图像像素值,并保存到txt中
- OpenCV 读RGB图像然后转换成灰度并保存成灰度图
- 读取并显示图像
- 使用 Video4Linux 采集USB摄像头的图像,并保存到一张 jpg图像文件中的程序
- Redis
- 关于图像处理时傅里叶谱和相的一点思考
- GPG入门教程
- 浅谈继承的那些细节知识点--java中继承的简单应用。
- matlab正则表达式的简单表示
- kinect2.0同分辨率采集RGB-D图像并保存,并显示人体骨架
- Ubuntu开发环境搭建(2)【安装arm-linux-gcc-4.4.3交叉编译环境】
- LeetCode:Relative Ranks
- 排序--插入排序
- redis与memcached比较
- 1089.Insert or Merge (25)...to be continued...
- Crackme 30
- MongoDB文档翻译-mongo Shell-给mongo Shell编写脚本
- MyBatis:Parameter Maps collection does not contain value for 的问题解决