Kinect获取 骨骼图骨骼三维坐标
该项目需要我们利用KinectV2 获取骨骼图并得到头部坐标,每一秒取一帧,一共五分钟,将坐标输出。
KinectV2 可以获得色彩图、深度图、骨骼图等等,其实一开始我也不知道该从哪里下手,但对骨骼图的代码进行了一下分析找到了突破点。
对KinectV2大体了解:
Kinect一共有三个坐标空间,分别是相机空间、深度空间、色彩空间,相机空间对应骨骼图等,深度空间对应深度图,色彩空间对应色彩图,并且Kinect SDK中有CoordinateMapper类来实现深度空间到相机空间和深度空间到色彩空间的转换。
具体查看官方文档:https://docs.microsoft.com/en-us/previous-versions/windows/kinect/dn785530(v=ieb.10)
重点需要关心获取的帧的数据如何存储的,对于骨骼图,SDK中有两个重要的类分别是IBody 和 Joint 分别代表人和骨骼点。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
| IBody : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE GetJoints( _Pre_equal_to_(JointType_Count) UINT capacity, _Out_writes_all_(capacity) Joint *joints) = 0; ... virtual HRESULT STDMETHODCALLTYPE get_IsTracked( _Out_ BOOLEAN *tracked) = 0; ... };
|
分别利用GetJoints函数和get_IsTracked函数可以获得Joints和判断是否被记录到了。
对于Joints
1 2 3 4 5 6 7 8 9 10
| #ifndef _Joint_ #define _Joint_ typedef struct _Joint { JointType JointType; CameraSpacePoint Position; TrackingState TrackingState; } Joint;
#endif
|
一个struct 里面有Joint类型、相机空间位置、追踪状态(是否被观测到),接着往里看CameraSpacePoint
1 2 3 4 5 6 7 8 9 10
| #ifndef _CameraSpacePoint_ #define _CameraSpacePoint_ typedef struct _CameraSpacePoint { float X; float Y; float Z; } CameraSpacePoint;
#endif
|
到这里就很清楚了,直接就可以得到相机空间地址了。(本来我以为需要先获得深度图地址,然后自己进行坐标计算的。。。)
然后SDK还定义了一些常量和其他类,在这里贴一下以防看代码的时候不懂
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
| enum _JointType { JointType_SpineBase = 0, JointType_SpineMid = 1, JointType_Neck = 2, JointType_Head = 3, JointType_ShoulderLeft = 4, JointType_ElbowLeft = 5, JointType_WristLeft = 6, JointType_HandLeft = 7, JointType_ShoulderRight = 8, JointType_ElbowRight = 9, JointType_WristRight = 10, JointType_HandRight = 11, JointType_HipLeft = 12, JointType_KneeLeft = 13, JointType_AnkleLeft = 14, JointType_FootLeft = 15, JointType_HipRight = 16, JointType_KneeRight = 17, JointType_AnkleRight = 18, JointType_FootRight = 19, JointType_SpineShoulder = 20, JointType_HandTipLeft = 21, JointType_ThumbLeft = 22, JointType_HandTipRight = 23, JointType_ThumbRight = 24, JointType_Count = ( JointType_ThumbRight + 1 ) } ; #endif
#ifndef _TrackingState_ #define _TrackingState_ typedef enum _TrackingState TrackingState;
enum _TrackingState { TrackingState_NotTracked = 0, TrackingState_Inferred = 1, TrackingState_Tracked = 2 } ; #endif
IBodyFrame : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE GetAndRefreshBodyData( UINT capacity, _Inout_updates_all_(capacity) IBody **bodies) = 0; virtual HRESULT STDMETHODCALLTYPE get_FloorClipPlane( _Out_ Vector4 *floorClipPlane) = 0; virtual HRESULT STDMETHODCALLTYPE get_RelativeTime( _Out_ TIMESPAN *relativeTime) = 0; virtual HRESULT STDMETHODCALLTYPE get_BodyFrameSource( _COM_Outptr_ IBodyFrameSource **bodyFrameSource) = 0; };
|
下面是项目代码的详细步骤:
第一步需要安装Kinect 的SDK,然后在Visual Studio中进行项目配置,具体可以参考以下文章:
https://blog.csdn.net/lizhiguo18/article/details/51037672
https://blog.csdn.net/weixin_43850620/article/details/103874847
对项目配置好后就可以写代码啦。
1.这一步是获取默认的Kinect传感器,然后打开它。
1 2 3
| IKinectSensor * mySensor = nullptr; GetDefaultKinectSensor(&mySensor); mySensor->Open();
|
2.准备读取ColorFrame
1 2 3 4 5 6 7 8 9 10 11
| IColorFrameSource * myColorSource = nullptr; mySensor->get_ColorFrameSource(&myColorSource); IColorFrameReader * myColorReader = nullptr; myColorSource->OpenReader(&myColorReader); int colorHeight = 0, colorWidth = 0; IFrameDescription * myDescription = nullptr; myColorSource->get_FrameDescription(&myDescription); myDescription->get_Height(&colorHeight); myDescription->get_Width(&colorWidth); IColorFrame * myColorFrame = nullptr; Mat original(colorHeight, colorWidth, CV_8UC4);
|
3.准备读取BodyFrame
1 2 3 4 5 6 7 8 9
| IBodyFrameSource * myBodySource = nullptr; mySensor->get_BodyFrameSource(&myBodySource); IBodyFrameReader * myBodyReader = nullptr; myBodySource->OpenReader(&myBodyReader); int myBodyCount = 0; myBodySource->get_BodyCount(&myBodyCount); IBodyFrame * myBodyFrame = nullptr; ICoordinateMapper * myMapper = nullptr; mySensor->get_CoordinateMapper(&myMapper);
|
4.每30帧取一帧进行处理并输出(骨骼流约30fps,相当于约每秒一帧;共记录330帧后结束)。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39
int fps = 0;   // frame index modulo 30: only frame 0 of each 30-frame window is processed
while (1) {
    // Busy-wait for the next color frame, then copy it into the Mat.
    while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK);
    myColorFrame->CopyConvertedFrameDataToArray(colorHeight * colorWidth * 4, original.data, ColorImageFormat_Bgra);
    Mat copy = original.clone();

    // Busy-wait for the matching body frame.
    while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);

    IBody ** myBodyArr = new IBody *[myBodyCount];
    for (int i = 0; i < myBodyCount; i++)
        myBodyArr[i] = nullptr;

    if (fps != 0) {
        // Skipped frame: advance the counter and release the frames.
        fps = (fps + 1) % 30;
        delete[] myBodyArr;
        myBodyFrame->Release();
        myColorFrame->Release();
        continue;
    }
    fps++;

    if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) {
        for (int i = 0; i < myBodyCount; i++) {
            BOOLEAN result = false;
            if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) {
                Joint myJointArr[JointType_Count];
                if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK) {
                    draw(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper);
                }
            }
        }
    }

    // BUGFIX: release the IBody interfaces obtained from GetAndRefreshBodyData —
    // the original leaked one COM reference per body on every processed frame.
    for (int i = 0; i < myBodyCount; i++)
        if (myBodyArr[i] != nullptr)
            myBodyArr[i]->Release();
    delete[] myBodyArr;
    myBodyFrame->Release();
    myColorFrame->Release();

    // Stop after 330 logged samples. NOTE(review): countFrame only advances
    // inside draw(), i.e. when a body is tracked — the loop never ends otherwise.
    if (countFrame == 330) {
        break;
    }
}
|
5.关闭Kinect并退出
1 2 3 4 5 6 7 8 9
| myMapper->Release(); myDescription->Release(); myColorReader->Release(); myColorSource->Release(); myBodyReader->Release(); myBodySource->Release(); mySensor->Close(); mySensor->Release(); return 0;
|
6.函数draw()
获取时间然后直接输出坐标。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
| void draw(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper) { countFrame++; if (r_1.TrackingState == TrackingState_Tracked ) { now = clock(); ofstream fout("01.txt", ios::app); cout << now; cout << r_1.Position.X << r_1.Position.Y << r_1.Position.Z << '\t ,'<<countFrame<<endl; fout << r_1.Position.X<<',' << r_1.Position.Y << ',' << r_1.Position.Z <<','<<now<< endl; fout.close(); } else { now = clock(); ofstream fout("01.txt", ios::app); cout << now; cout << 0 << 0 <<0 << endl; fout << 0 << ',' <<0 << ',' << 0 << ',' << now<<endl; fout.close(); } }
|
完整代码如下:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
|
#include <iostream> #include <opencv2\imgproc.hpp> //opencv头文件 #include <opencv2\calib3d.hpp> #include <opencv2\highgui.hpp> #include <Kinect.h> //Kinect头文件 #include<fstream> #include<ctime> using namespace std; using namespace cv;
// Global state shared between main() and draw().
int countFrame = 0; clock_t pre, now; clock_t begintime, endtime; // countFrame: number of logged samples; now: timestamp of the current sample; pre/endtime appear unused here — TODO confirm
void draw(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper); int main(void) { begintime = clock(); IKinectSensor * mySensor = nullptr; GetDefaultKinectSensor(&mySensor); mySensor->Open();
IColorFrameSource * myColorSource = nullptr; mySensor->get_ColorFrameSource(&myColorSource);
IColorFrameReader * myColorReader = nullptr; myColorSource->OpenReader(&myColorReader);
int colorHeight = 0, colorWidth = 0; IFrameDescription * myDescription = nullptr; myColorSource->get_FrameDescription(&myDescription); myDescription->get_Height(&colorHeight); myDescription->get_Width(&colorWidth);
IColorFrame * myColorFrame = nullptr; Mat original(colorHeight, colorWidth, CV_8UC4);
IBodyFrameSource * myBodySource = nullptr; mySensor->get_BodyFrameSource(&myBodySource); IBodyFrameReader * myBodyReader = nullptr; myBodySource->OpenReader(&myBodyReader); int myBodyCount = 0; myBodySource->get_BodyCount(&myBodyCount); IBodyFrame * myBodyFrame = nullptr; ICoordinateMapper * myMapper = nullptr; mySensor->get_CoordinateMapper(&myMapper); int fps = 0; while (1) { while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK); myColorFrame->CopyConvertedFrameDataToArray(colorHeight * colorWidth * 4, original.data, ColorImageFormat_Bgra); Mat copy = original.clone(); while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK); IBody ** myBodyArr = new IBody *[myBodyCount]; if (fps != 0) { fps++; fps = fps % 30; delete[]myBodyArr; myBodyFrame->Release(); myColorFrame->Release(); continue; } fps++; for (int i = 0; i < myBodyCount; i++) myBodyArr[i] = nullptr; if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK) for (int i = 0; i < myBodyCount; i++) { BOOLEAN result = false; if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) { Joint myJointArr[JointType_Count]; if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK) { draw(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper); } } } delete[]myBodyArr; myBodyFrame->Release(); myColorFrame->Release(); if (countFrame == 330) { break; } } myMapper->Release(); myDescription->Release(); myColorReader->Release(); myColorSource->Release(); myBodyReader->Release(); myBodySource->Release(); mySensor->Close(); mySensor->Release(); return 0; }
// Logs the head joint's camera-space coordinates (X, Y, Z in meters per the
// CameraSpacePoint definition) plus a clock() timestamp, to the console and
// appended as a CSV line to 01.txt. Writes 0,0,0 when the head is not tracked.
//   img      - color frame copy (currently unused)
//   r_1      - head joint
//   r_2      - neck joint (currently unused)
//   myMapper - coordinate mapper (currently unused)
void draw(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
    countFrame++;
    now = clock();
    ofstream fout("01.txt", ios::app);   // append one sample per call
    if (r_1.TrackingState == TrackingState_Tracked) {
        cout << now;
        // BUGFIX: the original streamed the multicharacter literal '\t ,'
        // (an implementation-defined int, not text) and ran X/Y/Z together
        // with no separator; print tab-separated values instead.
        cout << r_1.Position.X << '\t' << r_1.Position.Y << '\t'
             << r_1.Position.Z << "\t," << countFrame << endl;
        fout << r_1.Position.X << ',' << r_1.Position.Y << ',' << r_1.Position.Z << ',' << now << endl;
    } else {
        cout << now;
        cout << 0 << '\t' << 0 << '\t' << 0 << endl;
        fout << 0 << ',' << 0 << ',' << 0 << ',' << now << endl;
    }
    fout.close();
}
|