Gesture-based mouse control with OpenCV + Kinect

This post describes a gesture-recognition technique built with a Kinect camera and OpenCV that controls mouse movement by tracking finger motion. It shows in detail how to set up the Kinect sensor, process the depth and body-index frames, and detect and track the fingertip for precise mouse control.


This follows an earlier article by a CSDN predecessor (strongly recommended): it uses the Kinect camera for gesture recognition, detects the tip of the longest (i.e. highest) finger, and uses that point to drive the mouse. A rectangular frame in front of the user is mapped proportionally onto the computer screen, and moving the fingertip inside the frame moves the cursor. I can no longer find the original post, so I am writing this down as a backup.
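
The core of that window-to-screen mapping is plain proportionality: the fingertip's offset inside the operation window, divided by the window size, equals the cursor's offset on the screen divided by the screen size. With MOUSEEVENTF_ABSOLUTE, the Win32 mouse_event call expects coordinates normalized to the range 0-65535, so the whole mapping is one multiplication and one division. A minimal stand-alone sketch (the function name move_cursor_normalized and the example window values are mine, for illustration only):

#include <windows.h>

// Map a point inside the operation window onto the 0-65535 absolute
// coordinate range that MOUSEEVENTF_ABSOLUTE expects, then move the cursor.
void move_cursor_normalized(float x, float y,
	float winLeft, float winTop, float winWidth, float winHeight)
{
	DWORD nx = (DWORD)(65535.0f * (x - winLeft) / winWidth);
	DWORD ny = (DWORD)(65535.0f * (y - winTop) / winHeight);
	mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, nx, ny, 0, 0);
}

int main()
{
	// A fingertip at the center of a 200x120 window whose top-left corner
	// sits at (150, 100) should land at the center of the screen.
	move_cursor_normalized(250.0f, 160.0f, 150.0f, 100.0f, 200.0f, 120.0f);
	return 0;
}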

The code:
#include <iostream>
#include <Kinect.h>
#pragma comment ( lib, "kinect20.lib" )
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <math.h>
#include <stdio.h>
#include <windows.h>
using namespace std;
using namespace cv;

const double OK_LEVEL = 0.15;	//tolerance for deciding whether a pixel lies in the same depth plane as the hand, in meters
const int HAND_UP = 150;	//height of the region above the palm that may contain a fingertip, in depth-image pixels
const int HAND_LEFT_RIGHT = 100;	//half-width of the region around the palm that may contain a fingertip, in depth-image pixels
const int OK_MOUSE = 0;	//movement threshold before the mouse starts to move: larger is steadier, smaller is more precise
Vec3b COLOR_TABLE[] = { Vec3b(255,0,0),Vec3b(0,255,0),Vec3b(0,0,255),Vec3b(255,255,255),Vec3b(0,0,0) };
enum { BLUE, GREEN, RED, WHITE, BLACK };	//named indices into COLOR_TABLE (OpenCV stores pixels as BGR)

bool depth_range_check(int, int, int, int);
bool level_check(const CameraSpacePoint &, const CameraSpacePoint &);
bool distance_check(const CameraSpacePoint &, const CameraSpacePoint &);
bool check_new_point(DepthSpacePoint &, DepthSpacePoint &, int, int);
void draw_line(Mat &, const DepthSpacePoint &, DepthSpacePoint &);
void draw_body(Mat &, BYTE *, int, int);
void draw_Hand(Mat &, const DepthSpacePoint &);
void draw_circle(Mat &, int, int);
int main(void)
{
IKinectSensor * mySensor = nullptr;			//nullptr is the null-pointer keyword introduced in C++11
GetDefaultKinectSensor(&mySensor);			//get the default sensor
mySensor->Open();					//open the Kinect sensor

IFrameDescription	* myDescription = nullptr;	//describes a frame's dimensions
int	depthHeight = 0, depthWidth = 0;
IDepthFrameSource	* myDepthSource = nullptr;
IDepthFrameReader	* myDepthReader = nullptr;
IDepthFrame		* myDepthFrame = nullptr;
mySensor->get_DepthFrameSource(&myDepthSource);		//get the depth source from the sensor
myDepthSource->get_FrameDescription(&myDescription);	//get the depth source's frame description
myDescription->get_Height(&depthHeight);		//read the frame height and width
myDescription->get_Width(&depthWidth);
myDepthSource->OpenReader(&myDepthReader);		//the Depth frame is ready; the reader is opened directly


IBodyIndexFrameSource	* myBodyIndexSource = nullptr;
IBodyIndexFrameReader	* myBodyIndexReader = nullptr;
IBodyIndexFrame		* myBodyIndexFrame = nullptr;
mySensor->get_BodyIndexFrameSource(&myBodyIndexSource);
myBodyIndexSource->OpenReader(&myBodyIndexReader);		//the BodyIndex frame is ready; the reader is opened directly


IBodyFrameSource	* myBodySource = nullptr;
IBodyFrameReader	* myBodyReader = nullptr;
IBodyFrame		* myBodyFrame = nullptr;
mySensor->get_BodyFrameSource(&myBodySource);
myBodySource->OpenReader(&myBodyReader);			//the Body frame is ready; the reader is opened directly

ICoordinateMapper	* myMapper = nullptr;
mySensor->get_CoordinateMapper(&myMapper);			//prepare the coordinate mapper

DepthSpacePoint		front = { 0,0 };				//records the previous mouse position
DepthSpacePoint		depthUpLeft = { 1,1 };			//top-left and bottom-right corners of the operation window; their X and Y differences are later used as divisors, so the corners must not coincide
DepthSpacePoint		depthDownRight = { 0,0 };
bool	gotEdge = false;					//becomes true once the window edges have been determined
while (1)							//loop forever; ESC breaks out below
{
	while (myDepthReader->AcquireLatestFrame(&myDepthFrame) != S_OK);	//spin until the depth reader delivers the latest frame
	UINT	depthBufferSize = 0;						//UINT is a 32-bit unsigned integer
	UINT16	* depthBuffer = nullptr;					//pointer into the raw 16-bit depth data
	myDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &depthBuffer);	//read the Depth data

	while (myBodyIndexReader->AcquireLatestFrame(&myBodyIndexFrame) != S_OK);	//same for the BodyIndex frame
	UINT	bodyIndexBufferSize = 0;
	BYTE	* bodyIndexBuffer = nullptr;						//BYTE is an unsigned char
	myBodyIndexFrame->AccessUnderlyingBuffer(&bodyIndexBufferSize, &bodyIndexBuffer);	//read the BodyIndex data
	Mat	img(depthHeight, depthWidth, CV_8UC3);
	draw_body(img, bodyIndexBuffer, depthHeight, depthWidth);	//CV_8UC3 above means 8-bit unsigned elements with 3 channels (one BGR pixel per depth pixel); paint body pixels white, background black


	while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);			//read the Body data
	int	bodyBufferSize = 0;
	myBodySource->get_BodyCount(&bodyBufferSize);
	IBody	** bodyArray = new IBody *[bodyBufferSize];	//array of IBody pointers, one slot per trackable body
	for (int i = 0; i < bodyBufferSize; i++)
		bodyArray[i] = nullptr;
	myBodyFrame->GetAndRefreshBodyData(bodyBufferSize, bodyArray);

	for (int i = 0; i < bodyBufferSize; i++)					//iterate over the (up to six) bodies
	{
		BOOLEAN		result = false;
		if (bodyArray[i]->get_IsTracked(&result) == S_OK && result)		//only process bodies that are actually being tracked
		{
			Joint	jointArray[JointType_Count];
			bodyArray[i]->GetJoints(JointType_Count, jointArray);

			//Determine the top-left and bottom-right corners of the operation window (the position differs between the left and the right hand). The Head-to-Neck distance serves as the unit length; the corners are computed in camera space and then mapped into depth space, since the depth data is what is ultimately used.
			if (!gotEdge)
				if (jointArray[JointType_Neck].TrackingState == TrackingState_Tracked && jointArray[JointType_Head].TrackingState == TrackingState_Tracked)
				{
					CameraSpacePoint	cameraNeck = jointArray[JointType_Neck].Position;
					CameraSpacePoint	cameraHead = jointArray[JointType_Head].Position;	//positions of the two reference joints; other joints could serve as references here as well
					double	unite = sqrt(pow(cameraNeck.X - cameraHead.X, 2) + pow(cameraNeck.Y - cameraHead.Y, 2) + pow(cameraNeck.Z - cameraHead.Z, 2));	//unit length: the Euclidean distance between neck and head
					CameraSpacePoint	cameraUpLeft = { (float)(cameraNeck.X + unite * 0.5),(float)(cameraNeck.Y + unite * 3),cameraNeck.Z };
					CameraSpacePoint	cameraDownRight = { (float)(cameraNeck.X + unite * 4),(float)(cameraNeck.Y + unite),cameraNeck.Z };	//the window corners, offset from the neck in camera space
					myMapper->MapCameraPointToDepthSpace(cameraUpLeft, &depthUpLeft);
					myMapper->MapCameraPointToDepthSpace(cameraDownRight, &depthDownRight);
					gotEdge = true;
				}



			//Fingertip detection: record the tip of the longest (i.e. highest) finger
			DepthSpacePoint		highestPoint = { (float)(depthWidth - 1),(float)(depthHeight - 1) };	//sentinel value: the bottom-right corner means "no fingertip found this frame"
			if (jointArray[JointType_HandRight].TrackingState == TrackingState_Tracked)
			{
				CameraSpacePoint	cameraHandRight = jointArray[JointType_HandRight].Position;
				DepthSpacePoint		depthHandRight;		//DepthSpacePoint is a struct holding pixel coordinates in the depth image
				myMapper->MapCameraPointToDepthSpace(cameraHandRight, &depthHandRight);

				//scan the region around the right palm for fingertips
				for (int i = (int)depthHandRight.Y; i > depthHandRight.Y - HAND_UP; i--)	//walk upward from the palm
					for (int j = (int)depthHandRight.X - HAND_LEFT_RIGHT; j < depthHandRight.X + HAND_LEFT_RIGHT; j++)	//horizontal scan range around the palm
					{
						if (!depth_range_check(j, i, depthWidth, depthHeight))			//skip pixels outside the depth image
							continue;
						int	index = i * depthWidth + j;
						CameraSpacePoint	cameraTemp;
						DepthSpacePoint		depthTemp = { j,i };
						myMapper->MapDepthPointToCameraSpace(depthTemp, depthBuffer[index], &cameraTemp);

						if (bodyIndexBuffer[index] > 5 || (bodyIndexBuffer[index] <= 5 && !level_check(cameraHandRight, cameraTemp)))	//this pixel either does not belong to a body (the spot just above a fingertip), or belongs to a body but is not level with the hand (e.g. the chest)
						{
							bool	flag = true;
							for (int k = 1; k <= 5; k++)	//check that the 5 pixels directly below all belong to the body, are level with the hand, and sit at a plausible distance from it
							{
								int	index_check = (i + k) * depthWidth + j;
								depthTemp.X = j;
								depthTemp.Y = i + k;
								myMapper->MapDepthPointToCameraSpace(depthTemp, depthBuffer[index_check], &cameraTemp);
								if (bodyIndexBuffer[index_check] <= 5 && level_check(cameraHandRight, cameraTemp) && distance_check(cameraHandRight, cameraTemp))
									continue;
								else
								{
									flag = false;
									break;
								}
							}
							if (flag && i < highestPoint.Y)	//keep the highest candidate found so far
							{
								highestPoint.X = j;
								highestPoint.Y = i;
							}
						}
					}
			}


			int	windowWidth = (int)depthDownRight.X - (int)depthUpLeft.X;	//compute the size of the operation window
			int	windowHeight = (int)depthDownRight.Y - (int)depthUpLeft.Y;
			draw_line(img, depthUpLeft, depthDownRight);
			if (check_new_point(front, highestPoint, depthHeight, depthWidth))
			{
				draw_circle(img, highestPoint.X, highestPoint.Y);		//clamp to the window edges: stop at the border, don't bounce back
				if (highestPoint.X < depthUpLeft.X)
					highestPoint.X = depthUpLeft.X;
				if (highestPoint.X > depthDownRight.X)
					highestPoint.X = depthDownRight.X;
				if (highestPoint.Y > depthDownRight.Y)
					highestPoint.Y = depthDownRight.Y;
				if (highestPoint.Y < depthUpLeft.Y)
					highestPoint.Y = depthUpLeft.Y;
				int	mouseX = fabs(highestPoint.X - depthUpLeft.X);
				int	mouseY = fabs(highestPoint.Y - depthUpLeft.Y);
				mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, 65535 * mouseX / windowWidth, 65535 * mouseY / windowHeight, 0, 0);	//proportional mapping: point in small window / small-window size = point on screen / screen size (0-65535 is the absolute range)
				front = highestPoint;
			}
			else							//if the movement since the previous frame is below the threshold, keep the previous frame's state
			{
				draw_circle(img, front.X, front.Y);
				int	mouseX = fabs(front.X - depthUpLeft.X);
				int	mouseY = fabs(front.Y - depthUpLeft.Y);
				mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, 65535 * mouseX / windowWidth, 65535 * mouseY / windowHeight, 0, 0);
			}
		}
	}

	imshow("TEST", img);
	if (waitKey(30) == VK_ESCAPE)
		break;

	myDepthFrame->Release();
	myBodyIndexFrame->Release();
	myBodyFrame->Release();
	for (int i = 0; i < bodyBufferSize; i++)	//release each IBody before freeing the array
		if (bodyArray[i])
			bodyArray[i]->Release();
	delete[] bodyArray;
}
myBodySource->Release();
myBodyIndexSource->Release();
myDepthSource->Release();
myBodyReader->Release();
myBodyIndexReader->Release();
myDepthReader->Release();
myDescription->Release();
myMapper->Release();
mySensor->Close();
mySensor->Release();


return	0;

}

void draw_body(Mat & img, BYTE * buffer, int height, int width)	//paint body pixels white and everything else black
{
	for (int i = 0; i < height; i++)
		for (int j = 0; j < width; j++)
		{
			int index = buffer[i * width + j];
			if (index <= 5)					//body-index values 0-5 mark tracked bodies; 255 means background
				img.at<Vec3b>(i, j) = COLOR_TABLE[WHITE];
			else
				img.at<Vec3b>(i, j) = COLOR_TABLE[BLACK];
		}
}

void draw_circle(Mat & img, int x, int y)	//draw the fingertip position as a filled green circle
{
	Point center = { x,y };
	circle(img, center, 5, COLOR_TABLE[GREEN], -1, 8, 0);
}

void draw_Hand(Mat & img, const DepthSpacePoint & hand)	//draw the palm position as a filled blue circle (currently unused)
{
	Point center = { (int)hand.X,(int)hand.Y };
	circle(img, center, 5, COLOR_TABLE[BLUE], -1, 8, 0);
}

void draw_line(Mat & img, const DepthSpacePoint & UpLeft, DepthSpacePoint & DownRight)	//mark the four corners of the operation window and draw its outline
{
	Point a = { (int)UpLeft.X,(int)DownRight.Y };		//bottom-left
	circle(img, a, 5, COLOR_TABLE[RED], -1, 8, 0);
	Point b = { (int)UpLeft.X,(int)UpLeft.Y };		//top-left
	circle(img, b, 5, COLOR_TABLE[GREEN], -1, 8, 0);
	Point c = { (int)DownRight.X,(int)UpLeft.Y };		//top-right
	circle(img, c, 5, COLOR_TABLE[BLUE], -1, 8, 0);
	Point d = { (int)DownRight.X,(int)DownRight.Y };	//bottom-right
	circle(img, d, 5, COLOR_TABLE[WHITE], -1, 8, 0);
	line(img, a, b, COLOR_TABLE[RED], 1, 8, 0);
	line(img, b, c, COLOR_TABLE[RED], 1, 8, 0);
	line(img, c, d, COLOR_TABLE[RED], 1, 8, 0);
	line(img, a, d, COLOR_TABLE[RED], 1, 8, 0);
}

bool level_check(const CameraSpacePoint & hand, const CameraSpacePoint & temp)	//is the pixel within OK_LEVEL meters of the hand's depth plane?
{
	if (fabs(temp.Z - hand.Z) <= OK_LEVEL)
		return true;
	return false;
}

bool distance_check(const CameraSpacePoint & hand, const CameraSpacePoint & temp)	//is the pixel between 6 cm and 20 cm from the palm, a plausible fingertip distance?
{
	double ans = sqrt(pow(hand.X - temp.X, 2) + pow(hand.Y - temp.Y, 2) + pow(hand.Z - temp.Z, 2));
	if (ans <= 0.2 && ans >= 0.06)
		return true;
	return false;
}

bool depth_range_check(int x, int y, int depthWidth, int depthHeight)	//is (x, y) inside the depth image?
{
	if (x >= 0 && x < depthWidth && y >= 0 && y < depthHeight)
		return true;
	return false;
}

bool check_new_point(DepthSpacePoint & front, DepthSpacePoint & now, int height, int width)	//has the fingertip moved enough to warrant updating the mouse?
{
	if (now.X == width - 1 && now.Y == height - 1 && (front.X || front.Y))	//the sentinel value means no fingertip was found this frame
		return false;
	else if (fabs(now.X - front.X) <= OK_MOUSE && fabs(now.Y - front.Y) <= OK_MOUSE)	//movement is below the threshold
		return false;
	return true;
}
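
One caveat about the code above: mouse_event is a legacy Win32 API, and Microsoft recommends SendInput for new code. If you want to modernize the cursor movement, a minimal drop-in sketch (my own, using the same 0-65535 absolute coordinates) could look like this:

#include <windows.h>

// Move the cursor to normalized absolute coordinates (0-65535 on each axis)
// via SendInput, the recommended replacement for mouse_event.
void move_cursor_absolute(LONG nx, LONG ny)
{
	INPUT input = {};
	input.type = INPUT_MOUSE;
	input.mi.dx = nx;
	input.mi.dy = ny;
	input.mi.dwFlags = MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE;
	SendInput(1, &input, sizeof(INPUT));
}

Each mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, ...) call in the main loop could then be replaced by move_cursor_absolute(65535 * mouseX / windowWidth, 65535 * mouseY / windowHeight).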
