YOLOv5 Person Motion Detection in 200 Lines of C++

This post shows how to use the open-source YOLOv5 model to detect a single person and, with stillness suppression plus a fixed-length queue, turn that detection into single-person motion detection. It walks through the complete flow: converting the model, deploying it in a C++ project, and processing video frames to detect motion.

I put together a simple single-person motion detector on top of YOLOv5; handling multiple people requires multi-object tracking, which I'll get to when I have time. The idea is object detection plus stillness suppression, in the following steps:
(1) Train a YOLOv5 model. I skipped training here and directly used the official open-source model yolov5s.pt;
(2) Run export.py from the yolov5 repository to convert the .pt model to ONNX (e.g. python export.py --weights yolov5s.pt --include onnx);
(3) Write a C++ deployment project for YOLOv5, covering preprocessing, inference, and postprocessing;
(4) Maintain a fixed-length queue that stores the center point of the single person box detected in each frame;
(5) Read the first frame and run YOLOv5 person detection on it. The detector may return zero or several people; if the count is not exactly 1, skip straight to the next frame;
(6) Read subsequent frames until the number of frames with a detected person reaches the queue length, at which point the queue is full;
(7) From then on, for each processed frame, pop the queue head and push the newly computed box center onto the tail;
(8) Compute the distance between the box centers of adjacent queue elements. If every adjacent distance exceeds a threshold, the target has been moving the whole time, so draw the motion box; otherwise draw nothing.
In practice the queue length and the movement threshold need tuning; I used a queue length of 5 and a movement threshold of the box diagonal divided by 100 (for a 300x600 box that is about 6.7 pixels per frame). The queue logic on its own is sketched below; the full code after it basically achieves real-time motion detection, though I have no idea what it's actually useful for; I just wrote it for fun while stuck at home during the pandemic.
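
Here is a minimal, self-contained sketch of just the queue logic from steps (4) through (8). The helper name is_moving and the use of std::deque are my additions for illustration; the full code below keeps the centers in a std::vector instead.

#include <cmath>
#include <deque>
#include <opencv2/core.hpp>

const int WINDOW = 5;	// queue length, step (4)

// Push the newest box center, drop the oldest once the window is full,
// and report motion only if every pair of adjacent centers is farther
// apart than `threshold` pixels (steps (5)-(8)).
bool is_moving(std::deque<cv::Point>& centers, cv::Rect box, float threshold)
{
	centers.push_back(cv::Point(box.x + box.width / 2, box.y + box.height / 2));
	if ((int)centers.size() > WINDOW)
		centers.pop_front();
	if ((int)centers.size() < WINDOW)
		return false;	// window not full yet: steps (5)-(6)
	for (size_t i = 1; i < centers.size(); i++)
	{
		float dx = float(centers[i].x - centers[i - 1].x);
		float dy = float(centers[i].y - centers[i - 1].y);
		if (std::sqrt(dx * dx + dy * dy) < threshold)
			return false;	// stood still between two frames: suppress
	}
	return true;	// moved at every step: show the box
}

A caller would pass the diagonal-based threshold from step (8), e.g. is_moving(centers, box, std::sqrt(float(box.width * box.width + box.height * box.height)) / 100).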

#include <cmath>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>


#define DEBUG


// Constants
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.6;
const float NMS_THRESHOLD = 0.25;
const float CONFIDENCE_THRESHOLD = 0.6;

const std::vector<std::string> class_name = {
	"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
	"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
	"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
	"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
	"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
	"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
	"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
	"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
	"hair drier", "toothbrush" };

const int QUEUE_SIZE = 5;


// Draw the class label next to the top-left corner of the detection box
void draw_label(cv::Mat& input_image, std::string label, int left, int top)
{
	int baseLine;
	cv::Size label_size = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.7, 1, &baseLine);
	top = std::max(top, label_size.height);
	cv::putText(input_image, label, cv::Point(left, top + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 255, 255), 1);
}


// Preprocess: build a 640x640 blob scaled to [0,1] with BGR->RGB swap, then run inference
std::vector<cv::Mat> preprocess(cv::Mat& input_image, cv::dnn::Net& net)
{
	cv::Mat blob;
	cv::dnn::blobFromImage(input_image, blob, 1. / 255., cv::Size(INPUT_WIDTH, INPUT_HEIGHT), cv::Scalar(), true, false);

	net.setInput(blob);

	std::vector<cv::Mat> preprocess_image;
	net.forward(preprocess_image, net.getUnconnectedOutLayersNames());

	return preprocess_image;
}


// Postprocess: decode the 25200x85 output tensor, filter by score, apply NMS,
// and return the surviving boxes of the requested class
std::vector<cv::Rect> postprocess(std::vector<cv::Mat>& preprocess_image, cv::Mat& output_image, int class_index)
{
	std::vector<int> class_ids;
	std::vector<float> confidences;
	std::vector<cv::Rect> boxes;
	std::vector<cv::Rect> boxes_nms;

	float x_factor = output_image.cols / INPUT_WIDTH;
	float y_factor = output_image.rows / INPUT_HEIGHT;

	float* data = (float*)preprocess_image[0].data;

	// Each of the 25200 candidate rows is [cx, cy, w, h, objectness, 80 class scores]
	const int dimensions = 85;
	const int rows = 25200;
	for (int i = 0; i < rows; ++i)
	{
		float confidence = data[4];
		if (confidence >= CONFIDENCE_THRESHOLD)
		{
			float* classes_scores = data + 5;
			cv::Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
			cv::Point class_id;
			double max_class_score;
			cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
			if (max_class_score > SCORE_THRESHOLD)
			{
				confidences.push_back(confidence);
				class_ids.push_back(class_id.x);

				float cx = data[0];
				float cy = data[1];
				float w = data[2];
				float h = data[3];
				int left = int((cx - 0.5 * w) * x_factor);
				int top = int((cy - 0.5 * h) * y_factor);
				int width = int(w * x_factor);
				int height = int(h * y_factor);
				boxes.push_back(cv::Rect(left, top, width, height));
			}
		}
		data += dimensions;
	}

	std::vector<int> indices;
	cv::dnn::NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
	for (size_t i = 0; i < indices.size(); i++)
	{
		int idx = indices[i];
		if (class_ids[idx] == class_index)
		{
			cv::Rect box = boxes[idx];
			boxes_nms.push_back(box);

			int left = box.x;
			int top = box.y;
			int width = box.width;
			int height = box.height;

			std::string label = cv::format("%.2f", confidences[idx]);
			label = class_name[class_ids[idx]] + ":" + label;
			draw_label(output_image, label, left, top);
		}
	}

	return boxes_nms;
}


// Center point of a detection box
cv::Point get_center(cv::Rect detection)
{
	return cv::Point(detection.x + detection.width / 2, detection.y + detection.height / 2);
}


// Euclidean distance between two points
float get_distance(cv::Point p1, cv::Point p2)
{
	return std::sqrt(std::pow(p1.x - p2.x, 2) + std::pow(p1.y - p2.y, 2));
}



int main(int argc, char** argv)
{
	cv::VideoCapture capture(0);
	cv::Mat frame;
	cv::dnn::Net net = cv::dnn::readNet("yolov5s-f32.onnx");

	int frame_num = 0;
	std::vector<cv::Point> detection_center_queue;

	while (cv::waitKey(1) < 0)
	{
		capture >> frame;
		if (frame.empty())
			break;

		std::cout << "******************************************************************* frame_num: " << frame_num << std::endl;

		cv::Mat image = frame.clone();
		std::vector<cv::Mat> preprocess_image = preprocess(image, net);

		std::vector<cv::Rect> detections = postprocess(preprocess_image, image, 0);

		// The motion logic only applies when exactly one person is detected
		if (detections.size() != 1)
		{
			cv::imshow("output", image);
			continue;
		}

		cv::Rect detection = detections[0];

		if (frame_num < QUEUE_SIZE)
		{
			cv::Point detection_center = get_center(detection);
			detection_center_queue.push_back(detection_center);

#ifdef DEBUG
			std::cout << "detection_center_queue:" << std::endl;
			for (size_t i = 0; i < detection_center_queue.size(); i++)
			{
				std::cout << detection_center_queue[i] << " ";
			}
			std::cout << std::endl;
#endif // DEBUG
		}
		else
		{
			detection_center_queue.erase(detection_center_queue.begin());
			cv::Point detection_center = get_center(detection);
			detection_center_queue.push_back(detection_center);
			
#ifdef DEBUG
			std::cout << "detection_center_queue:" << std::endl;
			for (size_t i = 0; i < detection_center_queue.size(); i++)
			{
				std::cout << detection_center_queue[i] << " ";
			}
			std::cout << std::endl;
#endif // DEBUG

			std::vector<float> distances;

			for (size_t i = 1; i < detection_center_queue.size(); i++)
			{
				cv::Point detection_center_old = detection_center_queue[i - 1];
				cv::Point detection_center_new = detection_center_queue[i];

				float distance = get_distance(detection_center_new, detection_center_old);
				distances.push_back(distance);
				std::cout << distance << std::endl;
			}

			bool show_box = true;
			for (size_t i = 0; i < distances.size(); i++)
			{
				float moving_threshold = sqrt(pow(detection.width, 2) + pow(detection.height, 2)) / 100;
				//float moving_threshold = 5;
				if (distances[i] < moving_threshold)
					show_box = false;
			}

			if(show_box)
				cv::rectangle(image, cv::Point(detection.x, detection.y), \
					cv::Point(detection.x + detection.width, detection.y + detection.height), cv::Scalar(0, 0, 255), 2);
		}

		frame_num++;

		cv::imshow("output", image);
	}

	capture.release();
	return 0;
}
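
To build this (a typical setup, not anything specified in the original post): only OpenCV with its dnn module is needed. On Linux, something like g++ -std=c++11 main.cpp -o motion_detect `pkg-config --cflags --libs opencv4` should work, with yolov5s-f32.onnx placed in the working directory; note that a reasonably recent OpenCV (4.5 or later) is needed for the dnn module to parse YOLOv5 ONNX exports.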

Follow-up: a multi-object tracking version using SORT + yolov5.
