from ultralytics import YOLO
import cv2
import numpy as np
from ultralytics.utils.plotting import Annotator
import math
class PostureAnalyzer:
    """Classify a person's posture (standing / sitting / lying) from
    COCO-format pose keypoints (17 points) produced by a YOLO pose model.

    Decision pipeline in ``determine_posture``:
      1. reject low-confidence detections as "unknown",
      2. "lying" when the torso tilts far from vertical,
      3. "sitting" when a knee is clearly bent or the thigh appears
         foreshortened, 4. otherwise "standing".
    """

    # COCO keypoint indices used by the analyzer.
    KP_NOSE = 0
    KP_LEFT_SHOULDER = 5
    KP_RIGHT_SHOULDER = 6
    KP_LEFT_HIP = 11
    KP_RIGHT_HIP = 12
    KP_LEFT_KNEE = 13
    KP_RIGHT_KNEE = 14
    KP_LEFT_ANKLE = 15
    KP_RIGHT_ANKLE = 16

    def __init__(self):
        # Posture-decision thresholds.
        self.LYING_ANGLE_THRESHOLD = 45         # torso tilt from vertical (degrees) beyond which we call "lying"
        self.SITTING_ANGLE_THRESHOLD = 150      # knee angle (degrees) below which the leg counts as bent
        self.SITTING_LEG_RATIO_THRESHOLD = 0.3  # thigh / whole-leg length ratio below which we suspect sitting
        self.CONFIDENCE_THRESHOLD = 0.3         # minimum keypoint confidence to trust a point
        self.MIN_LEG_LENGTH = 5                 # minimum total leg length in pixels for a valid ratio
        # BGR colors indexed by posture id (standing, sitting, lying, unknown).
        self.POSTURE_COLORS = (
            (0, 255, 0),    # standing: green
            (0, 165, 255),  # sitting: orange
            (0, 0, 255),    # lying: red
            (0, 255, 255),  # unknown: yellow
        )

    def calculate_torso_angle(self, torso_vec):
        """Return the angle (degrees) between the torso vector and the
        vertical image axis; 0 = upright, 90 = horizontal."""
        dx, dy = torso_vec
        # abs() folds all quadrants into [0, 90]: only the tilt magnitude matters.
        return math.degrees(math.atan2(abs(dx), abs(dy)))

    def calculate_joint_angle(self, a, b, c):
        """Return the angle (degrees) at joint ``b`` formed by points a-b-c."""
        ba = a - b
        bc = c - b
        # 1e-6 guards against division by zero when two points coincide.
        cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc) + 1e-6)
        cosine_angle = np.clip(cosine_angle, -1.0, 1.0)
        return math.degrees(np.arccos(cosine_angle))

    def calculate_leg_ratio(self, hip, knee, ankle):
        """Return thigh length / total leg length, or None when the leg is
        too short (in pixels) to give a meaningful ratio."""
        thigh_length = np.linalg.norm(hip - knee)
        calf_length = np.linalg.norm(knee - ankle)
        if (thigh_length + calf_length) > self.MIN_LEG_LENGTH:
            return thigh_length / (thigh_length + calf_length)
        return None

    def determine_posture(self, kpts, confs):
        """Core posture-classification logic.

        Args:
            kpts: (17, 2) array of keypoint pixel coordinates.
            confs: (17,) array of per-keypoint confidences.

        Returns:
            (posture_id, bgr_color) where posture_id is
            0=standing, 1=sitting, 2=lying, 3=unknown.
        """
        STANDING, SITTING, LYING, UNKNOWN = 0, 1, 2, 3

        # Shoulders and hips are required to estimate the torso direction.
        required_keypoints = [
            self.KP_LEFT_SHOULDER, self.KP_RIGHT_SHOULDER,
            self.KP_LEFT_HIP, self.KP_RIGHT_HIP,
        ]

        # 1. Bail out when any required keypoint is unreliable.
        if any(confs[i] < self.CONFIDENCE_THRESHOLD for i in required_keypoints):
            return UNKNOWN, self.POSTURE_COLORS[UNKNOWN]

        # 2. Extract keypoint coordinates.
        left_shoulder = kpts[self.KP_LEFT_SHOULDER]
        right_shoulder = kpts[self.KP_RIGHT_SHOULDER]
        left_hip = kpts[self.KP_LEFT_HIP]
        right_hip = kpts[self.KP_RIGHT_HIP]

        # 3. Torso vector from the shoulder midpoint to the hip midpoint.
        shoulder_center = (left_shoulder + right_shoulder) / 2
        hip_center = (left_hip + right_hip) / 2
        torso_vector = hip_center - shoulder_center

        # 4. Lying: torso tilted far from vertical.
        if self.calculate_torso_angle(torso_vector) > self.LYING_ANGLE_THRESHOLD:
            return LYING, self.POSTURE_COLORS[LYING]

        # 5. Sitting: look for bent knees or an abnormal thigh ratio.
        leg_angles = []
        leg_ratios = []

        # Left leg (only when all three joints are confidently detected).
        left_leg_indices = [self.KP_LEFT_HIP, self.KP_LEFT_KNEE, self.KP_LEFT_ANKLE]
        if all(confs[i] >= self.CONFIDENCE_THRESHOLD for i in left_leg_indices):
            left_angle = self.calculate_joint_angle(
                left_hip, kpts[self.KP_LEFT_KNEE], kpts[self.KP_LEFT_ANKLE]
            )
            leg_angles.append(left_angle)
            ratio = self.calculate_leg_ratio(
                left_hip, kpts[self.KP_LEFT_KNEE], kpts[self.KP_LEFT_ANKLE]
            )
            # "is not None" rather than truthiness: a 0.0 ratio is valid data.
            if ratio is not None:
                leg_ratios.append(ratio)

        # Right leg.
        right_leg_indices = [self.KP_RIGHT_HIP, self.KP_RIGHT_KNEE, self.KP_RIGHT_ANKLE]
        if all(confs[i] >= self.CONFIDENCE_THRESHOLD for i in right_leg_indices):
            right_angle = self.calculate_joint_angle(
                right_hip, kpts[self.KP_RIGHT_KNEE], kpts[self.KP_RIGHT_ANKLE]
            )
            leg_angles.append(right_angle)
            ratio = self.calculate_leg_ratio(
                right_hip, kpts[self.KP_RIGHT_KNEE], kpts[self.KP_RIGHT_ANKLE]
            )
            if ratio is not None:
                leg_ratios.append(ratio)

        # Sitting when either leg is clearly bent, or the thigh looks
        # foreshortened (small thigh/leg ratio under perspective).
        is_sitting = False
        if leg_angles and any(angle < self.SITTING_ANGLE_THRESHOLD for angle in leg_angles):
            is_sitting = True
        elif leg_ratios and any(ratio < self.SITTING_LEG_RATIO_THRESHOLD for ratio in leg_ratios):
            is_sitting = True

        if is_sitting:
            return SITTING, self.POSTURE_COLORS[SITTING]

        # 6. Default: upright torso with straight legs -> standing.
        return STANDING, self.POSTURE_COLORS[STANDING]
class PostureDetector:
    """Run YOLO pose tracking on a video file and annotate each detected
    person's bounding box with a posture label from PostureAnalyzer."""

    # Display labels indexed by the posture id returned by
    # PostureAnalyzer.determine_posture (0..3).
    POSTURE_LABELS = ["Standing", "Sitting", "Lying", "Unknown"]

    def __init__(self, model_path, video_path):
        """Load the pose model and remember the video source path."""
        self.model = YOLO(model_path)
        self.video_path = video_path
        self.analyzer = PostureAnalyzer()

    def run_detection(self):
        """Process the video stream frame by frame: annotate postures,
        show a live preview window, and write the result to 'output.mp4'.
        Press 'q' in the preview window to stop early."""
        if not self.video_path:
            return
        cap = cv2.VideoCapture(self.video_path)
        if not cap.isOpened():
            return

        # Mirror the input stream's properties for the output file.
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        video_writer = cv2.VideoWriter('output.mp4',
                                       cv2.VideoWriter_fourcc(*'mp4v'),
                                       fps, (width, height))

        frame_count = 0
        while cap.isOpened():
            status, frame = cap.read()
            if not status:  # end of stream
                break
            frame_count += 1

            annotated_frame, people_count = self.process_results(frame)

            # Overlay the per-frame person count in the top-left corner.
            stats_text = f"people number: {people_count}"
            cv2.putText(annotated_frame, stats_text, (15, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            cv2.imshow('Yolov11', annotated_frame)
            video_writer.write(annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        video_writer.release()
        cv2.destroyAllWindows()

    def process_results(self, frame):
        """Track people in one frame and annotate their postures.

        Returns:
            (annotated_frame, person_count)
        """
        results = self.model.track(frame, persist=True, verbose=False)
        annotator = Annotator(frame)
        person_count = 0

        for res in results:
            # Skip results without pose keypoints.
            if hasattr(res, 'keypoints') and res.keypoints is not None:
                keypoints = res.keypoints.xy.cpu().numpy()
                confidences = res.keypoints.conf.cpu().numpy()
            else:
                continue

            bboxes = res.boxes.xyxy.cpu().numpy()
            for idx, bbox in enumerate(bboxes):
                if hasattr(res.boxes, 'cls'):
                    cls_id = int(res.boxes.cls[idx])
                else:
                    continue
                if cls_id != 0:  # keep only the "person" class (COCO id 0)
                    continue

                person_count += 1
                kpts = keypoints[idx]
                # Guard against a keypoint/box count mismatch.
                confs = confidences[idx] if idx < len(confidences) else np.zeros(len(kpts))

                posture_id, color = self.analyzer.determine_posture(kpts, confs)
                posture_label = self.POSTURE_LABELS[posture_id]
                annotator.box_label(bbox, posture_label, color)

                # Annotator.kpts expects (x, y, conf) triples; use conf=1
                # so every detected point is drawn.
                kpt_data = np.hstack([kpts, np.ones((len(kpts), 1))])
                annotator.kpts(kpt_data, radius=5)

        return frame, person_count
if __name__ == "__main__":
    # Entry point: load the pose model and run posture detection on the
    # demo video; writes annotated output to 'output.mp4'.
    detector = PostureDetector(
        model_path='yolo11n-pose.pt',
        video_path='video.mp4'
    )
    detector.run_detection()