import cv2
import mediapipe as mp
import time
import math
from scipy.spatial import distance as dist
import numpy as np
from mediaPose import poseDetector
from mediaFace import FaceMeshDetector
from euler_angle import get_head_pose
# Create the face-mesh and body-pose detector objects (project-local
# wrappers around MediaPipe; see mediaFace.py / mediaPose.py).
faceDetector = FaceMeshDetector()
# NOTE(review): this rebinds the imported class name `poseDetector` to an
# instance — the class is no longer reachable under that name afterwards.
poseDetector = poseDetector()
def eye_aspect_ratio(facelist, eye):
    """Return the eye aspect ratio (EAR) for one eye.

    Args:
        facelist: sequence of (x, y) landmark points, indexable by landmark id.
        eye: landmark-index list; eye[0]/eye[1] are the horizontal corner
            pair, the remaining indices form vertical lid pairs — two pairs
            for a 6-point template, three pairs for an 8-point template.

    Returns:
        float: mean vertical lid distance divided by the horizontal corner
        distance. Small values indicate a closed eye.
    """
    # Horizontal corner-to-corner distance (the EAR denominator).
    horizontal = dist.euclidean(facelist[eye[0]], facelist[eye[1]])
    # Vertical lid-to-lid pairs. The original computed the 2-pair EAR first
    # and then recomputed it with 3 pairs for 8-point templates; compute it
    # once instead.
    pairs = [(2, 3), (4, 5)]
    if len(eye) == 8:
        pairs.append((6, 7))
    vertical = [dist.euclidean(facelist[eye[a]], facelist[eye[b]]) for a, b in pairs]
    return sum(vertical) / (len(vertical) * horizontal)
def mouth_aspect_ratio(facelist, mouth):
    """Return the mouth aspect ratio (MAR) — large values mean an open mouth.

    `mouth` holds 8 landmark indices: mouth[0]/mouth[1] are the horizontal
    corner pair; (2,3), (4,5), (6,7) are three vertical lip pairs.
    """
    vertical_pairs = ((2, 3), (4, 5), (6, 7))
    opening = 0.0
    for a, b in vertical_pairs:
        opening += dist.euclidean(facelist[mouth[a]], facelist[mouth[b]])
    width = dist.euclidean(facelist[mouth[0]], facelist[mouth[1]])
    # Average vertical opening normalised by mouth width.
    return opening / (3.0 * width)
def headdownOrPhone_detection(lmList, poseKeypoint, headTime, phoneTime, r1, r2):
    """Detect sustained head-down posture and phone-near-face gestures.

    Args:
        lmList: pose landmarks, each entry shaped [id, x, y].
        poseKeypoint: indices [nose, shoulder_l, shoulder_r, finger_l,
            finger_r] into lmList.
        headTime / phoneTime: running frame counters carried between calls.
        r1 / r2: shoulder-width divisors that set the distance thresholds.

    Returns:
        (head_down, phone, headTime, phoneTime) — the booleans turn True
        only after the matching counter exceeds 60 consecutive frames.
    """
    nose, sh_l, sh_r, fin_l, fin_r = (lmList[i] for i in poseKeypoint)
    nose_xy = (nose[1], nose[2])
    sh_l_xy = (sh_l[1], sh_l[2])
    sh_r_xy = (sh_r[1], sh_r[2])
    # Midpoint of the shoulder line; shoulder width is the scale reference.
    mid_xy = ((sh_l_xy[0] + sh_r_xy[0]) / 2, (sh_l_xy[1] + sh_r_xy[1]) / 2)
    shoulder_width = dist.euclidean(sh_l_xy, sh_r_xy)
    nose_to_mid = dist.euclidean(nose_xy, mid_xy)
    finger_to_nose = (
        dist.euclidean((fin_l[1], fin_l[2]), nose_xy),
        dist.euclidean((fin_r[1], fin_r[2]), nose_xy),
    )
    # Head-down: the nose stays close to the shoulder midline.
    headTime = headTime + 1 if nose_to_mid < shoulder_width / r1 else 0
    head_down = headTime > 60
    # Phone use: either hand's index finger stays close to the nose.
    phoneTime = phoneTime + 1 if min(finger_to_nose) < shoulder_width / r2 else 0
    phone = phoneTime > 60
    return head_down, phone, headTime, phoneTime
def get_facelist(img, option):
    """Return the face landmark list for `img` using the chosen backend.

    Args:
        img: BGR image frame.
        option: landmark backend; only 'mediapipe' is supported.

    Returns:
        list: landmark points from the face-mesh detector (empty when no
        face is found).

    Raises:
        ValueError: for an unknown `option`. The original silently fell
        through and returned None, which later crashed at `len(facelist)`.
    """
    if option == 'mediapipe':
        _, facelist = faceDetector.FindFaceMesh(img)
        return facelist
    raise ValueError(f"unsupported face-detection option: {option!r}")
def get_fatigue_parameter(feature_queue, close_time, open_time, EAR_THRESH, MAR_THRESH, T):
    """Update per-frame fatigue counters from the sliding feature window.

    Each queue entry is [mar, ear_l, ear_r, pitch, yaw, roll]; the last
    entry is the current frame. The queue is trimmed in place to at most
    T entries (the caller appends one entry per frame before calling).

    Returns:
        (close_time, perclos, open_time): consecutive closed-eye frames,
        fraction of the last T frames with closed eyes (-1 until the
        window is full), and consecutive open-mouth frames.
    """
    # Drop the oldest frame once the window overflows.
    if len(feature_queue) > T:
        feature_queue.pop(0)
    mar, ear_l, ear_r = feature_queue[-1][:3]
    mean_ear = (ear_l + ear_r) / 2
    # Consecutive closed-eye frames; EAR == 0 means "no face", not "closed".
    close_time = close_time + 1 if 0 < mean_ear < EAR_THRESH else 0
    # Consecutive open-mouth (yawning) frames.
    open_time = open_time + 1 if mar > MAR_THRESH else 0
    # PERCLOS over the full window; -1 signals "window not yet full".
    if len(feature_queue) == T:
        closed = sum(1 for f in feature_queue if (f[1] + f[2]) / 2 < EAR_THRESH)
        perclos = closed / T
    else:
        perclos = -1
    return close_time, perclos, open_time
def fatigue_datection(close_time, open_time, alert1, alert2, yawnTime1, P, yawn_time, perclos=None):
    """Decide eye-based (alert1) and yawn-based (alert2) fatigue alerts.

    Args:
        close_time: consecutive closed-eye frames.
        open_time: consecutive open-mouth frames.
        alert1 / alert2: previous alert states (alert2 is carried over while
            a yawn is still in progress).
        yawnTime1: wall-clock time of the last completed yawn; <= 0 means
            "no yawn recorded yet".
        P: PERCLOS threshold for the eye alert.
        yawn_time: max seconds between two yawns for them to count as fatigue.
        perclos: PERCLOS value of the current window. BUG FIX — the original
            read an undeclared module-level global named `perclos`; the None
            default preserves that fallback for existing callers while
            allowing the value to be passed explicitly.

    Returns:
        (alert1, alert2, yawnTime1)
    """
    if perclos is None:
        # Backward-compatible fallback to the module global the original
        # implementation silently depended on.
        perclos = globals().get('perclos', -1)
    # Eye fatigue: long continuous closure OR high PERCLOS over the window.
    alert1 = close_time > 40 or perclos > P
    # Yawn fatigue: two yawns less than `yawn_time` seconds apart.
    if yawnTime1 > 0:
        if open_time == 40:
            # A new yawn just reached the 40-frame mark.
            t = time.time() - yawnTime1
            if t < yawn_time:
                print('两次打哈欠时间间隔为:', t)
                alert2 = True
            else:
                alert2 = False
            yawnTime1 = time.time()
        elif open_time > 40:
            # Mouth still open past the mark; keep refreshing the timestamp.
            yawnTime1 = time.time()
        else:
            alert2 = False
    else:
        alert2 = False
        if open_time == 40:
            # First recorded yawn — remember when it happened.
            yawnTime1 = time.time()
    return alert1, alert2, yawnTime1
#画点函数
def drawCircle_face(img, facelist, list1, list2, list3):
    """Draw a small red circle on every landmark named by the three index lists.

    Args:
        img: BGR frame to draw on (modified in place and returned).
        facelist: landmark points, indexable by landmark id; each point is (x, y, ...).
        list1 / list2 / list3: landmark-index lists (e.g. left eye, right eye, mouth).
    """
    for idx in (*list1, *list2, *list3):
        point = facelist[idx]
        cv2.circle(img, (point[0], point[1]), 2, (0, 0, 255), 1)
    return img
def drawCircle_pose(img, lmlist):
    """Draw filled red circles on the tracked pose keypoints.

    Args:
        img: BGR frame to draw on (modified in place and returned).
        lmlist: pose landmarks, each entry shaped [id, x, y]; indices
            0 (nose), 11/12 (shoulders) and 19/20 (index fingers) are drawn.
    """
    # Fix: the original named this variable `list`, shadowing the builtin,
    # and iterated it by index instead of directly.
    keypoints = (lmlist[0], lmlist[11], lmlist[12], lmlist[19], lmlist[20])
    for kp in keypoints:
        cv2.circle(img, (kp[1], kp[2]), 7, (0, 0, 255), cv2.FILLED)
    return img
# --- Tunable thresholds and landmark templates --------------------------
EAR_THRESH = 0.15   # eye aspect ratio below this counts as "eye closed"
MAR_THRESH = 0.6    # mouth aspect ratio above this counts as "mouth open"
T = 50              # sliding-window length (frames) for PERCLOS
Yawn_time = 60      # max seconds between two yawns to flag fatigue
PERCLOS = 0.4       # PERCLOS threshold for the eye-fatigue alert
DRAW = True         # draw landmark overlays on the output frame
# MediaPipe FaceMesh landmark ids: [corner, corner, then vertical pairs]
left_eye = [33, 133, 160, 144, 158, 153, 159, 145]
right_eye = [362, 263, 385, 380, 387, 373, 386, 374]
mouth = [78, 308, 82, 87, 312, 317, 13, 14]
# Pose landmark ids: nose (0), shoulders (11/12), index fingers (19/20)
poseKeypoint = [0,11,12,19,20]
if __name__ == "__main__":
    cap = cv2.VideoCapture(1)  # camera index 1 — TODO confirm for the target machine
    # --- per-run state ---------------------------------------------------
    fps = 0
    feature_queue = []  # sliding window of [mar, ear_l, ear_r, pitch, yaw, roll]
    close_time, open_time, alert1, alert2, yawnTime1 = 0, 0, 0, 0, -1000
    head_down, phone_call, headTime, phoneTime = False, False, 0, 0
    while cap.isOpened():
        success, img = cap.read()
        # BUG FIX: the original ignored `success` and crashed on img.copy()
        # when the camera delivered no frame.
        if not success or img is None:
            break
        img1 = img.copy()
        # --- body-pose detection (the duplicate t1 assignment was removed) -
        t1 = time.time()
        lmList = poseDetector.findPosition(img, draw=False)
        if len(lmList) != 0:
            # Check for abnormal posture (head down / phone near face).
            head_down, phone_call, headTime, phoneTime = headdownOrPhone_detection(
                lmList, poseKeypoint, headTime=headTime, phoneTime=phoneTime, r1=3, r2=2)
        else:
            print("请调整坐姿")
            lmList = []
            head_down = False
        # --- facial feature extraction ---------------------------------
        facelist = get_facelist(img1, option='mediapipe')
        mar, ear_l, ear_r, pitch, yaw, roll = 0, 0, 0, 0, 0, 0
        if len(facelist) != 0:
            ear_l = eye_aspect_ratio(facelist, left_eye)    # left-eye EAR
            ear_r = eye_aspect_ratio(facelist, right_eye)   # right-eye EAR
            mar = mouth_aspect_ratio(facelist, mouth)       # mouth MAR
            pitch, yaw, roll = get_head_pose(facelist)      # head pose angles
            feature_queue.append([mar, ear_l, ear_r, pitch, yaw, roll])
        else:
            # Face lost — treat it as sudden movement, not fatigue, and
            # reset the sliding window.
            print("请调整坐姿")
            feature_queue = [[0, 0, 0, 0, 0, 0]]
        # --- fatigue metrics and alerts --------------------------------
        # Consistency fix: pass the window-size constant T instead of a
        # hard-coded 50 (same value).
        close_time, perclos, open_time = get_fatigue_parameter(
            feature_queue, close_time, open_time,
            EAR_THRESH=EAR_THRESH, MAR_THRESH=MAR_THRESH, T=T)
        # alert1 is the eye-based alert, alert2 the yawn-based alert.
        alert1, alert2, yawnTime1 = fatigue_datection(
            close_time, open_time, alert1=alert1, alert2=alert2,
            yawnTime1=yawnTime1, P=PERCLOS, yawn_time=Yawn_time)
        # --- landmark overlays -----------------------------------------
        if DRAW and len(facelist) != 0:
            img = drawCircle_face(img, facelist, left_eye, right_eye, mouth)
        if DRAW and len(lmList) != 0:
            img = drawCircle_pose(img, lmList)
        t2 = time.time()
        fps = 1 / (t2 - t1)
        # --- on-screen display -----------------------------------------
        if alert1:
            cv2.putText(img, "EYE_Fatigue", (200, 180), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        if alert2:
            # Typo fix in the display label: was "YWAN_Fatigue".
            cv2.putText(img, "YAWN_Fatigue", (200, 280), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        if head_down:
            cv2.putText(img, "Head Down !!!", (100, 80), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        if phone_call:
            cv2.putText(img, "Calling !!!", (100, 120), cv2.FONT_HERSHEY_PLAIN, 3,
                        (255, 0, 0), 3)
        cv2.putText(img, f'FPS:{int(fps)}', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(img, "MAR: {:.2f}".format(mar), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "EAR_L: {:.2f}".format(ear_l), (300, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "EAR_R: {:.2f}".format(ear_r), (500, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "X: " + "{:5.2f}".format(pitch), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), thickness=2)  # pitch
        cv2.putText(img, "Y: " + "{:5.2f}".format(yaw), (150, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), thickness=2)  # yaw
        cv2.putText(img, "Z: " + "{:5.2f}".format(roll), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), thickness=2)  # roll
        cv2.imshow('DETECTION', img)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
            cv2.destroyAllWindows()
            cap.release()
            break
# NOTE(review): the following fragments are web-scrape artifacts (post dates
# and view counts: "01-16 1502", "05-18 2255"), not program code. They are
# kept here as a comment so the file remains valid Python.