Step 1: Use an eye detector to detect and extract the eye regions from the video
import numpy as np
import cv2

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone_7.mp4')  # open the source video
classfier = cv2.CascadeClassifier("C:/Users/zhangjing/Anaconda3/Lib/site-packages/cv2/data/haarcascade_eye_tree_eyeglasses.xml")  # Haar cascade eye classifier
frame_cnt = 0  # counter used to name the saved crops
while cap.isOpened():
    read, frame = cap.read()
    if not read:
        break
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    Rects = classfier.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))  # eye detection
    if len(Rects) > 0:
        for Rect in Rects:
            x, y, w, h = Rect
            print(x, y, w, h)
            # cv2.imshow("eye", frame[y + 10:y + h - 10, x:x + w])
            # cv2.imshow("frame", frame)
            cv2.imwrite('C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/' + str(frame_cnt) + '.png',
                        frame[y + 10:y + h - 10, x + 10:x + w - 10])  # save the cropped eye region
            frame_cnt = frame_cnt + 1
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
Step 2: Resize the saved images to a uniform size
import os
from PIL import Image

def ResizeImage():
    img_filepath = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/'  # input image directory
    fileout = 'C:/Users/zhangjing/Documents/Bandicam/phone_6_cut/'  # output image directory
    width = 70   # target width
    height = 70  # target height
    type = 'png'  # image format
    picture_name = os.listdir(img_filepath)
    for i in range(len(picture_name)):
        img_rd = Image.open(img_filepath + picture_name[i])
        out = img_rd.resize((width, height), Image.ANTIALIAS)  # resize with high quality
        out.save(fileout + picture_name[i], type)  # write to the output directory
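ResizeImage() is only defined above; the original listing never calls it. A minimal way to run it (creating the output folder first in case it does not exist yet, using the fileout path from the function):

import os
os.makedirs('C:/Users/zhangjing/Documents/Bandicam/phone_6_cut/', exist_ok=True)  # make sure the output folder exists
ResizeImage()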
Step 3: Combine the cropped images into a video
import os
import cv2

def video_make():
    write_image_path = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/'  # directory holding the frames to write
    new_video = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/'  # directory where the output video is saved
    print('video make ...')
    all_folds = os.listdir(write_image_path)
    fps = 30  # FPS of the output video; adjust as needed
    size = (70, 70)  # frame size, must match the image size
    # (*'DIVX') or (*'X264') also work; if neither does, install ffmpeg first: sudo apt-get install ffmpeg
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(new_video + 'test_640.mp4', fourcc, fps, size)
    for i in range(0, len(all_folds)):
        frame = cv2.imread(write_image_path + str(i) + '.png')
        videoWriter.write(frame)
    videoWriter.release()
    print('video make end!!')
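Like ResizeImage(), video_make() is never invoked in the original listing, and its loop expects the frames inside write_image_path to be named 0.png, 1.png, and so on. Running the step is then just:

video_make()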
Step 4: Run optical-flow analysis on the eye video and save the resulting images
import cv2
import numpy as np

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/test_640.mp4')
# read the first frame
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
# set the saturation channel to its maximum
hsv[..., 1] = 255
frame_cnt = 0
while True:
    ret, frame2 = cap.read()
    if not ret:
        break
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # returns a two-channel flow field: the per-pixel displacement of every point
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    print(flow.shape)
    # print(flow)
    # convert Cartesian coordinates to polar ones to get the magnitude and angle
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    frame_cnt = frame_cnt + 1
    # cv2.imshow('rgb', rgb)
    cv2.imwrite('C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/' + str(frame_cnt) + '.png', rgb)  # save the flow image
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('opticalfb.png', frame2)
        cv2.imwrite('opticalhsv.png', rgb)
    prvs = next
cap.release()
cv2.destroyAllWindows()
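The magnitude/angle-to-HSV mapping above (flow direction becomes hue, flow magnitude becomes brightness) can be factored into a small helper for reuse; a minimal sketch built from the same cv2 calls, with flow_to_bgr as a name introduced here rather than taken from the original:

def flow_to_bgr(flow):
    # flow: H x W x 2 per-pixel displacement field from calcOpticalFlowFarneback
    h, w = flow.shape[:2]
    hsv = np.zeros((h, w, 3), dtype=np.uint8)
    hsv[..., 1] = 255  # full saturation
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (OpenCV hue range is 0-180)
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # magnitude -> value
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)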
A second optical-flow method (sparse Lucas-Kanade tracking)
import cv2
import numpy as np

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone.mp4')
# parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# parameters for Lucas-Kanade optical flow
# maxLevel is the maximal pyramid level number (0 means only the original image is used)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# random colours for drawing the tracks
color = np.random.randint(0, 255, (100, 3))
ret, old_frame = cap.read()  # read the first frame of the video
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)  # mask image used for drawing the tracks
fram_cnt = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # compute the optical flow to get the new positions of the points
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # keep only the successfully tracked (good) points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('image', img)
    fram_cnt = fram_cnt + 1
    k = cv2.waitKey(30)  # & 0xff
    if k == 27:
        break
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
cap.release()
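Tracked points are gradually dropped as st marks them lost, so the drawing loop can eventually run out of points. A common extension, shown here only as a sketch and not part of the original code, is to re-run goodFeaturesToTrack inside the while loop when too few points remain, replacing the final p0 = good_new.reshape(-1, 1, 2) line:

MIN_POINTS = 10  # assumed threshold, tune as needed
if len(good_new) < MIN_POINTS:
    p0 = cv2.goodFeaturesToTrack(frame_gray, mask=None, **feature_params)
    mask = np.zeros_like(frame)  # also clear the drawn tracks
else:
    p0 = good_new.reshape(-1, 1, 2)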
Step 5: Combine the output optical-flow images into a video, in the same way as Step 3.
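A minimal sketch of that step, assuming the flow images written by Step 4 live in phone_7_cut, are 70x70 like the eye crops, and are numbered from 1.png upward; the output name flow_video.mp4 is chosen here and not taken from the original:

import os
import cv2

flow_dir = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/'
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter(flow_dir + 'flow_video.mp4', fourcc, 30, (70, 70))
n_frames = len([f for f in os.listdir(flow_dir) if f.endswith('.png')])
for i in range(1, n_frames + 1):  # Step 4 numbers its images 1.png, 2.png, ...
    frame = cv2.imread(flow_dir + str(i) + '.png')
    if frame is None:
        continue
    writer.write(frame)
writer.release()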
Results