Webcam and Phone Camera Pupil Variable Output System

This post walks through an implementation of eye-movement video analysis: real-time eye detection, resizing the saved crops to a uniform size, composing them into a video, optical flow analysis, and outputting the result video. Each stage of the pipeline is illustrated with concrete code.


Step 1: Detect and extract the eyes in real time with a Haar eye-cascade classifier

import numpy as np
import cv2

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone_7.mp4')  # open the recorded video (or a camera index)
classfier = cv2.CascadeClassifier("C:/Users/zhangjing/Anaconda3/Lib/site-packages/cv2/data/haarcascade_eye_tree_eyeglasses.xml")  # Haar eye-cascade classifier
frame_cnt = 0  # counter for saved eye crops
while cap.isOpened():
    read, frame = cap.read()
    if not read:
        break
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    Rects = classfier.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))  # eye detection
    if len(Rects) > 0:
        for Rect in Rects:
            x, y, w, h = Rect
            print(x, y, w, h)
            # cv2.imshow("eye", frame[y + 10:y + h - 10, x:x + w])
            # cv2.imshow("frame", frame)
            cv2.imwrite('C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/' + str(frame_cnt) + '.png',
                        frame[y + 10:y + h - 10, x + 10:x + w - 10])  # save the cropped eye region
            frame_cnt = frame_cnt + 1
            if cv2.waitKey(5) & 0xFF == ord('q'):
                break

cap.release()
cv2.destroyAllWindows()
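If the cascade returns both eyes or spurious boxes in a single frame, one refinement is to keep only the largest detection and to verify that the cascade file actually loaded (OpenCV does not raise an error for a bad XML path). A minimal sketch, assuming the same classifier path as above; the helper name largest_eye is hypothetical:

import cv2

classfier = cv2.CascadeClassifier(
    "C:/Users/zhangjing/Anaconda3/Lib/site-packages/cv2/data/haarcascade_eye_tree_eyeglasses.xml")
if classfier.empty():  # cv2 silently returns an empty classifier if the path is wrong
    raise IOError("cascade file not found or could not be parsed")

def largest_eye(grey_frame):
    """Return the largest detected eye rectangle (x, y, w, h), or None."""
    rects = classfier.detectMultiScale(grey_frame, scaleFactor=1.2,
                                       minNeighbors=3, minSize=(32, 32))
    if len(rects) == 0:
        return None
    return max(rects, key=lambda r: r[2] * r[3])  # rectangle with the largest area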

Step 2: Resize the saved images to a uniform size

import os
from PIL import Image

def ResizeImage():
    img_filepath = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/'  # input image directory
    fileout = 'C:/Users/zhangjing/Documents/Bandicam/phone_6_cut/'       # output image directory
    width = 70    # target width
    height = 70   # target height
    img_format = 'png'  # image format
    picture_name = os.listdir(img_filepath)
    for i in range(len(picture_name)):
        img_rd = Image.open(img_filepath + picture_name[i])
        # high-quality resize (Image.ANTIALIAS was renamed Image.LANCZOS in Pillow >= 10)
        out = img_rd.resize((width, height), Image.ANTIALIAS)
        out.save(fileout + picture_name[i], img_format)  # write the resized image to the output directory
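A minimal usage sketch; Pillow does not create the output directory for you, so it is made first:

import os

os.makedirs('C:/Users/zhangjing/Documents/Bandicam/phone_6_cut/', exist_ok=True)  # output dir used in ResizeImage
ResizeImage()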

Step 3: Compose the cropped images into a video

import os
import cv2

def video_make():
    write_image_path = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/'  # directory of the input frames
    new_video = 'C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/'         # directory for the output video
    print('video make ...')
    all_folds = os.listdir(write_image_path)
    fps = 30          # FPS of the output video; adjust as needed
    size = (70, 70)   # frame size, must match the image size
    # (*'DIVX') or (*'X264') also work; if neither does, install ffmpeg first: sudo apt-get install ffmpeg
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    videoWriter = cv2.VideoWriter(new_video + 'test_640.mp4', fourcc, fps, size)

    # frames are expected to be named 0.png, 1.png, ... so they are written in numeric order
    for i in range(0, len(all_folds)):
        frame = cv2.imread(write_image_path + str(i) + '.png')
        videoWriter.write(frame)
    videoWriter.release()
    print('video make end!!')
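Two things to watch with cv2.VideoWriter: it silently drops frames whose size does not match the declared size, and os.listdir order is not numeric, which is why the loop above indexes by str(i) + '.png'. A hedged alternative sketch (the helper name video_make_safe is hypothetical) that sorts frames numerically, skips unreadable files, and resizes defensively:

import os
import cv2

def video_make_safe(image_dir, video_path, fps=30, size=(70, 70)):
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(video_path, fourcc, fps, size)
    # sort frame files numerically: 0.png, 1.png, ..., 10.png, ...
    names = sorted((n for n in os.listdir(image_dir) if n.endswith('.png')),
                   key=lambda n: int(os.path.splitext(n)[0]))
    for name in names:
        frame = cv2.imread(image_dir + name)
        if frame is None:                      # unreadable or missing file
            continue
        if (frame.shape[1], frame.shape[0]) != size:
            frame = cv2.resize(frame, size)    # VideoWriter ignores frames of the wrong size
        writer.write(frame)
    writer.release()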

Step 4: Run optical flow analysis on the eye video and save the flow images

import cv2
import numpy as np

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone_7_eye/test_640.mp4')

# read the first frame
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)

# fix saturation at maximum; hue and value will encode flow direction and magnitude
hsv[..., 1] = 255
frame_cnt = 0

while True:
    ret, frame2 = cap.read()
    if not ret:  # end of video
        break
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # dense optical flow: a two-channel field of per-pixel displacement vectors
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    print(flow.shape)
    # print(flow)

    # convert Cartesian (dx, dy) to polar coordinates (magnitude, angle)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2                              # hue encodes flow direction
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # value encodes flow magnitude
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    frame_cnt = frame_cnt + 1

    # cv2.imshow('rgb', rgb)
    cv2.imwrite('C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/' + str(frame_cnt) + '.png', rgb)
    k = cv2.waitKey(30) & 0xff
    if k == 27:          # Esc to quit
        break
    elif k == ord('s'):  # 's' to save a snapshot
        cv2.imwrite('opticalfb.png', frame2)
        cv2.imwrite('opticalhsv.png', rgb)
    prvs = next

cap.release()
cv2.destroyAllWindows()
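If, in addition to the flow-visualization video, a per-frame numeric signal is wanted as the "pupil variable" (for example the mean flow magnitude per frame pair), a minimal sketch under that assumption; the function name, CSV path, and column names are hypothetical:

import csv
import cv2
import numpy as np

def flow_magnitude_series(video_path, csv_path):
    """Write one row (frame index, mean flow magnitude) per frame pair."""
    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    prvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    with open(csv_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['frame', 'mean_flow_magnitude'])
        idx = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            nxt = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prvs, nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            writer.writerow([idx, float(np.mean(mag))])  # one scalar per frame pair
            prvs = nxt
            idx += 1
    cap.release()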

Second optical flow method (sparse Lucas-Kanade tracking)

import cv2
import numpy as np

cap = cv2.VideoCapture('C:/Users/zhangjing/Documents/Bandicam/phone.mp4')

# parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Lucas-Kanade optical flow parameters
# maxLevel is the maximal pyramid level number (0 means no pyramid)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# random colors, one per tracked point
color = np.random.randint(0, 255, (100, 3))


ret, old_frame = cap.read()                             # first frame of the video
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)                         # mask image used for drawing the tracks
frame_cnt = 0
while True:
    ret, frame = cap.read()
    if not ret:  # end of video
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # compute optical flow to get the new positions of the points
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # keep only the successfully tracked points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('image', img)
    frame_cnt = frame_cnt + 1
    k = cv2.waitKey(30)  # & 0xff
    if k == 27:  # Esc to quit
        break
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv2.destroyAllWindows()
cap.release()
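The two approaches trade off differently: Farneback produces a dense flow field with a displacement vector at every pixel, which is what yields the HSV flow images saved in Step 4, while Lucas-Kanade tracks only a sparse set of Shi-Tomasi corners, which is cheaper but gives no per-pixel map. In this pipeline it is the dense Farneback output that gets composed back into a video in Step 5.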

Step 5: Compose the output optical-flow images into a video, in the same way as Step 3 (see the sketch below).
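A sketch of this step using the hypothetical video_make_safe helper from Step 3; it also copes with the flow images above starting at 1.png rather than 0.png (the output file name flow_output.mp4 is an assumption):

video_make_safe('C:/Users/zhangjing/Documents/Bandicam/phone_7_cut/',
                'C:/Users/zhangjing/Documents/Bandicam/flow_output.mp4',
                fps=30, size=(70, 70))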

Results
