在前端使用 MediaPipe 实现人脸识别,以 Vue 框架为例。
首先需要安装@mediapipe/tasks-vision库:
npm i @mediapipe/tasks-vision
在项目中引入后,初始化,runningMode有 "IMAGE"和"VIDEO"两种模式,分别对应着识别源是图片还是视频。
import {
FaceDetector,
FilesetResolver
} from "@mediapipe/tasks-vision";
// Initialize the MediaPipe FaceDetector.
// Loads the WASM fileset and the BlazeFace short-range model, stores the
// detector in the module-level `faceDetector`, and also returns the instance
// so callers can `await initFaceDetector()` to get the ready detector.
async function initFaceDetector() {
  const vision = await FilesetResolver.forVisionTasks(
    // To self-host, download the WASM files and use "/mediapipe/tasks-vision/wasm" instead.
    "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"
  );
  faceDetector = await FaceDetector.createFromOptions(vision, {
    baseOptions: {
      // To self-host the model, download it and point at the local copy:
      // modelAssetPath: `/mediapipe/blaze_face_short_range.tflite`,
      modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_detector/blaze_face_short_range/float16/1/blaze_face_short_range.tflite`,
      delegate: "CPU"
    },
    // "IMAGE" for still images; use "VIDEO" for video streams
    // (switch at runtime via faceDetector.setOptions({ runningMode: "VIDEO" })).
    runningMode: "IMAGE"
  });
  return faceDetector;
}
识别图片中的人脸:
// Synchronous single-image detection (detector's runningMode must be "IMAGE").
// NOTE(review): `imageData` is presumably an image/canvas/ImageData source
// accepted by FaceDetector.detect — confirm against the MediaPipe docs.
const detections = faceDetector.detect(imageData).detections;
识别视频中的人脸:
//使用usb相机,加载到id为cam的video标签中
// Open the USB camera and attach its stream to the <video id="cam"> element.
// The raw MediaStream is kept in the module-level `mediaStreamTrack` so the
// camera can be stopped later.
function loadVideoUSB() {
  const constraints = { video: true };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then((stream) => {
      const videoElement = document.getElementById('cam');
      // Keep a reference so the camera can be closed later.
      mediaStreamTrack = stream;
      videoElement.srcObject = stream;
      const [track] = stream.getVideoTracks();
      const settings = track.getSettings();
      console.log("Resolution: " + settings.width + "x" + settings.height);
    })
    .catch((err) => {
      alert(err);
    });
}
// 加载前需要把 runningMode 设置为 "VIDEO"(可通过 await faceDetector.setOptions({ runningMode: "VIDEO" }) 切换)
// Run continuous face detection on the <video id="cam"> stream.
// Precondition: the detector's runningMode must already be "VIDEO"
// (e.g. await faceDetector.setOptions({ runningMode: "VIDEO" })).
function handleVideo() {
  console.log('mode is video ');
  const video = document.querySelector("#cam");
  let lastVideoTime = -1;
  async function predictWebcam() {
    const startTimeMs = performance.now();
    // Only invoke the detector when a new video frame has arrived;
    // requestAnimationFrame can fire faster than the video frame rate.
    if (video.currentTime !== lastVideoTime) {
      lastVideoTime = video.currentTime;
      const detections = faceDetectorVideo.detectForVideo(video, startTimeMs).detections;
      // `detections` holds the faces found in this frame.
    }
    requestAnimationFrame(predictWebcam);
  }
  // Bug fix: the original defined predictWebcam but never called it,
  // so the detection loop never started. Kick it off here.
  predictWebcam();
}