WebRTC(Web Real-Time Communication)是一项强大的 Web 技术,可实现浏览器之间的实时通信,包括音视频流的传输。下面将详细介绍如何使用 WebRTC 实现实时滤镜与录屏功能。
1. 项目初始化
创建一个 HTML 文件,例如 index.html,并引入必要的元素和样式。
<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <!-- 修复提取损坏的字符集与视口声明(原文为 "UTF - 8"、"device - width" 等) -->
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>WebRTC 实时滤镜与录屏</title>
  <style>
    video {
      width: 640px;
      height: 480px;
      margin: 10px;
    }
  </style>
</head>
<body>
  <!-- 本地摄像头预览(静音以避免回声) -->
  <video id="localVideo" autoplay muted></video>
  <!-- 经 canvas 滤镜处理后的视频流 -->
  <video id="filteredVideo" autoplay></video>
  <button id="startRecording">开始录屏</button>
  <button id="stopRecording">停止录屏</button>
  <a id="downloadLink" download="recorded_video.webm">下载录屏</a>
  <script src="script.js"></script>
</body>
</html>
2. 获取用户媒体流
在 script.js 文件中,首先获取用户的摄像头和麦克风的媒体流。
// Cached DOM references shared by the filter and recording sections.
const $ = (id) => document.getElementById(id);
const localVideo = $('localVideo');
const filteredVideo = $('filteredVideo');
const startRecordingButton = $('startRecording');
const stopRecordingButton = $('stopRecording');
const downloadLink = $('downloadLink');

// Shared state: raw camera stream, active recorder, and captured chunks.
let mediaStream;
let mediaRecorder;
let recordedChunks = [];

/**
 * Request the user's camera and microphone and attach the resulting
 * stream to the local preview <video>. Logs (does not rethrow) on failure,
 * e.g. when the user denies permission.
 */
async function getMediaStream() {
  try {
    const constraints = { video: true, audio: true };
    mediaStream = await navigator.mediaDevices.getUserMedia(constraints);
    localVideo.srcObject = mediaStream;
  } catch (error) {
    console.error('获取媒体流失败:', error);
  }
}

getMediaStream();
3. 实现实时滤镜
使用 canvas 对视频流应用滤镜,然后将处理后的图像显示在另一个 video 元素中。
// Offscreen canvas used to post-process each camera frame.
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.width = 640;
canvas.height = 480;

// BUGFIX: captureStream() must be called ONCE, not once per frame.
// The original code replaced filteredVideo.srcObject with a brand-new
// MediaStream every 16 ms, which breaks playback and any MediaRecorder
// attached to it. A single captured stream stays live as the canvas updates.
const filteredStream = canvas.captureStream();
filteredVideo.srcObject = filteredStream;

/**
 * Draw the current camera frame onto the canvas and apply a simple
 * grayscale filter in place (average of the R/G/B channels; alpha untouched).
 */
function applyFilter() {
  ctx.drawImage(localVideo, 0, 0, canvas.width, canvas.height);
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  const data = imageData.data;
  // 简单的灰度滤镜示例
  for (let i = 0; i < data.length; i += 4) {
    const avg = (data[i] + data[i + 1] + data[i + 2]) / 3;
    data[i] = avg;
    data[i + 1] = avg;
    data[i + 2] = avg;
  }
  ctx.putImageData(imageData, 0, 0);
}

// BUGFIX: 'play' fires again after every pause/resume; guard so only one
// timer ever runs instead of stacking a new setInterval per event.
let filterTimer = null;
localVideo.addEventListener('play', () => {
  if (filterTimer === null) {
    filterTimer = setInterval(applyFilter, 16); // 约 60 FPS
  }
});
4. 实现录屏功能
使用 MediaRecorder API 录制处理后的视频流。
/**
 * Start recording the filtered stream shown in #filteredVideo.
 * Chunks accumulate in recordedChunks and become a downloadable
 * WebM blob when recording stops.
 */
startRecordingButton.addEventListener('click', () => {
  const stream = filteredVideo.srcObject;
  // Guard: nothing to record before the filter pipeline has started.
  if (!stream) {
    console.error('没有可录制的视频流');
    return;
  }
  recordedChunks = [];
  mediaRecorder = new MediaRecorder(stream);
  mediaRecorder.ondataavailable = (event) => {
    if (event.data.size > 0) {
      recordedChunks.push(event.data);
    }
  };
  mediaRecorder.onstop = () => {
    // BUGFIX: revoke the previous object URL before creating a new one,
    // otherwise each recording leaks a blob for the page's lifetime.
    if (downloadLink.href.startsWith('blob:')) {
      URL.revokeObjectURL(downloadLink.href);
    }
    const blob = new Blob(recordedChunks, { type: 'video/webm' });
    downloadLink.href = URL.createObjectURL(blob);
  };
  mediaRecorder.start();
});

// Stop only when a recorder exists and is actually running/paused.
stopRecordingButton.addEventListener('click', () => {
  if (mediaRecorder && mediaRecorder.state !== 'inactive') {
    mediaRecorder.stop();
  }
});
完整代码示例
// ---- DOM references and shared state ----
const localVideo = document.getElementById('localVideo');
const filteredVideo = document.getElementById('filteredVideo');
const startRecordingButton = document.getElementById('startRecording');
const stopRecordingButton = document.getElementById('stopRecording');
const downloadLink = document.getElementById('downloadLink');
let mediaStream;
let mediaRecorder;
let recordedChunks = [];

/**
 * Request the user's camera and microphone and attach the stream to the
 * local preview <video>. Logs (does not rethrow) on permission failure.
 */
async function getMediaStream() {
  try {
    mediaStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
    localVideo.srcObject = mediaStream;
  } catch (error) {
    console.error('获取媒体流失败:', error);
  }
}
getMediaStream();

// ---- Real-time filter ----
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.width = 640;
canvas.height = 480;

// BUGFIX: capture the canvas stream ONCE. The original called
// canvas.captureStream() inside applyFilter, replacing the srcObject
// with a new MediaStream every frame and breaking playback/recording.
const filteredStream = canvas.captureStream();
filteredVideo.srcObject = filteredStream;

/**
 * Draw the current camera frame onto the canvas and apply a simple
 * grayscale filter in place (average of the R/G/B channels).
 */
function applyFilter() {
  ctx.drawImage(localVideo, 0, 0, canvas.width, canvas.height);
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  const data = imageData.data;
  // 简单的灰度滤镜示例
  for (let i = 0; i < data.length; i += 4) {
    const avg = (data[i] + data[i + 1] + data[i + 2]) / 3;
    data[i] = avg;
    data[i + 1] = avg;
    data[i + 2] = avg;
  }
  ctx.putImageData(imageData, 0, 0);
}

// BUGFIX: 'play' fires again after pause/resume; guard against stacking timers.
let filterTimer = null;
localVideo.addEventListener('play', () => {
  if (filterTimer === null) {
    filterTimer = setInterval(applyFilter, 16); // 约 60 FPS
  }
});

// ---- Recording ----
startRecordingButton.addEventListener('click', () => {
  const stream = filteredVideo.srcObject;
  if (!stream) {
    console.error('没有可录制的视频流');
    return;
  }
  recordedChunks = [];
  mediaRecorder = new MediaRecorder(stream);
  mediaRecorder.ondataavailable = (event) => {
    if (event.data.size > 0) {
      recordedChunks.push(event.data);
    }
  };
  mediaRecorder.onstop = () => {
    // BUGFIX: revoke the previous object URL so repeated recordings don't leak blobs.
    if (downloadLink.href.startsWith('blob:')) {
      URL.revokeObjectURL(downloadLink.href);
    }
    const blob = new Blob(recordedChunks, { type: 'video/webm' });
    downloadLink.href = URL.createObjectURL(blob);
  };
  mediaRecorder.start();
});

stopRecordingButton.addEventListener('click', () => {
  if (mediaRecorder && mediaRecorder.state !== 'inactive') {
    mediaRecorder.stop();
  }
});
总结
通过上述步骤,你可以使用 WebRTC 实现实时滤镜与录屏功能。获取用户媒体流后,使用 canvas 对视频流应用滤镜,再使用 MediaRecorder 录制处理后的视频流。你可以根据需求调整滤镜算法,实现更复杂的滤镜效果。