import sys
import cv2
import numpy as np
import wave
import time
import pyqtgraph as pg
import subprocess
import threading
from queue import Queue
from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap, QIcon, QFont
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
QPushButton, QLabel, QSlider, QComboBox, QGroupBox, QStatusBar, QTextEdit,
QLineEdit, QMessageBox, QSplitter
)
class VideoThread(QThread):
    """Background thread that pulls frames from an RTSP stream via OpenCV
    and emits them to the UI as ready-to-display QImage objects.

    Reconnects automatically up to ``max_reconnect_attempts`` times when
    the stream cannot be opened or drops mid-stream.
    """

    # Emitted for every decoded frame, already converted to RGB and scaled.
    change_pixmap = pyqtSignal(QImage)
    # Emitted roughly once per second with the measured frame rate.
    fps_signal = pyqtSignal(float)
    # Emitted with human-readable status messages for the status bar / log.
    status_signal = pyqtSignal(str)

    def __init__(self, rtsp_url):
        """:param rtsp_url: full RTSP URL, credentials included, e.g.
        ``rtsp://user:pass@host:554/Streaming/Channels/101``."""
        super().__init__()
        self.rtsp_url = rtsp_url
        self._run_flag = True           # cleared by stop() to end the loops
        self.capture = None             # cv2.VideoCapture, created in run()
        self.reconnect_attempts = 0
        self.max_reconnect_attempts = 5

    def run(self):
        """Connect, read and emit frames until stopped or out of retries."""
        while self._run_flag and self.reconnect_attempts < self.max_reconnect_attempts:
            try:
                self.status_signal.emit(f"尝试连接摄像头... (尝试 {self.reconnect_attempts + 1}/{self.max_reconnect_attempts})")
                # Use the FFmpeg backend for better RTSP support.
                self.capture = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
                if not self.capture.isOpened():
                    self.status_signal.emit("摄像头连接失败")
                    self.reconnect_attempts += 1
                    time.sleep(2)
                    continue
                self.status_signal.emit("摄像头连接成功")
                self.reconnect_attempts = 0
                # Request capture parameters (the camera may ignore these).
                self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
                self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
                self.capture.set(cv2.CAP_PROP_FPS, 30)
                frame_count = 0
                start_time = time.time()
                last_frame_time = start_time
                while self._run_flag:
                    ret, frame = self.capture.read()
                    current_time = time.time()
                    if not ret:
                        self.status_signal.emit("视频流中断,尝试重连...")
                        break
                    # Convert OpenCV's BGR layout to RGB for Qt.
                    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    h, w, ch = rgb_image.shape
                    bytes_per_line = ch * w
                    convert_to_qt_format = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format.Format_RGB888)
                    # scaled() copies the pixels, so the numpy buffer backing
                    # the intermediate QImage can safely be reused next loop.
                    p = convert_to_qt_format.scaled(1280, 720, Qt.AspectRatioMode.KeepAspectRatio)
                    self.change_pixmap.emit(p)
                    # FPS accounting over ~1 second windows.
                    frame_count += 1
                    elapsed_time = current_time - start_time
                    if elapsed_time > 1.0:
                        fps = frame_count / elapsed_time
                        self.fps_signal.emit(fps)
                        frame_count = 0
                        start_time = current_time
                    # Stall detection: if the last read() blocked for more
                    # than 2 seconds, drop the connection and reconnect.
                    if current_time - last_frame_time > 2.0:
                        self.status_signal.emit("视频流卡顿,尝试重连...")
                        break
                    last_frame_time = current_time
                if self.capture:
                    self.capture.release()
            except Exception as e:
                self.status_signal.emit(f"摄像头错误: {str(e)}")
                self.reconnect_attempts += 1
                time.sleep(2)
        if self.reconnect_attempts >= self.max_reconnect_attempts:
            self.status_signal.emit("摄像头连接失败,请检查配置")
        else:
            self.status_signal.emit("视频流已停止")

    def stop(self):
        """Request shutdown and block until the thread exits.

        NOTE(review): release() here runs on the caller's thread while run()
        may still be inside read() — presumably intentional, to unblock a
        stalled read(); confirm before reordering this teardown.
        """
        self._run_flag = False
        if self.capture and self.capture.isOpened():
            self.capture.release()
        self.wait()
class AudioCaptureThread(QThread):
    """Background thread that spawns an external ``ffmpeg`` process to pull
    the audio track out of an RTSP stream and emits it as chunks of mono
    16-bit PCM samples (``np.int16`` arrays)."""

    # Emitted with each chunk of decoded PCM samples.
    audio_data_signal = pyqtSignal(np.ndarray)
    # Emitted with human-readable status messages for the status bar / log.
    status_signal = pyqtSignal(str)

    def __init__(self, rtsp_url):
        """:param rtsp_url: full RTSP URL, credentials included."""
        super().__init__()
        self.rtsp_url = rtsp_url
        self._run_flag = True          # cleared by stop() to end the loop
        self.audio_queue = Queue()     # kept for backward compatibility; not used here
        self.ffmpeg_process = None     # subprocess.Popen handle, set in run()

    def run(self):
        """Start ffmpeg and stream raw PCM from its stdout until stopped."""
        try:
            # ffmpeg extracts the audio track and writes raw PCM to stdout.
            command = [
                'ffmpeg',
                '-i', self.rtsp_url,    # input RTSP stream
                '-vn',                  # drop the video track
                '-ac', '1',             # mono
                '-ar', '44100',         # sample rate
                '-f', 's16le',          # 16-bit signed little-endian PCM
                '-loglevel', 'quiet',   # suppress log output
                'pipe:1'                # write to standard output
            ]
            self.status_signal.emit("正在连接摄像头音频流...")
            # stderr goes to DEVNULL: an undrained PIPE could fill its
            # buffer and block ffmpeg if it ever logs despite -loglevel.
            self.ffmpeg_process = subprocess.Popen(
                command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
            self.status_signal.emit("音频流已连接")
            chunk_size = 1024 * 2  # 1024 samples * 2 bytes per sample
            pending = b''          # carries incomplete sample bytes between reads
            while self._run_flag:
                # Read raw audio bytes from ffmpeg's stdout.
                raw_audio = self.ffmpeg_process.stdout.read(chunk_size)
                if not raw_audio:
                    break  # EOF: ffmpeg exited or the stream ended
                pending += raw_audio
                # Pipe reads can return partial chunks; only hand whole
                # 2-byte samples to np.frombuffer (an odd length raises).
                usable = len(pending) - (len(pending) % 2)
                if usable:
                    audio_data = np.frombuffer(pending[:usable], dtype=np.int16)
                    pending = pending[usable:]
                    self.audio_data_signal.emit(audio_data)
        except Exception as e:
            self.status_signal.emit(f"音频错误: {str(e)}")
        finally:
            if self.ffmpeg_process:
                self.ffmpeg_process.terminate()
                self.ffmpeg_process.wait()
            self.status_signal.emit("音频流已停止")

    def stop(self):
        """Request shutdown: terminating ffmpeg closes its stdout, which
        unblocks the read() in run(); then wait for the thread to exit."""
        self._run_flag = False
        if self.ffmpeg_process:
            self.ffmpeg_process.terminate()
        self.wait()
class MainWindow(QMainWindow):
    """Main application window: Hikvision RTSP video monitor with a live
    audio waveform, snapshot/recording controls, system status panel and
    an operation log."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("海康威视监控系统 - 视频与音频分析")
        self.setGeometry(100, 100, 1400, 900)
        # Application icon (replace with an actual icon path).
        self.setWindowIcon(QIcon("monitor_icon.png"))
        # Central widget and top-level layout.
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QVBoxLayout(central_widget)
        # Status bar.
        self.status_bar = QStatusBar()
        self.setStatusBar(self.status_bar)
        self.status_bar.showMessage("就绪")
        # --- RTSP configuration area ---
        config_group = QGroupBox("海康威视摄像头配置")
        config_layout = QHBoxLayout()
        config_layout.addWidget(QLabel("RTSP URL:"))
        self.rtsp_url_edit = QLineEdit()
        self.rtsp_url_edit.setPlaceholderText("rtsp://username:password@ip:port/Streaming/Channels/101")
        self.rtsp_url_edit.setText("rtsp://admin:1q2w3e4r@192.168.1.64:554/Streaming/Channels/101")
        config_layout.addWidget(self.rtsp_url_edit, 70)
        self.test_btn = QPushButton("测试连接")
        self.test_btn.setIcon(QIcon("test.png"))
        config_layout.addWidget(self.test_btn)
        config_layout.addWidget(QLabel("分辨率:"))
        self.resolution_combo = QComboBox()
        self.resolution_combo.addItems(["1280x720", "1920x1080", "640x480"])
        self.resolution_combo.setCurrentIndex(0)
        config_layout.addWidget(self.resolution_combo)
        config_layout.addWidget(QLabel("码率:"))
        self.bitrate_combo = QComboBox()
        self.bitrate_combo.addItems(["2048 Kbps", "4096 Kbps", "8192 Kbps"])
        self.bitrate_combo.setCurrentIndex(1)
        config_layout.addWidget(self.bitrate_combo)
        config_group.setLayout(config_layout)
        main_layout.addWidget(config_group)
        # --- Video and audio sections (stacked vertically) ---
        media_layout = QVBoxLayout()
        # Video display area.
        video_group = QGroupBox("视频监控")
        video_layout = QVBoxLayout()
        self.video_label = QLabel()
        self.video_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self.video_label.setMinimumSize(1280, 720)
        self.video_label.setStyleSheet("background-color: black;")
        video_controls_layout = QHBoxLayout()
        self.start_video_btn = QPushButton("开始视频")
        self.start_video_btn.setIcon(QIcon("video_start.png"))
        self.stop_video_btn = QPushButton("停止视频")
        self.stop_video_btn.setIcon(QIcon("video_stop.png"))
        self.stop_video_btn.setEnabled(False)
        self.brightness_slider = QSlider(Qt.Orientation.Horizontal)
        self.brightness_slider.setRange(0, 100)
        self.brightness_slider.setValue(50)
        self.brightness_slider.setTickPosition(QSlider.TickPosition.TicksBelow)
        self.brightness_slider.setTickInterval(10)
        self.contrast_slider = QSlider(Qt.Orientation.Horizontal)
        self.contrast_slider.setRange(0, 100)
        self.contrast_slider.setValue(50)
        self.contrast_slider.setTickPosition(QSlider.TickPosition.TicksBelow)
        self.contrast_slider.setTickInterval(10)
        video_controls_layout.addWidget(self.start_video_btn)
        video_controls_layout.addWidget(self.stop_video_btn)
        video_controls_layout.addWidget(QLabel("亮度:"))
        video_controls_layout.addWidget(self.brightness_slider)
        video_controls_layout.addWidget(QLabel("对比度:"))
        video_controls_layout.addWidget(self.contrast_slider)
        video_layout.addWidget(self.video_label)
        video_layout.addLayout(video_controls_layout)
        video_group.setLayout(video_layout)
        media_layout.addWidget(video_group, 70)  # video takes 70% of the space
        # Audio display area, below the video.
        audio_group = QGroupBox("音频波形")
        audio_layout = QVBoxLayout()
        # Waveform plot with a generous Y range and auto-scaling.
        self.audio_plot = pg.PlotWidget()
        self.audio_plot.setTitle("摄像头音频波形")
        self.audio_plot.setLabel('left', '振幅')
        self.audio_plot.setLabel('bottom', '样本')
        self.audio_plot.setYRange(-20000, 20000)
        self.audio_plot.enableAutoRange('y', 0.95)  # keep a 5% margin
        self.audio_plot.setXRange(0, 1024)
        self.audio_plot.setBackground('k')
        self.audio_plot.setMinimumHeight(300)
        self.audio_curve = self.audio_plot.plot(pen='y')
        # Audio analysis metrics.
        audio_metrics_layout = QHBoxLayout()
        self.volume_label = QLabel("音量: -- dB")
        self.sample_rate_label = QLabel("采样率: -- Hz")
        self.peak_label = QLabel("峰值: --")
        for label in [self.volume_label, self.sample_rate_label, self.peak_label]:
            label.setFont(QFont("Arial", 10))
            audio_metrics_layout.addWidget(label)
        audio_controls_layout = QHBoxLayout()
        self.start_audio_btn = QPushButton("开始音频")
        self.start_audio_btn.setIcon(QIcon("audio_start.png"))
        self.stop_audio_btn = QPushButton("停止音频")
        self.stop_audio_btn.setIcon(QIcon("audio_stop.png"))
        self.stop_audio_btn.setEnabled(False)
        self.volume_slider = QSlider(Qt.Orientation.Horizontal)
        self.volume_slider.setRange(0, 100)
        self.volume_slider.setValue(80)
        self.volume_slider.setTickPosition(QSlider.TickPosition.TicksBelow)
        self.volume_slider.setTickInterval(10)
        audio_controls_layout.addWidget(self.start_audio_btn)
        audio_controls_layout.addWidget(self.stop_audio_btn)
        audio_controls_layout.addWidget(QLabel("音量:"))
        audio_controls_layout.addWidget(self.volume_slider)
        audio_layout.addWidget(self.audio_plot)
        audio_layout.addLayout(audio_metrics_layout)
        audio_layout.addLayout(audio_controls_layout)
        audio_group.setLayout(audio_layout)
        media_layout.addWidget(audio_group, 30)   # audio takes 30% of the space
        main_layout.addLayout(media_layout, 70)   # media area takes 70% overall
        # --- Information area ---
        info_layout = QHBoxLayout()
        # System status panel.
        status_group = QGroupBox("系统状态")
        status_layout = QVBoxLayout()
        self.camera_status = QLabel("摄像头: 未连接")
        self.audio_status = QLabel("音频: 未连接")
        self.fps_label = QLabel("FPS: 0.0")
        self.resolution_label = QLabel("分辨率: --")
        self.bitrate_label = QLabel("码率: --")
        self.cpu_label = QLabel("CPU使用率: --%")
        self.mem_label = QLabel("内存使用: --MB")
        status_labels = [
            self.camera_status, self.audio_status, self.fps_label,
            self.resolution_label, self.bitrate_label, self.cpu_label, self.mem_label
        ]
        for label in status_labels:
            label.setFont(QFont("Arial", 10))
            status_layout.addWidget(label)
        status_group.setLayout(status_layout)
        # Operation log.
        log_group = QGroupBox("操作日志")
        log_layout = QVBoxLayout()
        self.log_text = QTextEdit()
        self.log_text.setReadOnly(True)
        self.log_text.setFont(QFont("Consolas", 9))
        log_layout.addWidget(self.log_text)
        log_group.setLayout(log_layout)
        info_layout.addWidget(status_group, 30)
        info_layout.addWidget(log_group, 70)
        main_layout.addLayout(info_layout, 30)  # info area takes 30% of the space
        # --- Bottom buttons ---
        bottom_layout = QHBoxLayout()
        self.record_btn = QPushButton("开始录制")
        self.record_btn.setIcon(QIcon("record.png"))
        self.snapshot_btn = QPushButton("截图")
        self.snapshot_btn.setIcon(QIcon("camera.png"))
        self.settings_btn = QPushButton("PTZ控制")
        self.settings_btn.setIcon(QIcon("ptz.png"))
        self.exit_btn = QPushButton("退出")
        self.exit_btn.setIcon(QIcon("exit.png"))
        self.exit_btn.setStyleSheet("background-color: #ff6666; color: white;")
        bottom_layout.addWidget(self.record_btn)
        bottom_layout.addWidget(self.snapshot_btn)
        bottom_layout.addWidget(self.settings_btn)
        bottom_layout.addStretch()
        bottom_layout.addWidget(self.exit_btn)
        main_layout.addLayout(bottom_layout)
        # --- State variables ---
        self.video_thread = None
        self.audio_thread = None
        self.audio_data = np.zeros(1024, dtype=np.int16)
        self.is_recording = False
        self.video_writer = None
        self.audio_writer = None
        self.audio_filename = None      # WAV path chosen at recording start
        self.recording_start_time = 0
        self.audio_frames = []
        self.volume_history = []        # last 10 volume readings in dB
        # --- Signal/slot wiring ---
        self.start_video_btn.clicked.connect(self.start_video)
        self.stop_video_btn.clicked.connect(self.stop_video)
        self.start_audio_btn.clicked.connect(self.start_audio)
        self.stop_audio_btn.clicked.connect(self.stop_audio)
        self.snapshot_btn.clicked.connect(self.take_snapshot)
        self.record_btn.clicked.connect(self.toggle_recording)
        self.exit_btn.clicked.connect(self.close)
        self.test_btn.clicked.connect(self.test_connection)
        self.brightness_slider.valueChanged.connect(self.adjust_brightness)
        self.contrast_slider.valueChanged.connect(self.adjust_contrast)
        # Audio waveform refresh timer (20 Hz).
        self.audio_timer = QTimer()
        self.audio_timer.timeout.connect(self.update_audio_plot)
        self.audio_timer.start(50)
        # System status refresh timer (1 Hz).
        self.status_timer = QTimer()
        self.status_timer.timeout.connect(self.update_system_status)
        self.status_timer.start(1000)
        # Recording elapsed-time timer (started on demand).
        self.record_timer = QTimer()
        self.record_timer.timeout.connect(self.update_recording_time)
        # Initial log entries.
        self.log_message("应用程序已启动")
        self.log_message("使用FFmpeg从海康威视摄像头获取音频流")

    def start_video(self):
        """Create and start the video capture thread for the configured URL."""
        if self.video_thread is None:
            rtsp_url = self.rtsp_url_edit.text().strip()
            if not rtsp_url:
                QMessageBox.warning(self, "配置错误", "请输入有效的RTSP URL")
                return
            self.video_thread = VideoThread(rtsp_url)
            self.video_thread.change_pixmap.connect(self.set_image)
            self.video_thread.fps_signal.connect(self.update_fps)
            self.video_thread.status_signal.connect(self.update_camera_status)
            self.video_thread.start()
            self.start_video_btn.setEnabled(False)
            self.stop_video_btn.setEnabled(True)
            self.log_message(f"视频流启动: {rtsp_url}")

    def stop_video(self):
        """Stop the video thread and reset the related UI state."""
        if self.video_thread:
            self.video_thread.stop()
            self.video_thread = None
            self.start_video_btn.setEnabled(True)
            self.stop_video_btn.setEnabled(False)
            self.camera_status.setText("摄像头: 未连接")
            self.fps_label.setText("FPS: 0.0")
            self.log_message("视频流已停止")

    def start_audio(self):
        """Create and start the audio capture thread for the configured URL."""
        if self.audio_thread is None:
            rtsp_url = self.rtsp_url_edit.text().strip()
            if not rtsp_url:
                QMessageBox.warning(self, "配置错误", "请输入有效的RTSP URL")
                return
            self.audio_thread = AudioCaptureThread(rtsp_url)
            self.audio_thread.audio_data_signal.connect(self.update_audio_data)
            self.audio_thread.status_signal.connect(self.update_audio_status)
            self.audio_thread.start()
            self.start_audio_btn.setEnabled(False)
            self.stop_audio_btn.setEnabled(True)
            self.log_message("音频流启动中...")

    def stop_audio(self):
        """Stop the audio thread and reset the related UI state."""
        if self.audio_thread:
            self.audio_thread.stop()
            self.audio_thread = None
            self.start_audio_btn.setEnabled(True)
            self.stop_audio_btn.setEnabled(False)
            self.audio_status.setText("音频: 未连接")
            self.log_message("音频流已停止")

    def set_image(self, image):
        """Slot: display a frame delivered by the video thread."""
        # TODO(review): if self.video_writer is active, frames should also be
        # written here — recording currently produces an empty video file.
        self.video_label.setPixmap(QPixmap.fromImage(image))

    def update_audio_data(self, data):
        """Slot: keep the latest PCM chunk and refresh the audio metrics."""
        self.audio_data = data
        if len(data) > 0:
            # Volume in dBFS relative to the int16 full scale.
            rms = np.sqrt(np.mean(np.square(data.astype(np.float32))))
            if rms > 0:
                db = 20 * np.log10(rms / 32768.0)
                self.volume_label.setText(f"音量: {db:.1f} dB")
                self.volume_history.append(db)
                if len(self.volume_history) > 10:
                    self.volume_history.pop(0)
            # Peak amplitude of this chunk.
            peak = np.max(np.abs(data))
            self.peak_label.setText(f"峰值: {peak}")
            # Adapt the Y axis to the signal, with a floor of +/-5000.
            max_peak = max(5000, peak * 1.2)
            self.audio_plot.setYRange(-max_peak, max_peak)
            # Sample rate is fixed by the ffmpeg command in AudioCaptureThread.
            self.sample_rate_label.setText("采样率: 44100 Hz")

    def update_audio_plot(self):
        """Timer slot: redraw the waveform from the latest PCM chunk."""
        if self.audio_thread:
            x = np.arange(len(self.audio_data))
            self.audio_curve.setData(x, self.audio_data)

    def update_fps(self, fps):
        """Slot: show the frame rate reported by the video thread."""
        self.fps_label.setText(f"FPS: {fps:.1f}")

    def update_camera_status(self, message):
        """Slot: reflect a video-thread status message in the UI and log."""
        self.camera_status.setText(f"摄像头: {message}")
        self.status_bar.showMessage(message, 3000)
        self.log_message(f"摄像头状态: {message}")

    def update_audio_status(self, message):
        """Slot: reflect an audio-thread status message in the UI and log."""
        self.audio_status.setText(f"音频: {message}")
        self.status_bar.showMessage(message, 3000)
        self.log_message(f"音频状态: {message}")

    def update_system_status(self):
        """Timer slot: refresh the status panel (CPU/memory are mock values)."""
        self.cpu_label.setText("CPU使用率: 32%")
        self.mem_label.setText("内存使用: 245MB")
        resolution = self.resolution_combo.currentText()
        self.resolution_label.setText(f"分辨率: {resolution}")
        bitrate = self.bitrate_combo.currentText()
        self.bitrate_label.setText(f"码率: {bitrate}")
        # Rolling average of the last volume readings.
        if self.volume_history:
            avg_volume = np.mean(self.volume_history)
            self.status_bar.showMessage(f"平均音量: {avg_volume:.1f} dB", 1000)

    def take_snapshot(self):
        """Save the currently displayed frame as a timestamped JPEG."""
        if self.video_thread and self.video_label.pixmap():
            pixmap = self.video_label.pixmap()
            filename = f"snapshot_{time.strftime('%Y%m%d_%H%M%S')}.jpg"
            pixmap.save(filename)
            # Report the actual filename (previously logged a placeholder).
            self.log_message(f"截图已保存: {filename}")
            self.status_bar.showMessage(f"截图已保存: {filename}", 3000)

    def toggle_recording(self):
        """Flip recording state and start/stop accordingly."""
        self.is_recording = not self.is_recording
        if self.is_recording:
            self.start_recording()
        else:
            self.stop_recording()

    def start_recording(self):
        """Open the video writer and announce the output filenames."""
        try:
            # Video recording target.
            video_filename = f"recording_{time.strftime('%Y%m%d_%H%M%S')}.avi"
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            self.video_writer = cv2.VideoWriter(video_filename, fourcc, 20.0, (1280, 720))
            # Audio recording target; remembered so stop_recording() writes
            # to the filename announced here, not a new timestamp.
            self.audio_filename = f"recording_{time.strftime('%Y%m%d_%H%M%S')}.wav"
            # TODO(review): nothing currently appends to audio_frames, so the
            # WAV is only written if another component fills this list.
            self.audio_frames = []
            self.record_btn.setText("停止录制")
            self.record_btn.setIcon(QIcon("record_stop.png"))
            self.recording_start_time = time.time()
            self.record_timer.start(1000)
            self.log_message("开始录制视频和音频...")
            self.log_message(f"视频文件: {video_filename}")
            self.log_message(f"音频文件: {self.audio_filename}")
        except Exception as e:
            self.log_message(f"录制错误: {str(e)}")
            self.is_recording = False

    def stop_recording(self):
        """Close the video writer and flush collected audio to a WAV file."""
        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None
        if self.audio_frames:
            # Reuse the filename chosen at start; fall back to a fresh one.
            audio_filename = self.audio_filename or f"recording_{time.strftime('%Y%m%d_%H%M%S')}.wav"
            wf = wave.open(audio_filename, 'wb')
            wf.setnchannels(1)
            wf.setsampwidth(2)  # 16-bit = 2 bytes per sample
            wf.setframerate(44100)
            wf.writeframes(b''.join(self.audio_frames))
            wf.close()
            self.audio_frames = []
            self.log_message(f"音频已保存: {audio_filename}")
        self.audio_filename = None
        self.record_btn.setText("开始录制")
        self.record_btn.setIcon(QIcon("record.png"))
        self.record_timer.stop()
        self.log_message("录制已停止")

    def update_recording_time(self):
        """Timer slot: show elapsed recording time as MM:SS."""
        elapsed = int(time.time() - self.recording_start_time)
        mins, secs = divmod(elapsed, 60)
        self.status_bar.showMessage(f"录制中: {mins:02d}:{secs:02d}")

    def test_connection(self):
        """Open the RTSP URL once and report whether a frame can be read."""
        rtsp_url = self.rtsp_url_edit.text().strip()
        if not rtsp_url:
            QMessageBox.warning(self, "配置错误", "请输入有效的RTSP URL")
            return
        self.log_message(f"测试连接: {rtsp_url}")
        cap = cv2.VideoCapture(rtsp_url, cv2.CAP_FFMPEG)
        if cap.isOpened():
            ret, frame = cap.read()
            if ret:
                self.log_message("连接测试成功: 摄像头响应正常")
                QMessageBox.information(self, "连接测试", "摄像头连接成功!")
            else:
                self.log_message("连接测试失败: 无法读取视频帧")
                QMessageBox.warning(self, "连接测试", "连接成功但无法读取视频帧")
        else:
            self.log_message("连接测试失败: 无法打开摄像头")
            QMessageBox.critical(self, "连接测试", "无法连接摄像头")
        cap.release()

    def adjust_brightness(self, value):
        """Slider slot; a real deployment would push this to the camera SDK/API."""
        self.log_message(f"调整亮度: {value}")
        self.status_bar.showMessage(f"亮度调整为: {value}", 2000)

    def adjust_contrast(self, value):
        """Slider slot; a real deployment would push this to the camera SDK/API."""
        self.log_message(f"调整对比度: {value}")
        self.status_bar.showMessage(f"对比度调整为: {value}", 2000)

    def log_message(self, message):
        """Append a timestamped line to the log pane and scroll to the bottom."""
        timestamp = time.strftime("%H:%M:%S")
        self.log_text.append(f"[{timestamp}] {message}")
        self.log_text.verticalScrollBar().setValue(self.log_text.verticalScrollBar().maximum())

    def closeEvent(self, event):
        """Qt close hook: shut down threads and any active recording."""
        self.stop_video()
        self.stop_audio()
        if self.is_recording:
            self.stop_recording()
        event.accept()
# Script entry point: build the Qt application and run its event loop.
# (Removed stray non-Python text that was fused onto the final line and
# made the file unparseable.)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # exec_() works on all PyQt5 releases; the exec() alias only exists
    # from PyQt5 5.10 onward.
    sys.exit(app.exec_())