Modifying the system default volume_*_speaker values

This article explains how to change the default volume levels and the maximum volume ranges in the Android framework by modifying the DEFAULT_STREAM_VOLUME array in AudioSystem.java and the MAX_STREAM_VOLUME array in AudioService.java, giving finer control over per-stream volume.


【Cause Analysis】

The default volume values and the adjustable ranges need to be changed. Comparing the reference project (U50) against the current project (pepito), the following streams were flagged KO:

Stream         Range (U50)   Default (U50)   Range (pepito)   Default (pepito)   Result
music          0-15          5               0-15             5                  KO
notification   0-7           5               0-15             9                  KO
alarm          0-7           6               0-15             11                 KO
system         0-7           5               0-7              5                  KO

【Solution】

  • In /platform/frameworks/base/media/java/android/media/AudioSystem.java, change the default values in the DEFAULT_STREAM_VOLUME array. Each entry is indexed by the matching AudioSystem.STREAM_* constant (listed after the array below), so the order of entries must not change:
    public static int[] DEFAULT_STREAM_VOLUME = new int[] {
        4,  // STREAM_VOICE_CALL
        11,  // STREAM_SYSTEM
        5,  // STREAM_RING
        7, // STREAM_MUSIC
        11,  // STREAM_ALARM
        9,  // STREAM_NOTIFICATION
        7,  // STREAM_BLUETOOTH_SCO
        7,  // STREAM_SYSTEM_ENFORCED
        5, // STREAM_DTMF
        5, // STREAM_TTS
        5, // STREAM_ACCESSIBILITY
    };
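    For reference, these are the stream type constants that index both arrays, as defined in AudioSystem.java (worth re-checking against your own source tree, since the hidden stream types have occasionally shifted between Android versions):

    public static final int STREAM_VOICE_CALL = 0;
    public static final int STREAM_SYSTEM = 1;
    public static final int STREAM_RING = 2;
    public static final int STREAM_MUSIC = 3;
    public static final int STREAM_ALARM = 4;
    public static final int STREAM_NOTIFICATION = 5;
    public static final int STREAM_BLUETOOTH_SCO = 6;
    public static final int STREAM_SYSTEM_ENFORCED = 7;
    public static final int STREAM_DTMF = 8;
    public static final int STREAM_TTS = 9;
    public static final int STREAM_ACCESSIBILITY = 10;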
  • In platform/frameworks/base/services/core/java/com/android/server/audio/AudioService.java, change the volume ranges in the MAX_STREAM_VOLUME array (a quick on-device check follows the array):
private static int[] MAX_STREAM_VOLUME = new int[] {
        8,  // STREAM_VOICE_CALL PR6931858 ling.yi@tcl.com add voice volume level 5-->8
        15,  // STREAM_SYSTEM
        7,  // STREAM_RING
        15, // STREAM_MUSIC
        15,  // STREAM_ALARM
        15,  // STREAM_NOTIFICATION
        15, // STREAM_BLUETOOTH_SCO
        7,  // STREAM_SYSTEM_ENFORCED
        15, // STREAM_DTMF
        15, // STREAM_TTS
        15  // STREAM_ACCESSIBILITY
    };
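    After flashing a build with the new arrays, the effective range can be spot-checked from a shell. A minimal sketch: the dumpsys output format differs across Android versions, and the volume_music_speaker settings key name is an assumption based on the usual volume_<stream>_<device> naming, not something confirmed by this change:

    # Per-stream min/max and current index as reported by the audio service
    adb shell dumpsys audio | grep -A 4 STREAM_MUSIC

    # Persisted speaker volume for the music stream (key name assumed)
    adb shell settings get system volume_music_speaker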
  • One caveat: the constructor of AudioService.java re-assigns the default value of STREAM_MUSIC. In my case I set the system property ro.config.media_vol_default to 7 (see the build-config sketch after the code below); otherwise the default has to be assigned manually in the logic below:
    int defaultMusicVolume = SystemProperties.getInt("ro.config.media_vol_default", -1);
    if (defaultMusicVolume != -1 &&
            defaultMusicVolume <= MAX_STREAM_VOLUME[AudioSystem.STREAM_MUSIC]) {
        AudioSystem.DEFAULT_STREAM_VOLUME[AudioSystem.STREAM_MUSIC] = defaultMusicVolume;
    } else {
        if (isPlatformTelevision()) {
            AudioSystem.DEFAULT_STREAM_VOLUME[AudioSystem.STREAM_MUSIC] =
                    MAX_STREAM_VOLUME[AudioSystem.STREAM_MUSIC] / 4;
        } else {
            AudioSystem.DEFAULT_STREAM_VOLUME[AudioSystem.STREAM_MUSIC] =
                    MAX_STREAM_VOLUME[AudioSystem.STREAM_MUSIC] / 3;
        }
    }
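    For completeness, this is roughly how the property can be baked into the build. A minimal sketch assuming a typical device makefile; the path is hypothetical:

    # device/<vendor>/<product>/device.mk (hypothetical path)
    PRODUCT_PROPERTY_OVERRIDES += \
        ro.config.media_vol_default=7

    Also keep in mind that once a user adjusts a stream, its volume is persisted in Settings, so changed defaults typically only take effect after a clean flash or a factory reset.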