A Fool's Mood Diary

Three days. You have been gone for three days now; how quickly it went! Since we fell in love we have never been apart this long. Yes, it hurts. I have been ill lately, working every day while keeping up with my injections and treatment. My heart is weary and sore...

He calls me once a day: "Eat properly, keep up with your injections, take care of yourself." That is all he ever says. He does not understand at all: I want none of that. All I want is for him to be here by my side, right now!

Are work and career really so important to a man? Well then, I am a strong-willed woman too. Why can't I put my career first and love second? Is it just because I am a woman?

In this illness, isn't it my heart that has taken the worst of the hurt? I did insist that he go, because looking at it from a woman's standpoint, if you tie a man down and refuse to let him leave, he may no longer love you the way he once did. So I gambled. I made a bet with myself. And yes, I lost. He hurried off and chose his career. Perhaps what he did was right...

But we are still young, and there will be plenty of chances later. Am I really so insignificant to him? Isn't a wife supposed to be the one and only in the world? Leaving like this, doesn't he worry about me? A lost opportunity will come around again; will a lost wife come around again too?

I am confused and cannot make sense of it!

Why does everyone have unhappiness? Does it mean growing up? When people grow up, do unhappiness and worries come along with it??
Who can truly enter another person's world? Everyone's world is different; everyone has a "place" of their own, where they do the things they love. The fool has her own paradise, and in that paradise the fool can cry. What the fool loves most, when she is unhappy, is to "cry alone."

Fool, when will you grow up? When will you face these unhappy things with strength? When will you look at things with innocent eyes again? Fool, don't be unhappy, all right? Don't cry, all right? It really is nothing; it has already passed. Stop dwelling on these unhappy things!

Your friends would not want to see you unhappy. You are not alone; there are still many people who love you and care about you. Don't shoulder all the unhappiness by yourself.

How many times have I told myself: if loving is too exhausting, just let go. Perhaps everyone's instincts were wrong; forcing someone to stay at your side only leaves many people unhappy. It is not that I am willing to let go, but that I have no choice but to release you. What else can I do? I loved too deeply; this time I even staked my own life on the bet. If my life is gone and the bet is lost, what is there left?

In the sultry heat of this summer, I am listening to pounding DJ music, with waves of pain through my body, waves of nausea, vomiting! This time, perhaps, all that is left for us is to turn away...




import os
import sys
import json
import gc
import time
import copy
import concurrent.futures
import traceback
import numpy as np
import librosa
import torch
import psutil
import noisereduce as nr
from typing import List, Dict, Tuple, Optional, Any
from pydub import AudioSegment, effects
from pydub.silence import split_on_silence
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
                             QPushButton, QLabel, QLineEdit, QTextEdit, QFileDialog,
                             QProgressBar, QGroupBox, QMessageBox, QListWidget, QSplitter,
                             QTabWidget, QTableWidget, QTableWidgetItem, QHeaderView,
                             QAction, QMenu, QToolBar, QComboBox, QSpinBox, QDialog,
                             QDialogButtonBox)
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QFont, QColor, QIcon
from collections import deque
import logging
import shutil
import subprocess
import tempfile

# Logging configuration
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("DialectQA")


# ====================== Utility functions ======================
def check_ffmpeg_available() -> Tuple[bool, str]:
    """Check whether ffmpeg is usable; return (ok, explanation)."""
    if not shutil.which("ffmpeg"):
        return False, "系统中未找到ffmpeg,请安装并添加到PATH"
    try:
        result = subprocess.run(
            ["ffmpeg", "-version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            timeout=3
        )
        if "ffmpeg version" in result.stdout:
            return True, "FFmpeg已正确安装并可用"
        return False, "FFmpeg可执行但返回异常输出"
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return False, "FFmpeg执行失败"
    except Exception as e:
        return False, f"FFmpeg检查出错: {str(e)}"


def is_gpu_available() -> bool:
    """Check whether a CUDA GPU is available."""
    return torch.cuda.is_available() and torch.cuda.device_count() > 0


# ====================== Enhanced resource monitor ======================
class EnhancedResourceMonitor:
    def __init__(self):
        self.gpu_available = is_gpu_available()
        self.history_size = 60  # keep 60 seconds of history
        self.cpu_history = deque(maxlen=self.history_size)
        self.gpu_history = deque(maxlen=self.history_size)
        self.last_check_time = time.time()

    def __del__(self):
        """Release GPU cache on destruction."""
        if self.gpu_available:
            torch.cuda.empty_cache()

    def memory_percent(self) -> Dict[str, float]:
        """Current memory usage percentages."""
        try:
            result = {"cpu": psutil.virtual_memory().percent}
            if self.gpu_available:
                allocated = torch.cuda.memory_allocated() / (1024 ** 3)
                reserved = torch.cuda.memory_reserved() / (1024 ** 3)
                total = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
                gpu_usage = (allocated + reserved) / total * 100 if total > 0 else 0
                result["gpu"] = gpu_usage
            else:
                result["gpu"] = 0.0
            current_time = time.time()
            if current_time - self.last_check_time >= 1.0:
                self.cpu_history.append(result["cpu"])
                if self.gpu_available:
                    self.gpu_history.append(result["gpu"])
                self.last_check_time = current_time
            return result
        except Exception as e:
            logger.error(f"内存监控失败: {str(e)}")
            return {"cpu": 0, "gpu": 0}

    def get_usage_trend(self) -> Dict[str, float]:
        """Memory usage trend (moving average)."""
        if not self.cpu_history:
            return {"cpu": 0, "gpu": 0}
        cpu_avg = sum(self.cpu_history) / len(self.cpu_history)
        gpu_avg = sum(self.gpu_history) / len(self.gpu_history) if self.gpu_available and self.gpu_history else 0
        return {"cpu": cpu_avg, "gpu": gpu_avg}

    def is_under_heavy_load(self, threshold: float = 85.0) -> bool:
        """Check whether the system is under heavy load."""
        current = self.memory_percent()
        trend = self.get_usage_trend()
        return any([
            current["cpu"] > threshold,
            current["gpu"] > threshold,
            trend["cpu"] > threshold,
            trend["gpu"] > threshold
        ])


# ====================== Dialect processor (enhanced) ======================
class EnhancedDialectProcessor:
    KEYWORDS = {
        "opening": ("您好", "很高兴为您服务", "请问有什么可以帮您", "麻烦您喽", "请问搞哪样",
                    "有咋个可以帮您", "多谢喽", "你好", "早上好", "下午好", "晚上好"),
        "closing": ("感谢来电", "祝您生活愉快", "再见", "搞归一喽", "麻烦您喽", "再见喽",
                    "慢走喽", "谢谢", "拜拜"),
        "forbidden": ("不知道", "没办法", "你投诉吧", "随便你", "搞不成", "没得法",
                      "随便你喽", "你投诉吧喽", "我不懂", "自己看"),
        "salutation": ("先生", "女士", "小姐", "老师", "师傅", "哥", "姐", "兄弟",
                       "妹儿", "老板", "同志"),
        "reassurance": ("非常抱歉", "请不要着急", "我们会尽快处理", "理解您的心情",
                        "实在对不住", "莫急哈", "马上帮您整", "理解您得很", "不好意思",
                        "请您谅解", "我们会尽快解决")
    }

    # Extended Guizhou-dialect to Mandarin mapping
    _DIALECT_ITEMS = (
        ("恼火得很", "非常生气"), ("鬼火戳", "很愤怒"), ("搞不成", "无法完成"),
        ("没得", "没有"), ("搞哪样嘛", "做什么呢"), ("归一喽", "完成了"),
        ("咋个", "怎么"), ("克哪点", "去哪里"), ("麻烦您喽", "麻烦您了"),
        ("多谢喽", "多谢了"), ("憨包", "傻瓜"), ("归一", "结束"),
        ("板扎", "很好"), ("鬼火冒", "非常生气"), ("背时", "倒霉"),
        ("吃豁皮", "占便宜"), ("扯拐", "出问题"), ("打脑壳", "头疼"),
        ("二天", "以后"), ("鬼火绿", "非常生气"), ("哈数", "规矩"),
        ("经事", "耐用"), ("抠脑壳", "思考"), ("拉稀摆带", "不靠谱"),
        ("马起脸", "板着脸"), ("哦豁", "哎呀"), ("皮坨", "拳头"),
        ("千翻", "顽皮"), ("日鼓鼓", "生气"), ("煞角", "结束"),
        ("舔肥", "巴结"), ("弯酸", "刁难"), ("歪得很", "凶"),
        ("悬掉掉", "危险"), ("妖艳儿", "炫耀"), ("渣渣", "垃圾")
    )

    class TrieNode:
        __slots__ = ('children', 'is_end', 'value')

        def __init__(self):
            self.children = {}
            self.is_end = False
            self.value = ""

    # Build the trie while the class body executes. Use the bare name
    # TrieNode here: writing EnhancedDialectProcessor.TrieNode() fails with
    # "Unresolved reference 'EnhancedDialectProcessor'" (NameError at runtime),
    # because a class name is only bound after its entire class statement has
    # finished executing, while names defined earlier in the same class body
    # (like TrieNode) are already in scope.
    _trie_root = TrieNode()
    for dialect, standard in sorted(_DIALECT_ITEMS, key=lambda x: len(x[0]), reverse=True):
        node = _trie_root
        for char in dialect:
            if char not in node.children:
                node.children[char] = TrieNode()
            node = node.children[char]
        node.is_end = True
        node.value = standard
    del dialect, standard, node, char  # keep loop variables out of the class namespace

    @classmethod
    def preprocess_text(cls, texts: List[str]) -> List[str]:
        """Convert dialect terms using the prebuilt trie."""
        return [cls._process_single_text(text) for text in texts]

    @classmethod
    def _process_single_text(cls, text: str) -> str:
        """Core conversion logic for a single text."""
        result = []
        i = 0
        n = len(text)
        while i < n:
            node = cls._trie_root
            j = i
            last_match = None
            # Find the longest match starting at i
            while j < n and text[j] in node.children:
                node = node.children[text[j]]
                j += 1
                if node.is_end:
                    last_match = (j, node.value)
            if last_match:
                end_index, replacement = last_match
                result.append(replacement)
                i = end_index
            else:
                result.append(text[i])
                i += 1
        return ''.join(result)


# ====================== System config manager ======================
class ConfigManager:
    __slots__ = ('config', 'dirty')
    _instance = None
    _DEFAULT_CONFIG = {
        "model_paths": {
            "asr": "D:/models/ASR-models/iic/speech_paraformer-large-vad-punc-spk_asr_nat-zh-cn",
            "sentiment": "D:/models/distilbert-base-multilingual-cased-sentiments-student"
        },
        "sample_rate": 16000,
        "silence_thresh": -40,
        "min_silence_len": 1000,
        "max_concurrent": 1,
        "max_audio_duration": 3600,
        "enable_fp16": True,
        "enable_quantization": True,
        "max_sentiment_batch_size": 16
    }

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.dirty = False
            # Deep copy so merging a user config never mutates the defaults
            cls._instance.config = copy.deepcopy(cls._DEFAULT_CONFIG)
            cls._instance.load_config()
        return cls._instance

    def load_config(self):
        """Load config.json, merging it over the defaults."""
        try:
            if os.path.exists("config.json"):
                with open("config.json", "r", encoding="utf-8") as f:
                    file_config = json.load(f)
                # Merge one level of nesting
                for key, value in file_config.items():
                    if key in self.config and isinstance(self.config[key], dict) and isinstance(value, dict):
                        self.config[key].update(value)
                    else:
                        self.config[key] = value
        except json.JSONDecodeError:
            logger.warning("配置文件格式错误,部分使用默认配置")
        except Exception as e:
            logger.error(f"加载配置失败: {str(e)},部分使用默认配置")

    def save_config(self, force=False):
        """Lazy save: only write when the config has changed."""
        if not force and not self.dirty:
            return
        try:
            with open("config.json", "w", encoding="utf-8") as f:
                json.dump(self.config, f, indent=2, ensure_ascii=False)
            self.dirty = False
        except Exception as e:
            logger.error(f"保存配置失败: {str(e)}")

    def get(self, key: str, default=None):
        return self.config.get(key, default)

    def set(self, key: str, value, immediate_save=False):
        self.config[key] = value
        self.dirty = True
        if immediate_save:
            self.save_config(force=True)

    def check_model_paths(self) -> Tuple[bool, List[str]]:
        errors = []
        model_paths = self.get("model_paths", {})
        for model_name, path in model_paths.items():
            if not path:
                errors.append(f"{model_name}模型路径未设置")
            elif not os.path.exists(path):
                errors.append(f"{model_name}模型路径不存在: {path}")
            elif not os.path.isdir(path):
                errors.append(f"{model_name}模型路径不是有效的目录: {path}")
        return len(errors) == 0, errors

    def __del__(self):
        """Persist unsaved changes on destruction."""
        if self.dirty:
            self.save_config(force=True)


# ====================== Enhanced audio processor ======================
class EnhancedAudioProcessor:
    SUPPORTED_FORMATS = ('.mp3', '.wav', '.amr', '.m4a')
    MAX_SEGMENT_DURATION = 5 * 60 * 1000  # 5-minute segment cap (ms)
    ENHANCEMENT_CONFIG = {
        'noise_sample_duration': 0.5,           # noise sample length (s)
        'telephone_filter_range': (300, 3400),  # telephone band (Hz)
        'compression_threshold': -25.0,         # compression threshold (dBFS)
        'compression_ratio': 3.0                # compression ratio
    }

    def __init__(self):
        self._noise_profile = None
        self._sample_rate = ConfigManager().get("sample_rate", 16000)

    @staticmethod
    def check_dependencies():
        try:
            # Try importing the required libraries
            import librosa
            import noisereduce
            return True, "依赖检查通过"
        except ImportError as e:
            return False, f"缺少依赖库: {str(e)}"

    def process_audio(self, input_path: str, temp_dir: str) -> Optional[List[str]]:
        """Process an audio file and return the list of segment file paths."""
        if not self._validate_input(input_path, temp_dir):
            return None
        try:
            # Work inside a temporary directory
            with tempfile.TemporaryDirectory() as process_dir:
                audio = self._load_audio(input_path)
                if audio is None:
                    return None
                # Basic preprocessing
                audio = self._basic_preprocessing(audio)
                # Audio enhancement
                audio = self._enhance_audio(audio)
                # Split on silence and save
                return self._segment_audio(audio, input_path, temp_dir or process_dir)
        except Exception as e:
            logger.error(f"音频处理失败: {str(e)}", exc_info=True)
            return None

    def _validate_input(self, input_path: str, temp_dir: str) -> bool:
        """Validate the input arguments."""
        ffmpeg_available, ffmpeg_msg = check_ffmpeg_available()
        if not ffmpeg_available:
            logger.error(f"ffmpeg错误: {ffmpeg_msg}")
            return False
        deps_ok, deps_msg = self.check_dependencies()
        if not deps_ok:
            logger.error(f"依赖错误: {deps_msg}")
            return False
        os.makedirs(temp_dir, exist_ok=True)
        ext = os.path.splitext(input_path)[1].lower()
        if ext not in self.SUPPORTED_FORMATS:
            logger.error(f"不支持的音频格式: {ext}")
            return False
        if not os.path.exists(input_path):
            logger.error(f"文件不存在: {input_path}")
            return False
        return True

    def _load_audio(self, input_path: str) -> Optional[AudioSegment]:
        """Load the audio file."""
        try:
            return AudioSegment.from_file(input_path)
        except Exception as e:
            logger.error(f"无法加载音频文件: {str(e)}")
            return None

    def _basic_preprocessing(self, audio: AudioSegment) -> AudioSegment:
        """Basic preprocessing: unify sample rate and channel count."""
        # Force mono
        if audio.channels > 1:
            audio = audio.set_channels(1)
        # Unify sample rate
        if audio.frame_rate != self._sample_rate:
            audio = audio.set_frame_rate(self._sample_rate)
        return audio

    def _enhance_audio(self, audio: AudioSegment) -> AudioSegment:
        """Run the audio-enhancement pipeline."""
        self._analyze_noise_profile(audio)
        audio = self._extract_main_voice(audio)
        audio = self._enhance_telephone_quality(audio)
        return self._normalize_audio(audio)

    def _analyze_noise_profile(self, audio: AudioSegment):
        """Analyze a noise sample to build a noise profile."""
        try:
            samples = np.array(audio.get_array_of_samples())
            sr = audio.frame_rate
            noise_duration = int(sr * self.ENHANCEMENT_CONFIG['noise_sample_duration'])
            self._noise_profile = samples[:min(noise_duration, len(samples))].astype(np.float32)
        except Exception as e:
            logger.warning(f"噪声分析失败: {str(e)}")
            self._noise_profile = None

    def _extract_main_voice(self, audio: AudioSegment) -> AudioSegment:
        """Extract the main voice from the audio."""
        if self._noise_profile is None:
            logger.warning("无噪声样本可用,跳过说话人提取")
            return audio
        try:
            samples = np.array(audio.get_array_of_samples())
            sr = audio.frame_rate
            reduced_noise = nr.reduce_noise(
                y=samples.astype(np.float32),
                sr=sr,
                y_noise=self._noise_profile,
                prop_decrease=0.8
            )
            return AudioSegment(
                reduced_noise.astype(np.int16).tobytes(),
                frame_rate=sr,
                sample_width=2,
                channels=1
            )
        except Exception as e:
            logger.warning(f"降噪处理失败: {str(e)}")
            return audio

    def _enhance_telephone_quality(self, audio: AudioSegment) -> AudioSegment:
        """Improve telephone speech quality (band-pass filter)."""
        try:
            low, high = self.ENHANCEMENT_CONFIG['telephone_filter_range']
            return audio.low_pass_filter(high).high_pass_filter(low)
        except Exception as e:
            logger.warning(f"电话质量增强失败: {str(e)}")
            return audio

    def _normalize_audio(self, audio: AudioSegment) -> AudioSegment:
        """Normalize the audio."""
        try:
            # Dynamic-range compression
            audio = effects.compress_dynamic_range(
                audio,
                threshold=self.ENHANCEMENT_CONFIG['compression_threshold'],
                ratio=self.ENHANCEMENT_CONFIG['compression_ratio']
            )
            # Normalize loudness
            return effects.normalize(audio)
        except Exception as e:
            logger.warning(f"音频标准化失败: {str(e)}")
            return audio

    def _segment_audio(self, audio: AudioSegment, input_path: str, output_dir: str) -> List[str]:
        """Split the audio on silence."""
        min_silence_len = ConfigManager().get("min_silence_len", 1000)
        silence_thresh = ConfigManager().get("silence_thresh", -40)
        try:
            segments = split_on_silence(
                audio,
                min_silence_len=min_silence_len,
                silence_thresh=silence_thresh,
                keep_silence=500
            )
            # Keep merged segments under the 5-minute cap
            merged_segments = []
            current_segment = AudioSegment.silent(duration=0, frame_rate=self._sample_rate)
            for seg in segments:
                if len(current_segment) + len(seg) <= self.MAX_SEGMENT_DURATION:
                    current_segment += seg
                else:
                    merged_segments.append(current_segment)
                    current_segment = seg
            if len(current_segment) > 0:
                merged_segments.append(current_segment)
            # Save segments
            output_files = []
            base_name = os.path.splitext(os.path.basename(input_path))[0]
            for i, seg in enumerate(merged_segments):
                output_file = os.path.join(output_dir, f"{base_name}_segment_{i + 1}.wav")
                seg.export(output_file, format="wav")
                output_files.append(output_file)
            return output_files
        except Exception as e:
            logger.error(f"音频分割失败: {str(e)}")
            return []


# ====================== ASR processor ======================
class ASRProcessor:
    def __init__(self):
        self.config = ConfigManager()
        self._asr_pipeline = None
        self._gpu_available = is_gpu_available()
        self._initialize_pipeline()

    def _initialize_pipeline(self):
        """Initialize the ASR pipeline."""
        model_path = self.config.get("model_paths", {}).get("asr")
        if not model_path:
            logger.error("未配置ASR模型路径")
            return
        try:
            device = "gpu" if self._gpu_available else "cpu"
            self._asr_pipeline = pipeline(
                task=Tasks.auto_speech_recognition,
                model=model_path,
                device=device
            )
            logger.info(f"ASR模型初始化完成,使用设备: {device}")
        except Exception as e:
            logger.error(f"ASR模型初始化失败: {str(e)}")
            self._asr_pipeline = None

    def transcribe(self, audio_path: str) -> Optional[str]:
        """Transcribe a single audio file."""
        if not self._asr_pipeline:
            logger.error("ASR管道未初始化")
            return None
        try:
            result = self._asr_pipeline(audio_path)
            return result.get('text', '')
        except Exception as e:
            logger.error(f"音频转录失败: {str(e)}")
            return None

    def batch_transcribe(self, audio_files: List[str]) -> List[Optional[str]]:
        """Transcribe a batch of audio files."""
        if not self._asr_pipeline:
            logger.error("ASR管道未初始化")
            return [None] * len(audio_files)
        results = []
        for audio_file in audio_files:
            results.append(self.transcribe(audio_file))
            # Free memory right after each transcription
            torch.cuda.empty_cache() if self._gpu_available else gc.collect()
        return results


# ====================== Sentiment analyzer ======================
class SentimentAnalyzer:
    def __init__(self):
        self.config = ConfigManager()
        self._tokenizer = None
        self._model = None
        self._gpu_available = is_gpu_available()
        self._initialize_model()

    def _initialize_model(self):
        """Initialize the sentiment model."""
        model_path = self.config.get("model_paths", {}).get("sentiment")
        if not model_path:
            logger.error("未配置情感分析模型路径")
            return
        try:
            self._tokenizer = AutoTokenizer.from_pretrained(model_path)
            self._model = AutoModelForSequenceClassification.from_pretrained(model_path)
            if self._gpu_available:
                self._model = self._model.cuda()
            logger.info("情感分析模型初始化完成")
        except Exception as e:
            logger.error(f"情感分析模型初始化失败: {str(e)}")
            self._tokenizer = None
            self._model = None

    def analyze(self, texts: List[str]) -> List[Dict[str, float]]:
        """Analyze text sentiment."""
        if not self._model or not self._tokenizer:
            logger.error("情感分析模型未初始化")
            return [{"positive": 0.0, "negative": 0.0, "neutral": 0.0} for _ in texts]
        try:
            # Process in batches
            batch_size = self.config.get("max_sentiment_batch_size", 16)
            results = []
            for i in range(0, len(texts), batch_size):
                batch = texts[i:i + batch_size]
                inputs = self._tokenizer(
                    batch,
                    padding=True,
                    truncation=True,
                    max_length=128,
                    return_tensors="pt"
                )
                if self._gpu_available:
                    inputs = {k: v.cuda() for k, v in inputs.items()}
                with torch.no_grad():
                    outputs = self._model(**inputs)
                # Probability distribution over labels
                probs = torch.nn.functional.softmax(outputs.logits, dim=-1).cpu().numpy()
                # Convert to dicts
                for j in range(probs.shape[0]):
                    results.append({
                        "negative": float(probs[j][0]),
                        "neutral": float(probs[j][1]),
                        "positive": float(probs[j][2])
                    })
            return results
        except Exception as e:
            logger.error(f"情感分析失败: {str(e)}")
            return [{"positive": 0.0, "negative": 0.0, "neutral": 0.0} for _ in texts]


# ====================== Core processing thread ======================
class ProcessingThread(QThread):
    progress = pyqtSignal(int, str)
    finished = pyqtSignal(dict)
    error = pyqtSignal(str)

    def __init__(self, audio_path: str):
        super().__init__()
        self.audio_path = audio_path
        self.resource_monitor = EnhancedResourceMonitor()
        self._gpu_available = is_gpu_available()  # referenced during cleanup in run()
        self._stop_requested = False

    def run(self):
        """Main processing pipeline."""
        try:
            # 1. Validate configuration
            config = ConfigManager()
            ok, errors = config.check_model_paths()
            if not ok:
                self.error.emit(f"模型路径配置错误: {'; '.join(errors)}")
                return

            # 2. Create a temp directory
            temp_dir = tempfile.mkdtemp(prefix="dialectqa_")
            self.progress.emit(10, "创建临时目录完成")

            # 3. Preprocess the audio
            audio_processor = EnhancedAudioProcessor()
            segments = audio_processor.process_audio(self.audio_path, temp_dir)
            if not segments:
                self.error.emit("音频预处理失败")
                return
            self.progress.emit(30, f"音频预处理完成,生成{len(segments)}个分段")

            # 4. ASR transcription
            asr = ASRProcessor()
            transcripts = asr.batch_transcribe(segments)
            if not any(transcripts):
                self.error.emit("ASR转录失败")
                return
            # Replace failed (None) transcriptions with empty strings so joins don't crash
            transcripts = [t or "" for t in transcripts]
            self.progress.emit(50, f"转录完成,总计{len(''.join(transcripts))}字")

            # 5. Dialect preprocessing
            transcripts = EnhancedDialectProcessor.preprocess_text(transcripts)
            self.progress.emit(60, "方言转换完成")

            # 6. Sentiment analysis
            sentiment = SentimentAnalyzer()
            sentiments = sentiment.analyze(transcripts)
            self.progress.emit(80, "情感分析完成")

            # 7. Keyword detection
            keywords_stats = self._analyze_keywords(transcripts)
            self.progress.emit(90, "关键字检测完成")

            # 8. Assemble results
            result = {
                "audio_path": self.audio_path,
                "segments": segments,
                "transcripts": transcripts,
                "sentiments": sentiments,
                "keywords": keywords_stats
            }

            # 9. Release resources
            gc.collect()
            if self._gpu_available:
                torch.cuda.empty_cache()

            self.finished.emit(result)
            self.progress.emit(100, "处理完成")
        except Exception as e:
            self.error.emit(f"处理失败: {str(e)}\n{traceback.format_exc()}")
        finally:
            # Deferred temp-dir cleanup (a real deployment may want to keep results)
            pass

    def _analyze_keywords(self, transcripts: List[str]) -> Dict[str, int]:
        """Count keyword occurrences per category."""
        stats = {category: 0 for category in EnhancedDialectProcessor.KEYWORDS}
        full_text = "".join(transcripts)
        for category, keywords in EnhancedDialectProcessor.KEYWORDS.items():
            for kw in keywords:
                stats[category] += full_text.count(kw)
        return stats

    def stop(self):
        """Request the thread to stop."""
        self._stop_requested = True
        self.terminate()


# ====================== Main window ======================
class DialectQAAnalyzer(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("方言客服语音质量分析系统")
        self.setGeometry(100, 100, 1200, 800)
        self.setWindowIcon(QIcon("icon.png"))
        # Initial state
        self.audio_path = ""
        self.processing_thread = None
        self.results = None
        self._init_ui()
        self.check_dependencies()
        self.show()

    def _init_ui(self):
        """Build the UI."""
        # Main layout
        main_widget = QWidget(self)
        main_layout = QVBoxLayout(main_widget)

        # Tabs
        tab_widget = QTabWidget()
        main_layout.addWidget(tab_widget)

        # Input tab
        input_tab = QWidget()
        input_layout = QVBoxLayout(input_tab)
        tab_widget.addTab(input_tab, "输入")

        # Audio selection area
        audio_group = QGroupBox("音频文件")
        audio_layout = QHBoxLayout(audio_group)
        self.audio_path_edit = QLineEdit()
        self.audio_path_edit.setReadOnly(True)
        audio_layout.addWidget(self.audio_path_edit, 4)
        browse_btn = QPushButton("浏览...")
        browse_btn.clicked.connect(self.select_audio)
        audio_layout.addWidget(browse_btn, 1)
        input_layout.addWidget(audio_group)

        # Progress area
        progress_group = QGroupBox("处理进度")
        progress_layout = QVBoxLayout(progress_group)
        self.progress_bar = QProgressBar()
        self.progress_bar.setRange(0, 100)
        self.progress_text = QLabel("准备就绪")
        progress_layout.addWidget(self.progress_bar)
        progress_layout.addWidget(self.progress_text)
        input_layout.addWidget(progress_group)

        # Action buttons
        button_layout = QHBoxLayout()
        self.start_btn = QPushButton("开始分析")
        self.start_btn.clicked.connect(self.start_processing)
        self.start_btn.setEnabled(False)
        self.stop_btn = QPushButton("停止分析")
        self.stop_btn.clicked.connect(self.stop_processing)
        self.stop_btn.setEnabled(False)
        button_layout.addWidget(self.start_btn)
        button_layout.addWidget(self.stop_btn)
        input_layout.addLayout(button_layout)

        # Preview area
        preview_group = QGroupBox("预览")
        preview_layout = QVBoxLayout(preview_group)
        self.preview_text = QTextEdit()
        self.preview_text.setReadOnly(True)
        preview_layout.addWidget(self.preview_text)
        input_layout.addWidget(preview_group)

        # Results tab. A dedicated variable holds the tab's layout; reusing one
        # name for both layouts would end up adding the group box to its own layout.
        result_tab = QWidget()
        result_tab_layout = QVBoxLayout(result_tab)
        tab_widget.addTab(result_tab, "详细结果")

        # Results table
        result_group = QGroupBox("分析明细")
        detail_layout = QVBoxLayout(result_group)
        self.results_table = QTableWidget()
        self.results_table.setColumnCount(5)
        self.results_table.setHorizontalHeaderLabels(["分段", "文本内容", "积极", "中性", "消极"])
        self.results_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        detail_layout.addWidget(self.results_table)
        result_tab_layout.addWidget(result_group)

        # Keyword statistics
        keywords_group = QGroupBox("关键字统计")
        keywords_layout = QVBoxLayout(keywords_group)
        self.keywords_table = QTableWidget()
        self.keywords_table.setColumnCount(2)
        self.keywords_table.setHorizontalHeaderLabels(["类别", "出现次数"])
        self.keywords_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        keywords_layout.addWidget(self.keywords_table)
        result_tab_layout.addWidget(keywords_group)

        # Status bar
        self.statusBar().showMessage("就绪")

        # Central widget
        self.setCentralWidget(main_widget)

    def check_dependencies(self):
        """Check system dependencies."""
        # GPU
        if not is_gpu_available():
            self.statusBar().showMessage("警告: 未检测到GPU,将使用CPU模式运行", 10000)
        # FFmpeg
        ffmpeg_ok, ffmpeg_msg = check_ffmpeg_available()
        if not ffmpeg_ok:
            QMessageBox.warning(self, "依赖缺失", ffmpeg_msg)
        # Model paths
        config = ConfigManager()
        ok, errors = config.check_model_paths()
        if not ok:
            QMessageBox.warning(self, "配置错误", "\n".join(errors))

    def select_audio(self):
        """Pick an audio file."""
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择音频文件", "",
            "音频文件 (*.mp3 *.wav *.amr *.m4a)"
        )
        if file_path:
            self.audio_path = file_path
            self.audio_path_edit.setText(file_path)
            self.start_btn.setEnabled(True)
            self.preview_text.setText(f"已选择文件: {file_path}")

    def start_processing(self):
        """Start processing the audio."""
        if not self.audio_path:
            QMessageBox.warning(self, "错误", "请先选择音频文件")
            return
        # Disable UI buttons
        self.start_btn.setEnabled(False)
        self.stop_btn.setEnabled(True)
        self.preview_text.clear()
        # Spawn the worker thread
        self.processing_thread = ProcessingThread(self.audio_path)
        self.processing_thread.progress.connect(self.update_progress)
        self.processing_thread.finished.connect(self.on_processing_finished)
        self.processing_thread.error.connect(self.on_processing_error)
        self.processing_thread.start()
        self.statusBar().showMessage("处理中...")

    def stop_processing(self):
        """Stop processing."""
        if self.processing_thread and self.processing_thread.isRunning():
            self.processing_thread.stop()
            self.stop_btn.setEnabled(False)
            self.statusBar().showMessage("已停止处理")

    def update_progress(self, value: int, message: str):
        """Update the progress display."""
        self.progress_bar.setValue(value)
        self.progress_text.setText(message)
        self.preview_text.append(message)

    def on_processing_finished(self, result: dict):
        """Handle completion."""
        self.results = result
        self.stop_btn.setEnabled(False)
        self.start_btn.setEnabled(True)
        self.statusBar().showMessage("处理完成")
        # Refresh the results tables
        self.update_results_table()
        # Success message
        QMessageBox.information(
            self, "完成",
            f"分析完成!\n音频时长: {self.calculate_audio_duration()}秒\n"
            f"总字数: {len(''.join(result['transcripts']))}字"
        )

    def on_processing_error(self, error: str):
        """Handle errors."""
        self.stop_btn.setEnabled(False)
        self.start_btn.setEnabled(True)
        self.statusBar().showMessage("处理失败")
        # Error details dialog
        error_dialog = QDialog(self)
        error_dialog.setWindowTitle("处理错误")
        layout = QVBoxLayout()
        text_edit = QTextEdit()
        text_edit.setPlainText(error)
        text_edit.setReadOnly(True)
        layout.addWidget(text_edit)
        buttons = QDialogButtonBox(QDialogButtonBox.Ok)
        buttons.accepted.connect(error_dialog.accept)
        layout.addWidget(buttons)
        error_dialog.setLayout(layout)
        error_dialog.exec_()

    def update_results_table(self):
        """Refresh both results tables."""
        if not self.results:
            return
        # Per-segment results
        segments = self.results.get("segments", [])
        transcripts = self.results.get("transcripts", [])
        sentiments = self.results.get("sentiments", [])
        self.results_table.setRowCount(len(segments))
        for i in range(len(segments)):
            # Segment number
            self.results_table.setItem(i, 0, QTableWidgetItem(f"分段 {i + 1}"))
            # Transcript text
            self.results_table.setItem(i, 1, QTableWidgetItem(transcripts[i]))
            # Sentiment scores
            if i < len(sentiments):
                sentiment = sentiments[i]
                self.results_table.setItem(i, 2, QTableWidgetItem(f"{sentiment['positive'] * 100:.1f}%"))
                self.results_table.setItem(i, 3, QTableWidgetItem(f"{sentiment['neutral'] * 100:.1f}%"))
                self.results_table.setItem(i, 4, QTableWidgetItem(f"{sentiment['negative'] * 100:.1f}%"))
        # Keyword statistics table
        keywords = self.results.get("keywords", {})
        self.keywords_table.setRowCount(len(keywords))
        for i, (category, count) in enumerate(keywords.items()):
            # Category name
            self.keywords_table.setItem(i, 0, QTableWidgetItem(self._translate_category(category)))
            # Occurrence count
            self.keywords_table.setItem(i, 1, QTableWidgetItem(str(count)))
            # Highlight non-zero rows
            if count > 0:
                for j in range(2):
                    self.keywords_table.item(i, j).setBackground(QColor(255, 230, 230))

    def _translate_category(self, category: str) -> str:
        """Translate keyword category names for display."""
        translations = {
            "opening": "开场白",
            "closing": "结束语",
            "forbidden": "禁用语",
            "salutation": "称呼语",
            "reassurance": "安抚语"
        }
        return translations.get(category, category)

    def calculate_audio_duration(self) -> float:
        """Total audio duration in seconds."""
        if not self.audio_path or not os.path.exists(self.audio_path):
            return 0.0
        try:
            audio = AudioSegment.from_file(self.audio_path)
            return len(audio) / 1000.0  # ms -> s
        except Exception:
            return 0.0


# ====================== Entry point ======================
def main():
    # A plain module-level function: @staticmethod only has meaning inside a
    # class body, so DialectQAAnalyzer.main() would never exist.
    # Enable HiDPI support
    os.environ["QT_ENABLE_HIGHDPI_SCALING"] = "1"
    QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)

    app = QApplication(sys.argv)
    app.setFont(QFont("Microsoft YaHei UI", 9))  # default font

    # Main window
    window = DialectQAAnalyzer()
    window.show()

    # Resource check
    monitor = EnhancedResourceMonitor()
    if monitor.is_under_heavy_load():
        QMessageBox.warning(window, "系统警告", "当前系统资源负载较高,性能可能受影响")

    # Run the application
    sys.exit(app.exec_())


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        error_msg = f"致命错误: {str(e)}\n{traceback.format_exc()}"
        logger.critical(error_msg)
        # Write a crash report
        temp_file = os.path.join(os.getcwd(), "crash_report.txt")
        with open(temp_file, "w", encoding="utf-8") as f:
            f.write(error_msg)
        # Show an error dialog (reuse the QApplication if one already exists)
        app = QApplication.instance() or QApplication(sys.argv)
        msg_box = QMessageBox()
        msg_box.setIcon(QMessageBox.Critical)
        msg_box.setWindowTitle("系统崩溃")
        msg_box.setText("程序遇到致命错误,已终止运行")
        msg_box.setInformativeText(f"错误报告已保存到: {temp_file}")
        msg_box.exec_()

When this code was first run, the first error reported was: Unresolved reference 'EnhancedDialectProcessor' (line 164).
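That reference is the trie-building loop inside the class body: the original code instantiated nodes as EnhancedDialectProcessor.TrieNode(), but a class name is only bound after its entire class statement has finished executing, so it cannot be used from inside its own body. Names defined earlier in the same body, such as TrieNode, are directly visible there, which is why the version above uses the bare name. A minimal sketch of the failure mode, using the hypothetical names Outer and Inner:

    # Minimal reproduction of the "unresolved reference" error (hypothetical names).
    class Outer:
        class Inner:
            pass

        # bad = Outer.Inner()  # NameError: name 'Outer' is not defined;
        #                      # the class object is bound only after the
        #                      # whole class statement has executed.
        ok = Inner()           # names earlier in the class body are in scope

    print(type(Outer.ok).__name__)  # prints "Inner"

An alternative that avoids class-body loops entirely is to build the trie lazily in a classmethod on first use; the bare-name fix above is the smallest change.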