再见c3p0


c3p0已经很久不维护了,以后 Java 数据库连接池的代码打算都迁移为 DBCP。想想当年一点不懂 Java 就开始用 Hibernate + c3p0,还是有点怀念。下面为 c3p0 介绍:
 核心思想是通过AsynchronousRunner来实现方法的异步执行,AsynchronousRunner本身通过队列同步的方式对任务进行异步调度。

当 BasicResourcePool 创建的时候,check-in、refresh 以及 idle 检测的后台任务也开始执行,AsynchronousRunner 会对其进行自动管理。

 

其实原理很简单,关键它的代码跑了那么久都还算是稳定,虽然偶尔会出现莫名其妙的问题,不过还是可以忍受的。

import os import sys import time import wave import struct import random import datetime import threading import tkinter as tk from tkinter import ttk, messagebox import numpy as np import pyaudio import webrtcvad import torch import torchaudio import pyttsx3 from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC # 检查必要库是否安装 try: import pyaudio import webrtcvad import torch import torchaudio from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC import pyttsx3 except ImportError as e: messagebox.showerror("缺少依赖库", f"请安装必要的依赖库:\n{e}\n" "pip install pyaudio webrtcvad torch torchaudio transformers pyttsx3") sys.exit(1) # 检查设备 device = "cuda" if torch.cuda.is_available() else "cpu" print(f"使用设备: {device}") class OfflineVoiceAssistant: def __init__(self, root): self.root = root self.root.title("离线语音陪伴助手") self.root.geometry("800x600") self.root.configure(bg="#f0f8ff") # 淡蓝色背景 # 状态变量 self.listening = False self.processing = False self.model_loaded = False self.capture_complete = False # 音频参数 - 优化不抢话功能 self.FORMAT = pyaudio.paInt16 self.CHANNELS = 1 self.RATE = 16000 self.CHUNK = 480 self.VAD_AGGRESSIVENESS = 1 # 降低VAD敏感度 (0-3, 0最不敏感) self.SILENCE_DURATION = 1.5 # 增加静音检测时间 (1.5秒) self.POST_SPEECH_DELAY = 0.5 # 语音结束后额外等待时间 self.MAX_RECORDING_TIME = 15 # 最大录音时间(秒) # 模型路径 - 使用中文模型 self.model_dir = "C://Users//24596//offline_models//chinese_model" print(f"模型目录: {self.model_dir}") # 确保模型目录存在 if not os.path.exists(self.model_dir): os.makedirs(self.model_dir) messagebox.showinfo("创建目录", f"已创建模型目录: {self.model_dir}") # 初始化TTS引擎 self.tts_engine = self.init_tts() # 创建UI self.create_widgets() # 加载模型 self.load_models() def init_tts(self): """初始化文本转语音引擎""" try: engine = pyttsx3.init() engine.setProperty('rate', 150) # 语速 engine.setProperty('volume', 0.9) # 音量 # 设置中文语音 voices = engine.getProperty('voices') chinese_voices = [v for v in voices if 'chinese' in v.name.lower() or 'zh' in v.id.lower()] if chinese_voices: engine.setProperty('voice', chinese_voices[0].id) 
print(f"使用中文语音: {chinese_voices[0].name}") else: print("未找到中文语音,将使用默认语音") return engine except Exception as e: messagebox.showerror("TTS初始化失败", f"错误: {str(e)}") return None def create_widgets(self): """创建用户界面""" # 主框架 main_frame = ttk.Frame(self.root, padding=20) main_frame.pack(fill=tk.BOTH, expand=True) # 标题 title_label = ttk.Label(main_frame, text="离线语音陪伴助手", font=("微软雅黑", 20, "bold"), foreground="#2c3e50") title_label.pack(pady=10) # 状态区域 status_frame = ttk.Frame(main_frame) status_frame.pack(fill=tk.X, pady=10) self.status_label = ttk.Label(status_frame, text="状态: 初始化中...", font=("微软雅黑", 12)) self.status_label.pack(side=tk.LEFT) # 模型状态 self.model_status = ttk.Label(status_frame, text="模型: 未加载", font=("微软雅黑", 10), foreground="#e74c3c") self.model_status.pack(side=tk.RIGHT, padx=10) # 指示灯 light_frame = ttk.Frame(main_frame) light_frame.pack(pady=5) self.light_canvas = tk.Canvas(light_frame, width=30, height=30, bg="#f0f8ff", highlightthickness=0) self.light_canvas.pack() self.light = self.light_canvas.create_oval(5, 5, 25, 25, fill="gray") # 对话区域 conv_frame = ttk.LabelFrame(main_frame, text="对话记录", padding=10) conv_frame.pack(fill=tk.BOTH, expand=True, pady=10) self.conversation = tk.Text(conv_frame, height=15, width=70, font=("微软雅黑", 10), wrap=tk.WORD, bg="white") scrollbar = ttk.Scrollbar(conv_frame, command=self.conversation.yview) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.conversation.config(yscrollcommand=scrollbar.set) self.conversation.pack(fill=tk.BOTH, expand=True) self.conversation.tag_config("user", foreground="#2980b9") self.conversation.tag_config("assistant", foreground="#27ae60") self.conversation.tag_config("system", foreground="#7f8c8d") self.conversation.insert(tk.END, "系统: 初始化完成,准备加载模型...\n", "system") self.conversation.config(state=tk.DISABLED) # 控制按钮 btn_frame = ttk.Frame(main_frame) btn_frame.pack(pady=10) self.listen_btn = ttk.Button(btn_frame, text="开始聆听", width=12, command=self.toggle_listening, state=tk.DISABLED) 
self.listen_btn.pack(side=tk.LEFT, padx=5) self.clear_btn = ttk.Button(btn_frame, text="清空记录", width=12, command=self.clear_conversation) self.clear_btn.pack(side=tk.LEFT, padx=5) self.quit_btn = ttk.Button(btn_frame, text="退出", width=12, command=self.on_closing) self.quit_btn.pack(side=tk.RIGHT, padx=5) # 底部信息 bottom_frame = ttk.Frame(main_frame) bottom_frame.pack(fill=tk.X, pady=5) device_info = f"设备: {'GPU加速' if device == 'cuda' else 'CPU运行'}" ttk.Label(bottom_frame, text=device_info, font=("微软雅黑", 9), foreground="#7f8c8d").pack(side=tk.LEFT) ttk.Label(bottom_frame, text="完全离线 | 隐私安全 | 中文识别", font=("微软雅黑", 9), foreground="#7f8c8d").pack(side=tk.RIGHT) def update_status(self, message, color="gray"): """更新状态指示""" self.status_label.config(text=f"状态: {message}") colors = { "gray": "#95a5a6", "green": "#2ecc71", "red": "#e74c3c", "orange": "#f39c12", "blue": "#3498db" } self.light_canvas.itemconfig(self.light, fill=colors.get(color, "gray")) def add_to_conversation(self, speaker, text): """添加消息到对话记录""" timestamp = datetime.datetime.now().strftime("%H:%M:%S") self.conversation.config(state=tk.NORMAL) self.conversation.insert(tk.END, f"[{timestamp}] {speaker}: {text}\n", speaker.lower()) self.conversation.see(tk.END) self.conversation.config(state=tk.DISABLED) def toggle_listening(self): """切换监听状态 - 每次点击只识别一次""" if not self.model_loaded: self.add_to_conversation("系统", "模型尚未加载完成,无法开始监听") return if not self.listening: self.listening = True self.capture_complete = False self.listen_btn.config(text="停止聆听") self.update_status("聆听中...", "green") threading.Thread(target=self.start_listening, daemon=True).start() else: self.listening = False self.listen_btn.config(text="开始聆听") self.update_status("就绪", "blue") def clear_conversation(self): """清空对话记录""" self.conversation.config(state=tk.NORMAL) self.conversation.delete(1.0, tk.END) self.conversation.insert(tk.END, "系统: 对话记录已清空\n", "system") self.conversation.config(state=tk.DISABLED) def load_models(self): """加载中文语音识别模型""" try: 
# 检查模型目录是否存在 if not os.path.exists(self.model_dir): self.add_to_conversation("系统", f"模型目录不存在: {self.model_dir}") self.model_status.config(text="模型: 目录缺失", foreground="#e74c3c") return # 检查模型文件是否存在 required_files = ["config.json", "preprocessor_config.json", "pytorch_model.bin"] model_files = os.listdir(self.model_dir) missing_files = [f for f in required_files if f not in model_files] if missing_files: self.add_to_conversation("系统", f"缺少模型文件: {', '.join(missing_files)}") self.model_status.config(text="模型: 文件缺失", foreground="#e74c3c") return # 加载中文模型 self.add_to_conversation("系统", "开始加载中文语音识别模型...") self.update_status("加载模型中...", "orange") # 使用中文模型 self.asr_processor = Wav2Vec2Processor.from_pretrained(self.model_dir) self.asr_model = Wav2Vec2ForCTC.from_pretrained( self.model_dir, ignore_mismatched_sizes=True ).to(device) self.model_loaded = True self.listen_btn.config(state=tk.NORMAL) self.update_status("就绪", "blue") self.model_status.config(text="模型: 中文已加载", foreground="#27ae60") self.add_to_conversation("系统", "中文模型加载成功!") self.add_to_conversation("助手", "您好!我是您的离线语音助手,请点击'开始聆听'按钮与我对话。") except Exception as e: self.add_to_conversation("系统", f"模型加载失败: {str(e)}") self.model_status.config(text="模型: 加载失败", foreground="#e74c3c") self.update_status("错误", "red") def start_listening(self): """开始监听音频输入 - 优化不抢话功能""" self.add_to_conversation("系统", "启动音频监听...") # 初始化VAD和PyAudio vad = webrtcvad.Vad(self.VAD_AGGRESSIVENESS) p = pyaudio.PyAudio() # 打开音频流 stream = p.open( format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK ) # 音频缓冲区 frames = [] silence_count = 0 speech_detected = False max_silence = int(self.SILENCE_DURATION * self.RATE / self.CHUNK) max_frames = int(self.MAX_RECORDING_TIME * self.RATE / self.CHUNK) frame_count = 0 try: # 检测语音开始 self.add_to_conversation("系统", "请开始说话...") start_time = time.time() while self.listening and not self.capture_complete and frame_count < max_frames: # 读取音频数据 data = stream.read(self.CHUNK, 
exception_on_overflow=False) frame_count += 1 # 检测语音活动 if vad.is_speech(data, self.RATE): # 第一次检测到语音 if not speech_detected: self.add_to_conversation("系统", "检测到语音,正在聆听...") start_time = time.time() # 重置超时计时 speech_detected = True silence_count = 0 frames.append(data) elif speech_detected: silence_count += 1 frames.append(data) # 检测到静音结束 if silence_count > max_silence: # 语音结束后额外等待一段时间,确保用户说完话 time.sleep(self.POST_SPEECH_DELAY) # 保存录音 audio_file = os.path.join(self.model_dir, "recording.wav") with wave.open(audio_file, 'wb') as wf: wf.setnchannels(self.CHANNELS) wf.setsampwidth(p.get_sample_size(self.FORMAT)) wf.setframerate(self.RATE) wf.writeframes(b''.join(frames)) # 标记捕获完成 self.capture_complete = True # 处理音频 self.add_to_conversation("系统", "检测到语音结束,开始处理...") threading.Thread(target=self.process_audio, args=(audio_file,), daemon=True).start() break else: # 没有检测到语音,清空缓冲区 frames = [] silence_count = 0 # 超时检测 if time.time() - start_time > self.MAX_RECORDING_TIME: self.add_to_conversation("系统", "超时未检测到语音,停止监听") self.listening = False break # 如果用户手动停止 if not self.listening: self.add_to_conversation("系统", "用户手动停止监听") except Exception as e: self.add_to_conversation("系统", f"监听错误: {str(e)}") finally: # 清理资源 stream.stop_stream() stream.close() p.terminate() self.add_to_conversation("系统", "音频监听已停止") # 重置状态 if self.capture_complete: self.listening = False self.listen_btn.config(text="开始聆听") self.update_status("处理中", "orange") else: self.listening = False self.listen_btn.config(text="开始聆听") self.update_status("就绪", "blue") def process_audio(self, audio_file): """处理录制的音频""" if not self.model_loaded: return try: self.processing = True self.add_to_conversation("系统", f"处理音频文件: {os.path.basename(audio_file)}") # 记录开始时间 start_time = time.time() # 加载音频文件 waveform, sample_rate = torchaudio.load(audio_file) audio_duration = len(waveform[0]) / sample_rate self.add_to_conversation("系统", f"音频时长: {audio_duration:.2f}秒, 采样率: {sample_rate}Hz") # 确保采样率正确 if sample_rate != 16000: 
self.add_to_conversation("系统", f"重新采样: {sample_rate} -> 16000Hz") resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000) waveform = resampler(waveform) # 预处理音频 self.add_to_conversation("系统", "预处理音频中...") input_values = self.asr_processor( waveform.squeeze().numpy(), return_tensors="pt", sampling_rate=16000 ).input_values.to(device) # 语音识别 self.add_to_conversation("系统", "进行语音识别...") with torch.no_grad(): logits = self.asr_model(input_values).logits # 解码识别结果 predicted_ids = torch.argmax(logits, dim=-1) text = self.asr_processor.batch_decode(predicted_ids)[0] if text: self.add_to_conversation("您", text) self.process_command(text) else: self.add_to_conversation("系统", "未识别到有效语音") # 计算处理时间 process_time = time.time() - start_time self.add_to_conversation("系统", f"处理完成,耗时: {process_time:.2f}秒") # 删除临时文件 try: os.remove(audio_file) self.add_to_conversation("系统", "已删除临时音频文件") except: pass except Exception as e: self.add_to_conversation("系统", f"处理错误: {str(e)}") finally: self.processing = False self.capture_complete = False self.update_status("就绪", "blue") def process_command(self, text): """处理用户命令并生成响应(中文)""" text = text.lower() response = "" # 问候 if any(word in text for word in ["你好", "嗨", "哈喽", "您好", "喂"]): greetings = ["您好!", "你好呀!", "很高兴为您服务!", "有什么我可以帮忙的吗?"] response = random.choice(greetings) # 时间 elif any(word in text for word in ["时间", "几点", "现在几点", "当前时间"]): now = datetime.datetime.now() response = f"当前时间是 {now.strftime('%H:%M')}" # 日期 elif any(word in text for word in ["日期", "今天", "今天日期", "几号"]): now = datetime.datetime.now() response = f"今天是 {now.strftime('%Y年%m月%d日')}" # 感谢 elif any(word in text for word in ["谢谢", "感谢", "多谢", "谢谢你"]): responses = ["不客气!", "很高兴能帮到您!", "这是我的荣幸!", "随时为您服务!"] response = random.choice(responses) # 退出 elif any(word in text for word in ["退出", "再见", "拜拜", "关闭"]): response = "再见!感谢您使用离线语音助手。" self.speak(response) time.sleep(1) self.on_closing() return response # 自我介绍 elif any(word in text for word in ["你是谁", 
"你叫什么", "你的名字"]): response = "我是一个完全离线的语音陪伴助手,保护您的隐私是我的首要任务。" # 天气 elif any(word in text for word in ["天气", "天气预报", "今天天气"]): response = "抱歉,作为一个离线助手,我无法获取实时天气信息。" # 讲笑话 elif any(word in text for word in ["笑话", "讲笑话", "说个笑话", "幽默"]): jokes = [ "为什么程序员喜欢黑暗模式?因为光会吸引bug!", "为什么计算机永远不会感冒?因为它有Windows!", "为什么科学家不相信原子?因为它们构成了一切!", "我告诉我的电脑我需要休息,它说:'没问题,我会在这里缓存。'" ] response = random.choice(jokes) # 默认响应 else: default_responses = [ "我在听,您可以说点别的吗?", "抱歉,我不太明白您的意思。", "能再说一遍吗?", "您需要什么帮助吗?", "我还在学习理解人类语言,请多包涵。" ] response = random.choice(default_responses) self.speak(response) def speak(self, text): """使用TTS引擎播放语音""" if not text or not self.tts_engine: return try: # 添加到对话记录 self.add_to_conversation("助手", text) # 播放语音 self.tts_engine.say(text) self.tts_engine.runAndWait() except Exception as e: self.add_to_conversation("系统", f"语音播放失败: {str(e)}") # 蜂鸣提示 try: import winsound winsound.Beep(440, 300) except: pass def on_closing(self): """关闭程序时的清理工作""" self.listening = False self.root.destroy() print("程序已安全退出") if __name__ == "__main__": # 创建主窗口 root = tk.Tk() # 设置窗口图标(可选) try: root.iconbitmap("icon.ico") except: pass # 创建应用 app = OfflineVoiceAssistant(root) # 启动主循环 root.mainloop() 这段代码是一个基础的语音助手 我想让他改变声音使用我下载好的模型,pth格式 你能帮我修改么,并把完整的代码给我
最新发布
07-29
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值