TargetAction&&Recognizer

本文介绍了一个iOS应用中的手势识别实现方法,并演示了如何通过不同手势改变视图的位置、大小及显示内容等属性。文章还展示了自定义视图响应用户触摸事件的技术。

//

//  RootViewController.m


 

#import "RootViewController.h"

#import "MyView.h"

 

@interface RootViewController ()

// Demo view that -changeLocation moves around; retained under MRC and
// released in -dealloc.
@property (nonatomic, retain) MyView *v;

@end

 

@implementation RootViewController

- (void)dealloc {
    // MRC: balance the retain held by the `v` property before teardown.
    [_v release];
    [super dealloc];
}

- (void)viewDidLoad {
    [super viewDidLoad];

//    // Target-action demo (kept for reference). The target/action design
//    // pattern decouples the view from whoever handles its events.
//    MyView *view1 = [[MyView alloc] initWithFrame:CGRectMake(100, 100, 100, 100)];
//    view1.backgroundColor = [UIColor redColor];
//    view1.tag = 100;
//    [self.view addSubview:view1];
//    view1.target = self;                    // object that handles the touch
//    view1.action = @selector(changeColor);  // message sent on touch
//
//    MyView *view2 = [[MyView alloc] initWithFrame:CGRectMake(220, 229, 50, 50)];
//    view2.backgroundColor = [UIColor grayColor];
//    self.v = view2;
//    view2.target = self;
//    view2.action = @selector(changeLocation);
//    [self.view addSubview:view2];
//
//    [view1 release];
//    [view2 release];

    // Create the image object (loaded by name from the app bundle).
    UIImage *image = [UIImage imageNamed:@"1.jpg"];

//    // Loading an image from the bundle by explicit file path instead:
//    UIImage *image1 = [UIImage imageWithContentsOfFile:
//        [[NSBundle mainBundle] pathForResource:@"drink_0.jpg" ofType:nil]];
//    NSLog(@"== %@", image1);

//    // An image view sized automatically to its image:
//    UIImageView *imageV = [[UIImageView alloc] initWithImage:image];
//    // A background color is not visible behind an opaque image.
//    imageV.backgroundColor = [UIColor redColor];
//    [self.view addSubview:imageV];

    // Image view with an explicit frame; the content mode decides how the
    // image is fitted into it.
    UIImageView *imageV = [[UIImageView alloc] initWithFrame:CGRectMake(0, 100, 414, 500)];
    imageV.image = image;
    imageV.contentMode = UIViewContentModeRedraw;
    imageV.tag = 100;  // looked up later via -viewWithTag: in the handlers
    [self.view addSubview:imageV];

    // UIImageView has user interaction disabled by default; gesture
    // recognizers need it enabled to receive touches.
    imageV.userInteractionEnabled = YES;

    // UIGestureRecognizer itself is abstract — use its seven concrete subclasses.

    // 1. Tap
    UITapGestureRecognizer *tap =
        [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(tap:)];
    tap.numberOfTapsRequired = 2;     // double tap
    tap.numberOfTouchesRequired = 2;  // two fingers
    [imageV addGestureRecognizer:tap];

    // 2. Long press
    UILongPressGestureRecognizer *longPress =
        [[UILongPressGestureRecognizer alloc] initWithTarget:self action:@selector(longP)];
    [imageV addGestureRecognizer:longPress];

    // 3. Pan
    UIPanGestureRecognizer *pan =
        [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(pan:)];
    [imageV addGestureRecognizer:pan];

    // 4. Swipe (the pan recognizer above usually wins; remove it to see swipes)
    UISwipeGestureRecognizer *swip =
        [[UISwipeGestureRecognizer alloc] initWithTarget:self action:@selector(swip:)];
    [imageV addGestureRecognizer:swip];
    swip.direction = UISwipeGestureRecognizerDirectionUp;

    // 5. Pinch (in the simulator, hold Option to simulate two fingers)
    UIPinchGestureRecognizer *pin =
        [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(pinch:)];
    [imageV addGestureRecognizer:pin];

    // 6. Rotation
    UIRotationGestureRecognizer *rotate =
        [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(rotate:)];
    [imageV addGestureRecognizer:rotate];

    // 7. Screen-edge pan
    UIScreenEdgePanGestureRecognizer *edge =
        [[UIScreenEdgePanGestureRecognizer alloc] initWithTarget:self action:@selector(edges:)];
    edge.edges = UIRectEdgeLeft;  // which screen edge triggers it
    [imageV addGestureRecognizer:edge];

    // MRC: the image view retains its recognizers and the superview retains
    // the image view, so balance every alloc above.
    // BUGFIX: the original leaked imageV — alloc'ed but never released.
    [imageV release];
    [tap release];
    [longPress release];
    [pan release];
    [swip release];
    [pin release];
    [rotate release];
    [edge release];
}

#pragma mark - Gesture handlers

// 1. Tap: swap the displayed image.
- (void)tap:(UITapGestureRecognizer *)taps {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.image = [UIImage imageNamed:@"2.jpg"];
}

// 2. Long press: swap the displayed image.
- (void)longP {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.image = [UIImage imageNamed:@"3.jpg"];
}

// 3. Pan: move the view by the gesture's translation.
- (void)pan:(UIPanGestureRecognizer *)pans {
    CGPoint point = [pans translationInView:self.view];
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.center = CGPointMake(imageV.center.x + point.x, imageV.center.y + point.y);
    // Reset the translation so the next callback reports an incremental
    // delta instead of the cumulative offset since the gesture began.
    [pans setTranslation:CGPointZero inView:self.view];
}

// 4. Swipe (only fires when the pan recognizer is not attached).
- (void)swip:(UISwipeGestureRecognizer *)swipe {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.image = [UIImage imageNamed:@"4.jpg"];
}

// 5. Pinch: scale relative to the current transform.
- (void)pinch:(UIPinchGestureRecognizer *)pinch {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.transform = CGAffineTransformScale(imageV.transform, pinch.scale, pinch.scale);
    // Reset so each callback scales relative to the previous one rather
    // than the gesture's starting size.
    pinch.scale = 1;
}

// 6. Rotation: rotate relative to the current transform.
- (void)rotate:(UIRotationGestureRecognizer *)rotate {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.transform = CGAffineTransformRotate(imageV.transform, rotate.rotation);
    // Reset so each callback rotates relative to the previous angle.
    rotate.rotation = 0;
}

// 7. Screen-edge pan: swipe in from the left edge to change the image.
- (void)edges:(UIScreenEdgePanGestureRecognizer *)edge {
    UIImageView *imageV = (UIImageView *)[self.view viewWithTag:100];
    imageV.image = [UIImage imageNamed:@"5.jpg"];
}

#pragma mark - Target-action demo callbacks

// Randomize the demo view's color. NOTE(review): alpha is randomized too,
// so the view can become (nearly) transparent — confirm that is intended.
- (void)changeColor {
    MyView *view = (MyView *)[self.view viewWithTag:100];
    view.backgroundColor = [UIColor colorWithRed:self.randomColor
                                           green:self.randomColor
                                            blue:self.randomColor
                                           alpha:self.randomColor];
}

// Move the demo view to a random point in [20, 400] on both axes.
- (void)changeLocation {
    self.v.center = CGPointMake(arc4random() % (400 - 20 + 1) + 20,
                                arc4random() % (400 - 20 + 1) + 20);
}

// Random color component in [0.0, 1.0].
- (CGFloat)randomColor {
    return arc4random() % 256 / 255.0;
}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

@end

 

 

 

//

//  MyView.h


 

#import <UIKit/UIKit.h>

@interface MyView : UIView

// Mimics UIButton's internal target-action mechanism: when the view is
// touched, `action` is sent to `target` (see -touchesBegan:withEvent: in
// MyView.m). Both are `assign` (non-retaining) so the view does not create
// a retain cycle with its owning controller, mirroring UIControl under MRC.
@property (nonatomic, assign) id target;
@property (nonatomic, assign) SEL action;

@end

 

 

 

 

//

//  MyView.m


 

#import "MyView.h"

@implementation MyView

// Forward the touch to the configured target/action pair. At this point
// neither the receiver nor the selector is fixed at compile time — that
// indirection is what decouples this view from its controller.
- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event {
    // Guard added: performSelector: crashes if target is non-nil but the
    // action is NULL or not implemented by the target.
    if (self.target && self.action && [self.target respondsToSelector:self.action]) {
        [self.target performSelector:self.action withObject:self];
    }
}

@end

 

"""Speech-recognition module.

Real-time microphone speech recognition built on ``speech_recognition``
plus the Google Web Speech API. Supports a continuous background-listening
mode and a blocking single-shot mode.
"""
import speech_recognition as sr
import threading
import time
import logging

# --- project configuration ---
from database import config

# --- logging helpers ---
from Progress.utils.logger_utils import log_time, log_step, log_var, log_call
from Progress.utils.logger_config import setup_logger

# --- tuning parameters, sourced from the project configuration ---
VOICE_TIMEOUT = config.timeout                # seconds to wait for speech to begin
VOICE_PHRASE_TIMEOUT = config.phrase_timeout  # maximum duration of one phrase
VOICE_RECOGNITION_LANGUAGE = config.lang      # recognition language, e.g. 'zh-CN'

logger = logging.getLogger(__name__)


class SpeechRecognizer:
    """Google Web Speech API based recognizer.

    Offers ``start_listening``/``stop_listening`` for a continuous callback
    mode and ``listen_and_recognize`` for one blocking recognition.
    """

    def __init__(self):
        self.recognizer = sr.Recognizer()
        self.microphone = sr.Microphone()
        self.is_listening = False
        self.callback = None
        self._last_text = ""  # read by main.py to detect exit commands
        self._calibrate_noise()

    @log_step("校准环境噪音")
    @log_time
    def _calibrate_noise(self):
        """Adapt the energy threshold to the ambient noise level."""
        try:
            with self.microphone as source:
                logger.debug(f"🎤 正在监听环境噪音(持续 {2.0}s)...")
                self.recognizer.adjust_for_ambient_noise(source, duration=2.0)
                energy_threshold = self.recognizer.energy_threshold
                dynamic_energy = self.recognizer.dynamic_energy_threshold
                log_var("噪音校准完成", f"energy_threshold={energy_threshold}, dynamic={dynamic_energy}")
        except Exception as e:
            logger.exception("❌ 校准环境噪音失败")

    @log_time
    def set_callback(self, callback):
        """Register the function that receives each recognized text."""
        if callable(callback):
            self.callback = callback
            log_call("已设置语音识别回调函数")
        else:
            logger.warning("⚠️ 尝试设置非可调用对象作为回调")

    @log_step("启动语音监听")
    @log_time
    def start_listening(self):
        """Spawn a daemon thread that listens continuously."""
        if self.is_listening:
            logger.info("🎧 监听已在运行中,无需重复启动")
            return
        self.is_listening = True
        thread = threading.Thread(target=self._listen_loop, name="VoiceListenThread")
        thread.daemon = True
        thread.start()
        logger.info("✅ 已启动语音监听循环")

    @log_step("停止语音监听")
    @log_time
    def stop_listening(self):
        """Ask the background loop to stop at its next iteration."""
        was_listening = self.is_listening
        self.is_listening = False
        if was_listening:
            logger.info("🛑 语音监听已停止")

    @log_time
    def _listen_loop(self):
        """Background loop: capture audio and recognize until stopped."""
        logger.debug("🎧 进入语音监听主循环")
        log_var("参数设置", {
            "timeout": VOICE_TIMEOUT,
            "phrase_timeout": VOICE_PHRASE_TIMEOUT,
            "language": VOICE_RECOGNITION_LANGUAGE,
        })
        while self.is_listening:
            try:
                with self.microphone as source:
                    log_call("正在录音...")
                    audio = self.recognizer.listen(
                        source,
                        timeout=VOICE_TIMEOUT,
                        phrase_time_limit=VOICE_PHRASE_TIMEOUT,
                    )
                log_call("录音完成,开始识别")
                text = self.recognizer.recognize_google(
                    audio, language=VOICE_RECOGNITION_LANGUAGE
                ).strip()
                self._last_text = text
                log_var("🟢 识别成功", text)
                if self.callback:
                    self.callback(text)
            except sr.WaitTimeoutError:
                log_call("⏳ 等待语音输入超时,继续监听")
                continue
            except sr.UnknownValueError:
                log_call("🔇 无法理解音频内容")
                if self.callback:
                    self.callback("")
                self._last_text = ""
            except sr.RequestError as e:
                logger.error(f"☁️ 语音识别服务请求失败: {e}")
                self._last_text = ""
                # back off instead of hammering the service
                time.sleep(1)
            except Exception as e:
                logger.exception("💥 语音监听过程中发生未预期异常")
                self._last_text = ""
                time.sleep(1)
        logger.debug("🔚 语音监听循环已退出")

    @log_step("执行单次语音识别")
    @log_time
    def listen_and_recognize(self):
        """Blocking single-shot recognition.

        Suited to listening for the user's reply right after a command.
        Returns the recognized text, or an empty string on timeout/failure.
        """
        logger.debug("🎙️ 开始单次语音识别...")
        try:
            with self.microphone as source:
                logger.info("🔊 请说话...")
                audio = self.recognizer.listen(source, timeout=VOICE_TIMEOUT)
            log_call("录音完成")
            text = self.recognizer.recognize_google(
                audio, language=VOICE_RECOGNITION_LANGUAGE
            ).strip()
            self._last_text = text
            logger.info(f"🎯 识别结果: '{text}'")
            return text
        except sr.WaitTimeoutError:
            logger.info("💤 未检测到语音输入(超时)")
            self._last_text = ""
            return ""
        except sr.UnknownValueError:
            logger.info("❓ 音频无法识别(无意义声音或噪音)")
            self._last_text = ""
            return ""
        except sr.RequestError as e:
            logger.error(f"🌐 无法连接语音识别服务: {e}")
            self._last_text = ""
            return ""
        except Exception as e:
            logger.exception("🚨 单次语音识别过程出错")
            self._last_text = ""
            return ""

    @property
    def last_text(self):
        """Most recently recognized text ('' after a failed attempt)."""
        return self._last_text

    def is_available(self):
        """Return True when the microphone can be opened."""
        try:
            with self.microphone as source:
                pass
            return True
        except Exception as e:
            logger.error(f"麦克风不可用: {e}")
            return False


# =============================
# 🧪 测试代码
# =============================
if __name__ == "__main__":
    # Install the global logging configuration once.
    if not logging.getLogger().handlers:
        setup_logger(name="ai_assistant", log_dir="logs")

    logger.info("🧪 开始测试 SpeechRecognizer...")
    recognizer = SpeechRecognizer()

    if not recognizer.is_available():
        logger.critical("🔴 麦克风初始化失败,请检查设备连接和权限")
        exit(1)

    def on_recognized(text):
        if text:
            logger.info(f"✅ 回调收到识别文本: '{text}'")
        else:
            logger.info("⚪ 未识别到有效语音")

    recognizer.set_callback(on_recognized)
    logger.info("🎙️ 启动连续监听模式(按 Ctrl+C 退出)...")
    recognizer.start_listening()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        recognizer.stop_listening()
        logger.info("👋 语音识别测试结束")

# NOTE(review): the original page ended this section with an instruction to
# cross-check this module against the offline (Vosk) version below.
10-22
"""Speech-recognition module (offline).

Real-time microphone speech recognition based on a local Vosk model.
Supports single-shot recognition and a continuous listening mode.
"""
import threading
import time
import logging
import json
import numpy as np

from database import config
from Progress.utils.logger_utils import log_time, log_step, log_var, log_call
from Progress.utils.logger_config import setup_logger
from vosk import Model, KaldiRecognizer
import pyaudio

# --- configuration ---
VOICE_TIMEOUT = config.timeout                # max seconds to wait for speech
VOICE_PHRASE_TIMEOUT = config.phrase_timeout  # max length of one utterance
VOICE_RECOGNITION_LANGUAGE = config.lang      # e.g. 'zh-CN', 'en-US' (model decides)
VOSK_MODEL_PATH = "./vosk-model-small-cn-0.22"

logger = logging.getLogger("ai_assistant")


class SpeechRecognizer:
    """Offline recognizer wrapping a Vosk model over a PyAudio stream."""

    def __init__(self):
        self.model = None
        self.recognizer = None
        self.audio = None
        self.is_listening = False
        self.callback = None          # user callback: callback(text)
        self._last_text = ""
        self._listen_thread = None
        self.sample_rate = 16000      # Vosk requires 16 kHz input
        self.chunk_size = 8000        # bytes per read; tune for performance
        self._load_model()
        self._calibrate_noise()

    @log_step("加载 Vosk 离线模型")
    @log_time
    def _load_model(self):
        """Load the local Vosk model from VOSK_MODEL_PATH."""
        try:
            logger.info(f"📦 正在加载模型: {VOSK_MODEL_PATH}")
            self.model = Model(VOSK_MODEL_PATH)
            log_call("✅ 模型加载成功")
        except Exception as e:
            logger.critical(f"🔴 加载 Vosk 模型失败,请确认路径正确并下载模型: {e}")
            raise RuntimeError("Failed to load Vosk model") from e

    @log_step("校准环境噪音(初始化音频流)")
    @log_time
    def _calibrate_noise(self):
        """Open a throw-away stream to validate audio and build the recognizer."""
        try:
            self.audio = pyaudio.PyAudio()
            stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
            )
            self.recognizer = KaldiRecognizer(self.model, self.sample_rate)
            stream.close()  # temporary stream only used to verify the device
            logger.debug("✅ 音频系统初始化完成")
        except Exception as e:
            logger.exception("❌ 初始化音频失败")
            raise

    @property
    def last_text(self) -> str:
        # Most recently recognized text ('' after a failed attempt).
        return self._last_text

    def is_available(self) -> bool:
        """Return True when the microphone can be opened."""
        try:
            temp_stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
            )
            temp_stream.close()
            return True
        except Exception as e:
            logger.error(f"🔴 麦克风不可用: {e}")
            return False

    @log_step("执行单次语音识别")
    @log_time
    def listen_and_recognize(self, timeout=None) -> str:
        """Blocking single-shot recognition bounded by `timeout` seconds.

        Returns the recognized text, or '' on timeout/failure.
        """
        timeout = timeout or VOICE_TIMEOUT
        start_time = time.time()
        in_speech = False
        result_text = ""
        self.recognizer.Reset()
        logger.debug(f"🎙️ 开始单次语音识别 (timeout={timeout})...")
        logger.info("🔊 请说话...")
        try:
            stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=1600,  # smaller chunks for faster response
            )
        except Exception as e:
            logger.error(f"🔴 无法打开音频流: {e}")
            return ""

        # BUGFIX: the original loop condition was
        #     while (time.time() - start_time) < timeout and self.is_listening:
        # but `is_listening` is only set by start_listening(), so a plain
        # single-shot call (as main.py performs) never entered the loop and
        # always returned "". Single-shot mode is bounded by `timeout` alone.
        while (time.time() - start_time) < timeout:
            try:
                data = stream.read(1600, exception_on_overflow=False)

                # Volume analysis (debug aid only).
                audio_np = np.frombuffer(data, dtype=np.int16)
                volume = np.abs(audio_np).mean()
                if volume < 30:
                    logger.debug(f"🔇 音量过低: {volume:.1f}")

                # Feed the chunk to Vosk.
                if self.recognizer.AcceptWaveform(data):
                    final_result = json.loads(self.recognizer.Result())
                    text = final_result.get("text", "").strip()
                    if text:
                        result_text = text
                        break
                else:
                    # Partial result indicates speech has started.
                    partial = json.loads(self.recognizer.PartialResult())
                    partial_text = partial.get("partial", "")
                    if partial_text.strip():
                        in_speech = True
            except Exception as e:
                logger.exception("读取音频出错")
                break

            # If nothing was ever spoken, allow the timeout to end the wait.
            if not in_speech and (time.time() - start_time) > timeout:
                logger.info("💤 超时未检测到语音")
                break
            log_call("当前时间为:" + str(time.time()))

        stream.stop_stream()
        stream.close()

        if result_text:
            self._last_text = result_text
            logger.info(f"🎯 识别结果: '{result_text}'")
            return result_text
        else:
            logger.info("❓ 未识别到有效内容")
            self._last_text = ""
            return ""

    @log_step("启动持续语音监听")
    def start_listening(self, callback=None, language=None):
        """Start a daemon thread that listens continuously.

        :param callback: function invoked with each recognized text
        :param language: ignored — the loaded model fixes the language
        """
        if self.is_listening:
            logger.warning("⚠️ 已在监听中,忽略重复启动")
            return
        if callback:
            self.callback = callback
        self.is_listening = True
        self._listen_thread = threading.Thread(
            target=self._background_listen, args=(language,), daemon=True
        )
        self._listen_thread.start()
        logger.info("🟢 已启动后台语音监听")

    @log_step("停止语音监听")
    def stop_listening(self):
        """Stop the background listener, joining its thread when safe."""
        if not self.is_listening:
            return
        self.is_listening = False
        logger.info("🛑 正在停止语音监听...")
        if self._listen_thread and self._listen_thread != threading.current_thread():
            self._listen_thread.join(timeout=3)
            if self._listen_thread.is_alive():
                logger.warning("🟡 监听线程未能及时退出(可能阻塞)")
        elif self._listen_thread == threading.current_thread():
            # Joining oneself deadlocks — happens when a callback calls stop.
            logger.error("❌ 无法在当前线程中 join 自己!请检查调用栈")
        else:
            logger.debug("No thread to join")
        logger.info("✅ 语音监听已停止")

    @log_time
    def _background_listen(self, language=None):
        """Background thread body: read chunks and fire the callback."""
        logger.debug("🎧 后台监听线程已启动")
        try:
            stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
            )
            self.recognizer.Reset()
        except Exception as e:
            logger.error(f"🔴 无法打开音频流: {e}")
            return

        while self.is_listening:
            try:
                data = stream.read(self.chunk_size, exception_on_overflow=False)
                if self.recognizer.AcceptWaveform(data):
                    result_json = self.recognizer.Result()
                    result_dict = json.loads(result_json)
                    text = result_dict.get("text", "").strip()
                    if text and self.callback:
                        logger.info(f"🔔 回调触发: '{text}'")
                        self.callback(text)
            except Exception as e:
                logger.exception("Background listening error")
                time.sleep(0.1)

        stream.stop_stream()
        stream.close()
        logger.debug("🔚 后台监听线程退出")


def on_recognized(text):
    """Example callback: echo the text, stop listening on an exit keyword.

    NOTE(review): relies on a module-level `recognizer` being defined by the
    caller before the callback fires — confirm against actual usage.
    """
    print(f"\n🔔 回调收到: '{text}'")
    if "退出" in text or "停止" in text:
        recognizer.stop_listening()


# =====================================================================
# main.py — AI voice assistant entry point. This was a separate file on
# the original page; it is reproduced here because the page fused the
# two sources together.
# =====================================================================

"""AI voice assistant — main entry point.

Wires together speech recognition, Qwen intent understanding, TTS and
action execution. Reads recognizer.last_text via the public property
(never the private _last_text field).
"""
import sys

from Progress.app.voice_recognizer import SpeechRecognizer
from Progress.app.qwen_assistant import QWENAssistant
from Progress.app.text_to_speech import TTSEngine
from Progress.app.system_controller import SystemController, TaskOrchestrator


@log_step("初始化语音识别模块")
@log_time
def initialize_speech_recognizer() -> SpeechRecognizer:
    """Build the recognizer and verify the microphone is usable."""
    try:
        recognizer = SpeechRecognizer()
        if not recognizer.is_available():
            raise RuntimeError("麦克风不可用,请检查设备连接和权限")
        log_call("✅ 语音识别器初始化完成")
        return recognizer
    except Exception as e:
        logger.critical(f"🔴 初始化语音识别失败: {e}")
        raise


@log_step("初始化 AI 助手模块")
@log_time
def initialize_qwen_assistant() -> QWENAssistant:
    """Build the Qwen intent-understanding assistant."""
    try:
        assistant = QWENAssistant()
        log_call("✅ Qwen 助手初始化完成")
        return assistant
    except Exception as e:
        logger.critical(f"🔴 初始化 Qwen 助手失败: {e}")
        raise


@log_step("初始化文本转语音模块")
@log_time
def initialize_tts_engine() -> TTSEngine:
    """Build the TTS engine and verify it is operational."""
    try:
        tts_engine = TTSEngine()
        if not tts_engine.is_available():
            raise RuntimeError("TTS引擎初始化失败")
        log_call("✅ TTS 引擎初始化完成")
        return tts_engine
    except Exception as e:
        logger.critical(f"🔴 初始化 TTS 失败: {e}")
        raise


@log_step("初始化动作执行器")
@log_time
def initialize_action_executor() -> TaskOrchestrator:
    """Build the system controller and its task orchestrator."""
    system_controller = SystemController()
    task_orchestrator = TaskOrchestrator(system_controller=system_controller)
    log_call("✅ 动作执行器初始化完成")
    return task_orchestrator


@log_step("安全执行单次交互")
@log_time
def handle_single_interaction_safe(
    recognizer: SpeechRecognizer,
    assistant: QWENAssistant,
    tts_engine: TTSEngine,
    executor: TaskOrchestrator,
):
    """Run one interaction, degrading to a spoken apology on any exception."""
    try:
        handle_single_interaction(recognizer, assistant, tts_engine, executor)
    except Exception as e:
        logger.exception("⚠️ 单次交互过程中发生异常,已降级处理")
        error_msg = "抱歉,我在处理刚才的操作时遇到了一点问题。"
        logger.info(f"🗣️ 回复: {error_msg}")
        tts_engine.speak(error_msg)


@log_step("处理一次语音交互")
@log_time
def handle_single_interaction(
    recognizer: SpeechRecognizer,
    assistant: QWENAssistant,
    tts_engine: TTSEngine,
    executor: TaskOrchestrator,
):
    """One listen → understand → act → speak cycle."""
    # 1. Listen
    voice_text = recognizer.listen_and_recognize()
    if not voice_text:
        response = "抱歉,我没有听清楚,请重新说话。"
        logger.info(f"🗣️ 回复: {response}")
        tts_engine.speak(response)
        return
    log_var("🎤 识别到的语音文本", voice_text)

    # 2. Understand
    ai_response = assistant.process_voice_command(voice_text)
    ai_reply = ai_response.get("response", "好的,已处理。")
    intent = ai_response.get("intent")
    action = ai_response.get("action")
    params = ai_response.get("parameters")
    log_var("🧠 AI响应.intent", intent)
    log_var("🧠 AI响应.action", action)
    log_var("🧠 AI响应.parameters", params)

    # 3. Act (unless the intent requires user confirmation first)
    if not ai_response.get("needs_confirmation", False):
        try:
            success = executor.execute(ai_response)
            if not success:
                ai_reply = "执行该操作时遇到了一些问题。"
        except Exception as e:
            logger.exception("💥 执行动作时发生异常")
            ai_reply = "抱歉,我在尝试执行这个操作时出了点问题。"

    # 4. Speak
    logger.info(f"🗣️ 回复: {ai_reply}")
    tts_engine.speak(ai_reply)


@log_step("启动 AI 语音助手")
@log_time
def main():
    """Initialize all modules, then loop interactions until an exit phrase."""
    logger.info("🚀 正在启动 AI 语音助手系统...")
    try:
        recognizer = initialize_speech_recognizer()
        assistant = initialize_qwen_assistant()
        tts_engine = initialize_tts_engine()
        executor = initialize_action_executor()
        log_call("✅ 所有模块初始化完成,进入监听循环")

        print("\n" + "—" * 50)
        print("🎙️ 语音助手已就绪")
        print("💡 说出你的命令,例如:'打开浏览器'、'写一篇春天的文章'")
        print("🛑 说出‘退出’、‘关闭’、‘停止’或‘拜拜’来结束程序")
        print("—" * 50 + "\n")

        while True:
            handle_single_interaction_safe(recognizer, assistant, tts_engine, executor)
            # Exit when the last utterance contains any stop keyword.
            last_text = recognizer.last_text.lower()
            exit_keywords = ['退出', '关闭', '停止', '拜拜', '再见']
            if any(word in last_text for word in exit_keywords):
                logger.info("🎯 用户请求退出,程序即将终止")
                break
            time.sleep(0.5)

        logger.info("👋 语音助手已安全退出")
    except KeyboardInterrupt:
        logger.info("🛑 用户通过 Ctrl+C 中断程序")
        print("\n👋 再见!")
    except Exception as e:
        logger.exception("❌ 主程序运行时发生未预期异常")
        print(f"\n🚨 程序异常终止:{e}")
        sys.exit(1)


if __name__ == "__main__":
    if not logging.getLogger().handlers:
        setup_logger(name="ai_assistant", log_dir="logs", level=logging.INFO)
    main()


"""
# 使用配置(推荐方式)
#增加或修改新的设置配置
config.update_key("shortcuts",key = "exit",value = "Ctrl+C")
config.update_key("shortcuts",key = "select_all",value = "Shift+Alt+A")
#修改资源路径
config.set_resource_path("./resoures")
"""
最新发布
10-23
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值