php的warning:session_start() [function session_start()]:open (c:/windows/TEMP/.......,O_RDWR) failed:Permission denied...

     在远程调试程序的时候出现了这样的问题,警告原文的意思是代码中调用session_start()函数,打开默认session的存储文件被拒绝。

      

     我之前都是在localhost下进行的调试,所以没发现这个毛病。查看了网上的文章以及官方的资料,再看看代码,也确保了在

session_start()函数调用之前也并没有任何html输出。

 

     于是干脆躲开壁垒,修改php.ini配置文件中的session.save_path一项,将session保存路径设置到了别的系统分区,比如网站根目录下。例如session.save_path = "f:/web/fairoa/sessiondata"这样以后的session文件就保存在这个目录下了。远程访问程序,结果没有错误,程序运行正常。

# =====================================================================
# core/handler.py
# =====================================================================
import os
from typing import Dict, Any

from Progress.app import get_ai_assistant, get_task_executor, get_tts_engine
from Progress.utils.logger_utils import logger

assistant = get_ai_assistant()
executor = get_task_executor()
tts_engine = get_tts_engine()


def handle_user_input(user_text: str, source: str = "unknown") -> Dict[str, Any]:
    """Unified handler for user input from every channel.

    :param user_text: raw text the user said/typed
    :param source: request origin ('local', 'web', 'mobile', 'api', ...)
    :return: result dict with success flag, reply text, details and an
             optional TTS audio URL
    """
    if not user_text.strip():
        return {
            "success": False,
            "response_to_user": "请输入有效内容",
            "details": {}
        }

    try:
        # AI decision, then task execution.
        decision = assistant.process_voice_command(user_text)
        result = executor.execute_task_plan(decision)

        reply = result.get("message", "操作完成。")
        if not result.get("success") and not reply.startswith("抱歉"):
            reply = f"抱歉,{reply}"

        # TTS is only played for local/physical devices.
        should_play_tts = source in ["local", "raspberry", "desktop"]
        # BUGFIX: audio_path must be initialised here. It was previously only
        # bound inside the try below, so a tts_engine.speak() failure made the
        # f-string in the return dict crash with NameError.
        audio_path = None
        if should_play_tts:
            try:
                audio_path = tts_engine.speak(reply)
                logger.info(f"🔊 已播放语音: {audio_path}")
            except Exception as e:
                logger.warning(f"TTS 播放失败: {e}")

        return {
            "success": True,
            "recognized_text": user_text,
            "response_to_user": reply,
            "details": result,
            # Only expose the URL when an audio file was actually produced.
            "tts_audio_url": (
                f"/api/tts/audio?file={os.path.basename(audio_path)}"
                if should_play_tts and audio_path else None
            ),
            "source": source,
        }

    except Exception as e:
        logger.exception("处理用户输入时出错")
        return {
            "success": False,
            "response_to_user": "系统内部错误,请稍后再试。",
            "error": str(e),
        }


# =====================================================================
# main.py
# =====================================================================
import time
import signal
import threading

from Progress.utils.logger_config import setup_logger
from Progress.app import get_tts_engine, get_voice_recognizer
from core.handler import handle_user_input
from database.config import config

logger = setup_logger("ai_assistant")
_shutdown_event = threading.Event()


def signal_handler(signum, frame):
    """SIGINT/SIGTERM handler: request a graceful shutdown."""
    logger.info(f"🛑 收到信号 {signum},准备退出...")
    _shutdown_event.set()


signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)


def handle_single_interaction() -> bool:
    """Run one listen → recognize → handle cycle.

    :return: False when the assistant should leave the main loop.
    """
    rec = get_voice_recognizer()
    text = rec.listen_and_recognize(timeout=3)
    if _shutdown_event.is_set():
        return False
    if not text:
        logger.info("🔇 未检测到语音")
        return True

    logger.info(f"🗣️ 用户说: '{text}'")
    # source='local' makes the handler play TTS automatically.
    result = handle_user_input(user_text=text, source="local")

    # Keep the microphone open longer when a follow-up is expected.
    expect_follow_up = result.get("details", {}).get("expect_follow_up", False)
    rec.current_timeout = 8 if expect_follow_up else 3

    should_exit = result.get("details", {}).get("should_exit", False)
    return not should_exit


def main():
    """Entry point: optionally start the API server, then run the voice loop."""
    ENABLE_API_SERVER = config.get("app", "enable_api_server", default=True)
    API_HOST = config.get("app", "api_host", default="127.0.0.1")
    API_PORT = config.get("app", "api_port", default=5000)

    logger.info("🚀 AI 助手启动中...")

    # Optional: start the API server.
    if ENABLE_API_SERVER:
        try:
            from api_server import APIServer
            api_server = APIServer()
            api_server.start()
            logger.info(f"🌐 API 服务已启动: http://{API_HOST}:{API_PORT}")
        except Exception as e:
            logger.warning(f"⚠️ API 服务启动失败: {e}")
    else:
        logger.debug("🚫 API 服务已禁用 (ENABLE_API=false)")

    logger.info("👂 助手已就绪,请开始说话...")
    while not _shutdown_event.is_set():
        try:
            if not handle_single_interaction():
                break
        except KeyboardInterrupt:
            break
        except Exception:
            logger.exception("🔁 主循环异常")
            time.sleep(1)

    # Best-effort cleanup — never let it block shutdown.
    try:
        get_voice_recognizer().close()
    except Exception:
        pass
    try:
        get_tts_engine().stop()
    except Exception:
        pass
    logger.info("👋 助手已退出")


if __name__ == "__main__":
    main()


# =====================================================================
# api_server.py
# =====================================================================
import threading
import os
import base64
import time
from typing import Dict, Any

from flask import Flask, request, jsonify, send_file
from werkzeug.utils import secure_filename
from werkzeug.serving import make_server
from flask_cors import CORS

from Progress.utils.logger_utils import logger
from Progress.app import get_voice_recognizer
from core.handler import handle_user_input
from database.config import config

# -----------------------------
# Configuration (global config)
# -----------------------------
ENABLE_API_SERVER = config.get("app", "enable_api_server", default=True)
API_HOST = config.get("app", "api_host", default="127.0.0.1")
API_PORT = config.get("app", "api_port", default=5000)
RUN_MODE = config.get("app", "run_mode", default="auto")
VOICE_RECOGNIZER_TIMEOUT = config.get("stt", "timeout", default=3)
TEMP_DIR = config.get("app", "temp_dir", default="temp_audio")
os.makedirs(TEMP_DIR, exist_ok=True)

recognizer = get_voice_recognizer()

# Global runtime status exposed via /api/status.
current_status = {
    "is_listening": False,
    "is_tts_playing": False,
    "last_command_result": None,
    "timestamp": int(time.time()),
}


class APIServer:
    """RESTful API server for Web / Mobile / IoT clients.

    Every request is ultimately routed through core.handler; the detected
    ``source`` decides behaviour such as whether TTS is played locally.
    """

    def __init__(self):
        self.app = Flask(__name__)
        CORS(self.app)  # allow cross-origin requests (Electron/browser UI)
        self.server = None
        self.thread = None
        self.running = False
        self._add_routes()
        logger.debug("🔧 APIServer 初始化完成")

    def _update_status(self, **kwargs):
        """Update the shared status dict and refresh its timestamp."""
        current_status.update(kwargs)
        current_status["timestamp"] = int(time.time())

    def _determine_source(self) -> str:
        """Map the X-Client-Type header to 'web', 'mobile', 'local' or 'api'."""
        client_type = request.headers.get("X-Client-Type", "").lower().strip()
        mapping = {
            "web": ["web", "browser"],
            "mobile": ["mobile", "android", "ios"],
            "local": ["raspberry", "local-device", "pi", "desktop"],
        }
        for src, keywords in mapping.items():
            if any(k in client_type for k in keywords):
                return src
        return "api"

    def _should_play_tts(self, source: str) -> bool:
        """Only local physical devices get automatic TTS playback."""
        return source == "local"

    def _handle_text(self, text: str, source: str) -> Dict[str, Any]:
        """Shared text pipeline used by /api/text/query and /api/voice/upload."""
        result = handle_user_input(user_text=text, source=source)
        response_data = {
            "success": result.get("success", False),
            "response_to_user": result.get("response_to_user", ""),
        }
        # Expose the audio URL only when a local device produced audio.
        if self._should_play_tts(source) and result.get("tts_audio_url"):
            response_data["tts_audio_url"] = result["tts_audio_url"]
        details = result.get("details")
        if details is not None:
            response_data["details"] = details
        return response_data

    def _add_routes(self):
        """Register all API routes."""
        self._add_health_route()
        self._add_status_route()
        self._add_text_query_route()
        self._add_voice_upload_route()
        self._add_tts_audio_route()

    def _add_health_route(self):
        @self.app.route('/api/health', methods=['GET'])
        def health():
            return jsonify({
                "status": "ok",
                "mode": RUN_MODE,
                "running": True,
                "timestamp": int(time.time()),
            })

    def _add_status_route(self):
        @self.app.route('/api/status', methods=['GET'])
        def status():
            return jsonify(current_status.copy())

    def _add_text_query_route(self):
        @self.app.route('/api/text/query', methods=['POST'])
        def text_query():
            data: Dict = request.get_json() or {}
            text = data.get("text", "").strip()
            if not text:
                return jsonify({"error": "缺少文本内容"}), 400

            source = self._determine_source()
            logger.info(f"📩 [{source}] 文本请求: '{text}'")
            try:
                return jsonify(self._handle_text(text, source))
            except Exception as e:
                logger.exception(f"❌ 处理文本请求失败: {text}")
                return jsonify({
                    "success": False,
                    "error": "内部服务错误",
                    "message": str(e),
                }), 500

    def _add_voice_upload_route(self):
        @self.app.route('/api/voice/upload', methods=['POST'])
        def voice_upload():
            source = self._determine_source()
            if not self._should_play_tts(source):
                return jsonify({
                    "error": "语音上传功能仅限本地设备使用",
                    "hint": "请设置 Header: X-Client-Type: local",
                }), 403

            audio_path = None
            session_id = request.form.get('session_id', f"upload_{int(time.time())}")
            try:
                # Accept either a multipart file or base64-encoded audio.
                if 'file' in request.files:
                    file = request.files['file']
                    if not file.filename:
                        return jsonify({"error": "上传的文件名为空"}), 400
                    ext = os.path.splitext(file.filename)[1] or ".wav"
                    filename = secure_filename(f"{session_id}_{int(time.time())}{ext}")
                    file_path = os.path.join(TEMP_DIR, filename)
                    file.save(file_path)
                    audio_path = file_path
                elif 'audio_base64' in request.form:
                    b64_str = request.form['audio_base64'].split(",")[-1]
                    raw_data = base64.b64decode(b64_str)
                    file_path = os.path.join(TEMP_DIR, f"{session_id}.wav")
                    with open(file_path, 'wb') as f:
                        f.write(raw_data)
                    audio_path = file_path
                else:
                    return jsonify({"error": "请提供 'file' 或 'audio_base64' 字段"}), 400

                # Speech recognition.
                self._update_status(is_listening=True)
                logger.debug(f"🎤 正在识别语音文件: {audio_path}")
                try:
                    recognized_text = recognizer.listen_and_recognize(
                        audio_file=audio_path,
                        timeout=VOICE_RECOGNIZER_TIMEOUT,
                    )
                finally:
                    self._update_status(is_listening=False)

                if not recognized_text:
                    logger.warning("⚠️ 语音识别未获取到有效文本")
                    return jsonify({
                        "success": False,
                        "error": "语音识别失败",
                        "response_to_user": "抱歉,我没听清,请再说一遍。",
                    }), 400

                logger.info(f"👂 识别结果: '{recognized_text}'")
                # BUGFIX: the original assigned to request.json (a read-only
                # property in Flask) and re-invoked the text_query view; call
                # the shared pipeline directly instead.
                return jsonify(self._handle_text(recognized_text, source))
            except Exception as e:
                logger.exception("🎙️ 语音上传处理出错")
                return jsonify({
                    "success": False,
                    "error": "语音处理异常",
                    "detail": str(e),
                }), 500
            finally:
                # Best-effort cleanup of the temporary audio file.
                if audio_path and os.path.exists(audio_path):
                    try:
                        os.remove(audio_path)
                        logger.debug(f"🗑️ 已删除临时语音文件: {audio_path}")
                    except Exception:
                        pass

    def _add_tts_audio_route(self):
        @self.app.route('/api/tts/audio', methods=['GET'])
        def tts_audio():
            filename = request.args.get('file')
            if not filename:
                return jsonify({"error": "缺少参数 'file'"}), 400
            # SECURITY: secure_filename blocks "../" path traversal in the
            # untrusted query argument.
            file_path = os.path.join(TEMP_DIR, secure_filename(filename))
            if not os.path.exists(file_path):
                logger.warning(f"📁 请求的音频文件不存在: {file_path}")
                return jsonify({"error": "文件不存在"}), 404
            logger.debug(f"📥 下载 TTS 音频: {file_path}")
            return send_file(file_path, mimetype="audio/mpeg")

    def start(self, host=None, port=None):
        """Start the API server on a daemon thread (non-blocking).

        :param host: bind address (defaults to configured API_HOST)
        :param port: port number (defaults to configured API_PORT)
        """
        host = host or API_HOST
        port = port or API_PORT
        if self.running:
            logger.warning("⚠️ API 服务器已在运行,忽略重复启动")
            return

        def run():
            try:
                self.server = make_server(host, port, self.app)
                logger.info(f"🌐 API 服务已启动 → http://{host}:{port} (模式: {RUN_MODE})")
                self.running = True
                self.server.serve_forever()
            except Exception as e:
                # running is False after stop() → treat as a normal shutdown.
                if self.running:
                    logger.error(f"🚨 API 服务意外终止: {e}")
                else:
                    logger.debug("🛑 API 服务已正常关闭")

        self.thread = threading.Thread(target=run, daemon=True)
        self.thread.start()

    def stop(self):
        """Shut the API server down safely."""
        if not self.running:
            return
        logger.info("🛑 正在关闭 API 服务...")
        try:
            self.server.shutdown()
        except AttributeError:
            logger.warning("⚠️ server 对象尚未初始化,跳过 shutdown")
        except Exception as e:
            logger.error(f"❌ shutdown 出错: {e}")
        self.running = False
        if self.thread:
            self.thread.join(timeout=3)
            if self.thread.is_alive():
                logger.warning("⚠️ API 线程未能及时退出")
        logger.info("✅ API 服务已关闭")


# ================
# Global singleton
# ================
_api_server_instance = None


def get_api_server() -> APIServer:
    """Return the process-wide APIServer singleton."""
    global _api_server_instance
    if _api_server_instance is None:
        _api_server_instance = APIServer()
    return _api_server_instance


__all__ = ['APIServer', 'get_api_server']


# =====================================================================
# database/config.py
# =====================================================================
import copy
import json
import os
import sys
from typing import Any, Dict, Optional
from pathlib import Path

from Progress.utils.logger_utils import logger

# Make sure the Progress package is importable before using its helpers.
if 'Progress' not in sys.modules:
    project_root = str(Path(__file__).parent.parent)
    if project_root not in sys.path:
        sys.path.insert(0, project_root)
    try:
        import Progress
    except ImportError as e:
        print(f"⚠️ 无法导入 Progress 模块,请检查路径: {project_root}, 错误: {e}")


class ConfigManager:
    """Loads, merges, watches and persists the application configuration."""

    def __init__(self):
        from Progress.utils.resource_helper import get_internal_path, get_app_path
        self.BASE_CONFIG_FILE = get_internal_path("database", "base_config.json")
        self.CONFIG_FILE = os.path.join(get_app_path(), "config.json")
        self.DEFAULT_CONFIG: Optional[Dict] = None
        self.config = self.load_config()
        self._watchers = {}  # key_path -> callback(old_value, new_value)

    def watch(self, *keys, callback):
        """Register a listener fired when the given config path changes.

        :param keys: config path, e.g. ("tts", "voice")
        :param callback: callable taking (old_value, new_value)
        """
        key_path = ".".join(str(k) for k in keys)
        self._watchers[key_path] = callback
        logger.debug(f"👀 开始监听配置项: {key_path}")

    def set(self, value, *keys):
        """Set a config value in memory and notify any watcher.

        Example: config.set("zh-CN-YunxiNeural", "tts", "voice")
        Only mutates memory — call .save() to persist.
        """
        if not keys:
            raise ValueError("必须指定至少一个键")

        old_value = self.get(*keys)

        # Walk/create the nested structure down to the parent dict.
        data = self.config
        for k in keys[:-1]:
            if k not in data or not isinstance(data[k], dict):
                data[k] = {}
            data = data[k]

        current_key = keys[-1]
        # Skip unchanged values so watchers don't fire spuriously.
        if current_key in data and data[current_key] == value:
            return
        data[current_key] = value

        key_path = ".".join(str(k) for k in keys)
        if key_path in self._watchers:
            try:
                self._watchers[key_path](old_value, value)
            except Exception as e:
                logger.error(f"❌ 执行监听回调失败 [{key_path}]: {e}")

    def _load_default(self) -> Dict:
        """Load (and cache) the default configuration template."""
        if self.DEFAULT_CONFIG is None:
            if not os.path.exists(self.BASE_CONFIG_FILE):
                raise FileNotFoundError(f"❌ 默认配置文件不存在: {self.BASE_CONFIG_FILE}")
            try:
                with open(self.BASE_CONFIG_FILE, 'r', encoding='utf-8') as f:
                    self.DEFAULT_CONFIG = json.load(f)
            except Exception as e:
                raise RuntimeError(f"❌ 无法读取默认配置文件: {e}")
        # BUGFIX: deep copy — the shallow .copy() shared nested dicts with the
        # cache, so deep_update() corrupted DEFAULT_CONFIG in place.
        return copy.deepcopy(self.DEFAULT_CONFIG)

    def load_config(self) -> Dict:
        """Load the user config, generating it from the template if missing."""
        if not os.path.exists(self.CONFIG_FILE):
            print(f"🔧 配置文件 {self.CONFIG_FILE} 不存在,正在基于默认模板创建...")
            default = self._load_default()
            if self.save_config(default):
                print(f"✅ 默认配置已生成: {self.CONFIG_FILE}")
            else:
                print(f"❌ 默认配置生成失败,请检查路径权限: {os.path.dirname(self.CONFIG_FILE)}")
            return default

        try:
            with open(self.CONFIG_FILE, 'r', encoding='utf-8') as f:
                user_config = json.load(f)
            # User values override the defaults, key by key.
            merged = self._load_default()
            self.deep_update(merged, user_config)
            return merged
        except (json.JSONDecodeError, UnicodeDecodeError) as e:
            print(f"⚠️ 配置文件格式错误或编码异常: {e}")
            return self._recover_from_corrupted()
        except PermissionError as e:
            print(f"⚠️ 无权限读取配置文件: {e}")
            return self._recover_from_corrupted()
        except Exception as e:
            print(f"⚠️ 未知错误导致配置加载失败: {type(e).__name__}: {e}")
            return self._recover_from_corrupted()

    def _recover_from_corrupted(self) -> Dict:
        """Back up a corrupted config file and rebuild from the template."""
        backup_file = self.CONFIG_FILE + ".backup"
        try:
            if os.path.exists(self.CONFIG_FILE):
                os.rename(self.CONFIG_FILE, backup_file)
                print(f"📁 原始损坏配置已备份为: {backup_file}")
            default = self._load_default()
            self.save_config(default)
            print(f"✅ 已使用默认配置重建 {self.CONFIG_FILE}")
            return default
        except Exception as e:
            print(f"❌ 自动恢复失败: {e},将返回内存中默认配置")
            return self._load_default()

    def deep_update(self, default: Dict, override: Dict):
        """Recursively merge ``override`` into ``default`` in place."""
        for k, v in override.items():
            if k in default and isinstance(default[k], dict) and isinstance(v, dict):
                self.deep_update(default[k], v)
            else:
                default[k] = v

    def save_config(self, config: Dict) -> bool:
        """Persist the given config dict to config.json.

        :return: True on success.
        """
        try:
            from Progress.utils.resource_helper import ensure_directory
            config_dir = os.path.dirname(self.CONFIG_FILE)
            if not ensure_directory(config_dir):
                print(f"❌ 无法创建配置目录: {config_dir}")
                return False
            with open(self.CONFIG_FILE, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=4, ensure_ascii=False)
            return True
        except PermissionError:
            print(f"❌ 权限不足,无法写入配置文件: {self.CONFIG_FILE}")
            return False
        except Exception as e:
            print(f"❌ 保存配置失败: {type(e).__name__}: {e}")
            return False

    def get(self, *keys, default=None) -> Any:
        """Safely read a nested config value.

        Example: config.get("ai_model", "api_key", default="none")
        """
        data = self.config
        try:
            for k in keys:
                data = data[k]
            return data
        except (KeyError, TypeError):
            return default

    def save(self) -> bool:
        """Persist the current in-memory config to disk."""
        return self.save_config(self.config)


# Global singleton
config = ConfigManager()


# =====================================================================
# ai_callable registration decorator
# =====================================================================
from functools import wraps
import inspect
import logging

# Global registries consumed by the task executor / LLM schema builder.
REGISTERED_FUNCTIONS = {}   # func_name -> metadata
FUNCTION_SCHEMA = []        # OpenAI-style function schema entries
FUNCTION_MAP = {}           # (intent, action) -> method_name

logger = logging.getLogger("ai_assistant")


def ai_callable(
    *,
    description: str,
    params: dict,
    intent: str = None,
    action: str = None,
    concurrent: bool = False  # whether the function may run concurrently
):
    """Register a function so the AI can discover and invoke it.

    :param description: human-readable description, exported in the schema
    :param params: parameter schema dict
    :param intent: optional semantic intent key
    :param action: optional semantic action key
    :param concurrent: allow concurrent execution of this function
    :raises ValueError: when (intent, action) is already mapped
    """
    def decorator(func):
        func_name = func.__name__
        metadata = {
            "func": func,
            "description": description,
            "params": params,
            "intent": intent,
            "action": action,
            "signature": str(inspect.signature(func)),
            "concurrent": concurrent,
        }
        REGISTERED_FUNCTIONS[func_name] = metadata
        FUNCTION_SCHEMA.append({
            "name": func_name,
            "description": description,
            "parameters": params,
        })
        if intent and action:
            key = (intent, action)
            if key in FUNCTION_MAP:
                raise ValueError(f"冲突:语义 ({intent}, {action}) 已被函数 {FUNCTION_MAP[key]} 占用")
            FUNCTION_MAP[key] = func_name

        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        wrapper._ai_metadata = metadata
        return wrapper
    return decorator


# =====================================================================
# Progress/utils/resource_helper.py
# =====================================================================
import os
import sys
from typing import Optional


def get_internal_path(*relative_path_parts) -> str:
    """Resolve a bundled-resource path (e.g. base_config.json) in both
    development and frozen (PyInstaller) environments.

    Example: get_internal_path("database", "base_config.json")
    """
    if getattr(sys, 'frozen', False):
        base_path = sys._MEIPASS  # PyInstaller extraction directory
    else:
        # __file__ → Progress/utils/resource_helper.py
        current_dir = os.path.dirname(os.path.abspath(__file__))
        progress_root = os.path.dirname(current_dir)   # Progress/
        base_path = os.path.dirname(progress_root)     # project root
    return os.path.join(base_path, *relative_path_parts)


def get_app_path() -> str:
    """Directory for runtime data (config.json, logs).

    Frozen build: the exe's directory. Development: the project root.
    """
    if getattr(sys, 'frozen', False):
        return os.path.dirname(sys.executable)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    progress_root = os.path.dirname(current_dir)
    return os.path.dirname(progress_root)


def resource_path(*sub_paths: str, base_key: str = "resource_path") -> str:
    """Resolve a user-resource path relative to config paths.<base_key>.

    Example: resource_path("Music", "bgm.mp3") → <resource_path>/Music/bgm.mp3

    :param sub_paths: path components to append
    :param base_key: key under config "paths", default "resource_path"
    :raises ValueError: when the configured base path is missing
    """
    # Lazy import to avoid a circular dependency with database.config.
    from database.config import config

    raw_base = config.get("paths", base_key)
    if not raw_base:
        raise ValueError(f"配置项 paths.{base_key} 未设置")

    if os.path.isabs(raw_base):
        base_path = raw_base
    else:
        base_path = os.path.join(get_app_path(), raw_base)

    full_path = os.path.normpath(base_path)
    # Sanitise each component: strip leading ./\ and split on both slashes.
    for part in sub_paths:
        clean_part = str(part).strip().lstrip(r'./\ ')
        for p in clean_part.replace('\\', '/').split('/'):
            if p:
                full_path = os.path.join(full_path, p)
    return os.path.normpath(full_path)


def ensure_directory(path: str) -> bool:
    """Ensure a directory exists; for a file path, create its parent.

    NOTE(review): the "basename contains a dot → file" heuristic will
    misclassify directories such as "v1.2" — confirm acceptable for the
    paths this project uses.
    """
    dir_path = path
    basename = os.path.basename(dir_path)
    if '.' in basename and len(basename) > 1 and not basename.startswith('.'):
        dir_path = os.path.dirname(path)
    if not dir_path or dir_path in ('.', './', '..'):
        return True
    try:
        os.makedirs(dir_path, exist_ok=True)
        return True
    except PermissionError:
        print(f"❌ 权限不足,无法创建目录: {dir_path}")
        return False
    except Exception as e:
        print(f"❌ 创建目录失败: {dir_path}, 错误: {type(e).__name__}: {e}")
        return False

# 检查一下,最后做个整理
10-29
import asyncio
import json
import os
import re
from datetime import datetime
from io import BytesIO

import streamlit as st
import pandas as pd
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage, AgentEvent, ModelClientStreamingChunkEvent, MultiModalMessage
from autogen_core import CancellationToken
from autogen_core import Image as AGImage
from autogen_ext.models.openai import OpenAIChatCompletionClient
from PIL import Image as PILImage


def extract_json_from_markdown(markdown_text: str) -> str:
    """Extract the first ```json code block from Markdown text.

    :param markdown_text: markdown that may contain ```json fences
    :return: the inner text of the first JSON block, or "[]" when none is
             found so callers can json.loads() the result safely.
             (BUGFIX: the original indexed matches[0] unconditionally and
             raised IndexError on LLM output without a JSON block.)
    """
    pattern = r'```json\s*?\n(.*?)```'
    matches = re.findall(pattern, markdown_text, re.DOTALL | re.IGNORECASE)
    return matches[0] if matches else "[]"


def get_current_datetime():
    """Timestamp string used in download filenames (YYYYmmddHHMMSS)."""
    return datetime.now().strftime("%Y%m%d%H%M%S")


def validate_json(json_str):
    """Validate and, if necessary, clean a JSON string.

    :return: (parsed_object_or_original_string, is_valid)
    """
    try:
        # Fast path: already valid JSON.
        return json.loads(json_str), True
    except json.JSONDecodeError:
        try:
            # BUGFIX: the original patterns were mangled/invalid —
            # r'\u001b$$[^m]*m' never matched ANSI escapes, the lone-backslash
            # pattern was a syntax error, and Python's re does not support
            # (?R) recursion. Use working equivalents instead.
            # 1) Strip ANSI colour escape sequences.
            cleaned = re.sub(r'\x1b\[[^m]*m', '', json_str)
            # 2) Double lone backslashes that would break JSON decoding
            #    (keep valid escapes like \n, \", \\ intact).
            cleaned = re.sub(r'(?<!\\)\\(?![\\"/bfnrtu])', r'\\\\', cleaned)
            # 3) Grab the outermost object or array, greedily.
            json_match = re.search(r'\{.*\}|\[.*\]', cleaned, re.DOTALL)
            if json_match:
                return json.loads(json_match.group()), True
            return json_str, False
        except Exception:
            return json_str, False


# Page configuration
st.set_page_config(page_title="测试用例生成器", page_icon="🧪", layout="wide")

# Session-state initialisation
if 'test_cases' not in st.session_state:
    st.session_state.test_cases = ""
if '需求' not in st.session_state:
    st.session_state.需求 = ""
if 'api_key_set' not in st.session_state:
    st.session_state.api_key_set = False
if 'input_str' not in st.session_state:
    st.session_state.input_str = ""
if 'model_client' not in st.session_state:
    st.session_state.model_client = None
if 'image_files' not in st.session_state:
    st.session_state.image_files = []

# Prompt templates
SYSTEM_PROMPT = """你是一个专业的测试工程师,能够根据用户提供的需求进行分析需求,生成测试用例。
请根据以下要求生成测试用例:
0. 内容约束:不编造未说明的内容
1. 必须包含用例编号、用例名称、优先级、前置条件、操作步骤、预期结果等要素
2. 优先级:高、中、低
4. 返回一个包含多个测试用例的数组
5. 使用中文描述
6. 确保测试用例覆盖各种场景,包括正常流程和边界情况
7. 消除冗余用例,合并重复验证点,冗余用例不要重复生成
8. 步骤描述和预期结果的要一一对应,步骤之间用换行符连接,预期结果之间也用换行符连接
输出格式示例:
```markdown
# 测试需求分析文档
## 测试目标
- [清晰的功能目标描述]
## 需求拆解
| 需求ID | 需求描述 | 测试类型 | 优先级 | 验收标准 |
|--------|----------|----------|--------|----------|
## 风险分析
- **高优先级风险**:
  - [风险描述] → [缓解方案]
## 测试策略
- [功能测试]:
  - 覆盖场景:
    - [场景1]
    - [场景2]
- [非功能测试]:
  - 性能指标: [RPS ≥ 1000]
  - 安全要求: [OWASP TOP10覆盖]
## 待澄清项
- [问题1] (需业务方确认)
- [问题2] (需架构师确认)
```
对应生成的用例:
```json
```
工作流
先进行需求分析输出以上格式的需求分析结果
- 根据需求分析列出测试点
- 根据不同测试点生成测试用例
- 将生成的测试用例用markdown格式的json进行标注
"""

APPROVE_PROMPT = """你是一位资深测试架构师,拥有10年复杂系统测试经验,擅长在评审用例中识别隐性漏洞与需求偏差,并补充相应的测试用例。。
以下是补充用例时的限制:
1. 包含用例编号、用例名称、前置条件、操作步骤、预期结果等要素
2. 优先级:高、中、低
3. 每个测试用例使用JSON格式表示
4. 返回一个包含多个测试用例的数组
5. 使用中文描述
6. 确保测试用例覆盖各种场景,包括正常流程和边界情况
7. 步骤描述和预期结果的要一一对应,步骤之间用换行符连接,预期结果之间也用换行符连接
8. 消除冗余用例,合并重复验证点,冗余用例不要重复生成
输出格式示例:
### 用例评审结果
根据提供的需求和现有测试用例,以下是对现有用例的评审结果:
1. **TC001 - 正确用户名和密码登录**
   - 评审结果:合理,覆盖了正常登录场景。
### 需要补充的测试用例
虽然现有用例覆盖了基本的登录和进件列表查看、筛选功能,但仍需补充以下测试用例以确保全面覆盖各种场景,包括边界情况和异常情况:
```json
[
  {
    "用例编号": "TC008",
    "用例名称": "空用户名和密码登录",
    "优先级": "中",
    "前置条件": "无",
    "操作步骤": "1. 打开登录页面\\n2. 不输入用户名\\n3. 不输入密码\\n4. 点击登录按钮",
    "预期结果": "1. 系统显示用户名和密码不能为空的提示\\n2. 页面停留在登录页"
  },
  {
    "用例编号": "TC009",
    "用例名称": "未记住账号登录",
    "优先级": "低",
    "前置条件": "无",
    "操作步骤": "1. 打开登录页面\\n2. 输入正确的用户名\\n3. 输入正确的密码\\n4. 取消记住账号选项\\n5. 点击登录按钮",
    "预期结果": "1. 系统显示登录成功提示\\n2. 页面跳转至首页或进件列表页\\n3. 关闭浏览器后重新打开,用户名和密码不自动填充"
  }
]
```
### 结论
[给出覆盖率评测]
工作流
进行用例评审
- 根据需求和用例给出评审结果
- 根据评审结果生成需要补充测试用例,不需要补充用例输出[],以上生成的测试用例格式要以markdown格式的json进行标注
"""

# Page title
st.title("🧪 测试用例生成器")
tabs1, tabs2 = st.tabs(["测试用例生成器", "模型配置"])

with tabs1:
    # Requirement input area
    col1, col2 = st.columns([2, 1])
    with col1:
        requirement = st.text_area("请输入测试需求:", value=st.session_state.需求, height=200)
        upload_images = st.file_uploader(
            "请上传图片", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
        if upload_images:
            for upload_image in upload_images:
                st.image(upload_image, caption=upload_image.name, use_container_width=True)
        input_str = st.text_area(
            "请输入要求:", value=st.session_state.input_str, height=200,
            placeholder="请按照需求生成测试用例")
        generate_btn = st.button("生成测试用例")

        # Result area — empty containers for streaming output.
        st.markdown("### 生成结果")
        generate_start_out = st.empty()
        test_case_output = st.empty()
        approve_start_output = st.empty()
        approve_test_output = st.empty()

    with col2:
        st.markdown("### 测试用例列表")

    async def stream_response(requirement, input_str):
        """Stream generation + review of test cases; return the merged list."""
        if not st.session_state.api_key_set:
            st.error("请先输入有效的API密钥")
            return ""

        generate_start = generate_start_out.empty()
        stream_container = test_case_output.empty()
        approve_start = approve_start_output.empty()
        approve_stream_container = approve_test_output.empty()

        # Pick the vision client when images were uploaded.
        client = (st.session_state.model_client_vl if upload_images
                  else st.session_state.model_client)
        # Generation agent
        assistant = AssistantAgent(
            name="assistant_stream",
            system_message=SYSTEM_PROMPT,
            model_client=client,
            model_client_stream=True,
        )
        # Review agent (BUGFIX: renamed — both agents were "assistant_stream")
        approve = AssistantAgent(
            name="approve_stream",
            system_message=APPROVE_PROMPT,
            model_client=client,
            model_client_stream=True,
        )

        generate_message = ""
        generate_start.markdown("【现在开始生成测试用例:】\n")
        message_sequence = []          # generation messages
        approve_message_sequence = []  # review messages

        def get_images(up_images):
            """Re-read each uploaded file into an AutoGen image."""
            images_temp = []
            for uploaded_file in up_images:
                uploaded_file.seek(0)  # uploader buffers are consumed by st.image
                images_temp.append(AGImage(PILImage.open(uploaded_file)))
            return images_temp

        if upload_images:
            content = [f"先根据上传的图片和需求:{requirement},先进行需求分析,给出需求分析结果然后生成测试用例,以上生成的测试用例要以markdown的json格式返回,另外要求:{input_str}"]
            content.extend(get_images(upload_images))
            message_sequence.append(MultiModalMessage(content=content, source="user"))
        else:
            content = f"先根据需求:{requirement},先进行需求分析,给出需求分析结果然后生成测试用例,以上生成的测试用例要以markdown的json格式返回,另外要求:{input_str}"
            message_sequence.append(TextMessage(content=content, source="user"))

        # BUGFIX: removed the stray `assistant.run_stream()` call — it was
        # invoked without arguments and without awaiting/consuming it.

        # Test-case generation (streamed)
        async for res in assistant.on_messages_stream(
            message_sequence,
            cancellation_token=CancellationToken(),
        ):
            if isinstance(res, ModelClientStreamingChunkEvent):
                generate_message += res.content
                cleaned_message, is_valid = validate_json(generate_message)
                if is_valid:
                    try:
                        formatted = json.dumps(cleaned_message, indent=2, ensure_ascii=False)
                        stream_container.code(formatted, language="json")
                    except Exception:
                        stream_container.markdown(cleaned_message)
                else:
                    stream_container.markdown(generate_message)

        generate_test = extract_json_from_markdown(generate_message)
        generate_test_list = json.loads(generate_test)

        # Test-case review
        approve_prompt = """
        需求为:{requirement},测试用例列表{generate_test},请根据需求和用例列表,先进行用例评审决定是否补充用例,另外限制:步骤描述和预期结果的要一一对应,步骤之间用换行符连接,预期结果之间也用换行符连接
        """
        approve_message = ""
        if upload_images:
            approve_content = [approve_prompt.format(
                requirement=requirement, generate_test=generate_test)]
            approve_content.extend(get_images(upload_images))
            approve_message_sequence.append(
                MultiModalMessage(content=approve_content, source="user"))
        else:
            approve_content = approve_prompt.format(
                requirement=requirement, generate_test=generate_test)
            approve_message_sequence.append(
                TextMessage(content=approve_content, source="user"))

        approve_start.markdown("【现在进行用例评审:】\n")
        async for res in approve.on_messages_stream(
            approve_message_sequence,
            cancellation_token=CancellationToken(),
        ):
            if isinstance(res, ModelClientStreamingChunkEvent):
                approve_message += res.content
                cleaned_message, is_valid = validate_json(approve_message)
                if is_valid:
                    try:
                        formatted = json.dumps(cleaned_message, indent=2, ensure_ascii=False)
                        approve_stream_container.code(formatted, language="json")
                    except Exception:
                        approve_stream_container.markdown(cleaned_message)
                else:
                    approve_stream_container.markdown(approve_message)

        approve_test = extract_json_from_markdown(approve_message)
        approve_test_list = json.loads(approve_test)
        # BUGFIX: test the parsed list, not the raw string — "[]" is truthy.
        if approve_test_list:
            generate_test_list.extend(approve_test_list)
        return generate_test_list

    async def handle_generate(rem, input_str):
        """Run the generation pipeline behind a spinner."""
        with st.spinner('正在生成测试用例...'):
            return await stream_response(rem, input_str)

    # Trigger generation and persist the result in session state.
    if generate_btn and requirement:
        print(f"模型配置:{os.getenv('OPENAI_API_MODEL')}")
        print(f"视觉模型配置:{os.getenv('VL_OPENAI_API_MODEL')}")
        st.session_state.需求 = requirement
        st.session_state.test_cases = asyncio.run(handle_generate(requirement, input_str))

    # Show the generated cases in collapsible panels.
    if st.session_state.test_cases:
        for i, case in enumerate(st.session_state.test_cases):
            with st.expander(f"{case['用例编号']}: {case['用例名称']}"):
                st.markdown(f"**优先级**: {case['优先级']}")
                st.markdown(f"**前置条件**: {case['前置条件']}")
                st.markdown("**测试步骤**:")
                st.text(case['操作步骤'])
                st.markdown("**预期结果**:")
                st.text(case['预期结果'])

        st.subheader("下载测试用例")
        excel_data = BytesIO()
        # Build the DataFrame with a fixed column order.
        df = pd.DataFrame(list(st.session_state.test_cases))
        columns = ['用例编号', '用例名称', '优先级', '前置条件', '操作步骤', '预期结果']
        df = df.reindex(columns=columns)
        df.to_excel(excel_data, index=False, engine="openpyxl")
        excel_data.seek(0)
        st.download_button(
            label="下载Excel",
            data=excel_data,
            file_name=f"test_cases_{get_current_datetime()}.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        )

with tabs2:
    st.subheader("模型配置")
    # SECURITY: a real-looking API key is hardcoded as the default below.
    # Rotate it and drop the fallback — rely on the environment variable.
    api_key = st.text_input("请输入API密钥:", type="password",
                            value=os.getenv("OPENAI_API_KEY", "sk-d3yLWf4sx4zuDm8sEKwd5jxf8qDDSNLBX0FOteJLGgUPjbv7"))
    # e.g. https://dashscope.aliyuncs.com/compatible-mode/v1
    api_base_url = st.text_input("请输入model地址:", type="default",
                                 value=os.getenv("OPENAI_API_BASE", "https://api.qingyuntop.top/v1"))
    api_model = st.text_input("请输入使用的模型:", type="default",
                              value=os.getenv("OPENAI_API_MODEL", "qwen-plus-latest"))

    st.subheader("视觉模型配置")
    vl_api_key = st.text_input("请输入视觉模型API密钥:", type="password",
                               value=os.getenv("VL_OPENAI_API_KEY", "sk-d3yLWf4sx4zuDm8sEKwd5jxf8qDDSNLBX0FOteJLGgUPjbv7"))
    vl_api_base_url = st.text_input("请输入视觉模型model地址:", type="default",
                                    value=os.getenv("VL_OPENAI_API_BASE", "https://api.qingyuntop.top/v1"))
    vl_api_model = st.text_input("请输入视觉模型使用的模型:", type="default",
                                 value=os.getenv("VL_OPENAI_API_MODEL", "qwen-vl-max"))

    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
        os.environ["OPENAI_API_BASE"] = api_base_url
        os.environ["OPENAI_API_MODEL"] = api_model
        st.session_state.api_key_set = True
    else:
        st.warning("请先输入API密钥以启用模型交互功能")

    if vl_api_key:
        os.environ["VL_OPENAI_API_KEY"] = vl_api_key
        os.environ["VL_OPENAI_API_BASE"] = vl_api_base_url
        os.environ["VL_OPENAI_API_MODEL"] = vl_api_model
        st.session_state.api_key_set = True
    else:
        st.warning("请先输入API密钥以启用模型交互功能")

# Build the AutoGen model clients from the configured environment.
model_config = {
    "model": os.getenv("OPENAI_API_MODEL"),
    "api_key": os.getenv("OPENAI_API_KEY"),
    "base_url": os.getenv("OPENAI_API_BASE"),
    "model_info": {
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": "unknown",
        "multiple_system_messages": True,
        "structured_output": True,
    },
}
vl_model_config = {
    "model": os.getenv("VL_OPENAI_API_MODEL"),
    "api_key": os.getenv("VL_OPENAI_API_KEY"),
    "base_url": os.getenv("VL_OPENAI_API_BASE"),
    "model_info": {
        "vision": True,
        "function_calling": True,
        "json_output": True,
        "family": "unknown",
        "multiple_system_messages": True,
        "structured_output": True,
    },
}
st.session_state.model_client = OpenAIChatCompletionClient(**model_config)
st.session_state.model_client_vl = OpenAIChatCompletionClient(**vl_model_config)
# 我的代码是这样的怎么修改
08-02
# Context (from the post): an Electron-based UI calls this Python API server
# to drive the AI voice assistant. ("我现在在使用 Electron框架构建UI实现对
# python主程序的api接口调用完成AI语音助手" / "这是api工具")

# api_server.py
import base64
import os
import threading
import time
from typing import Any, Dict

from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from werkzeug.serving import make_server
from werkzeug.utils import secure_filename

from Progress.app import get_voice_recognizer
from Progress.utils.logger_utils import logger
from core.handler import handle_user_input
from database.config import config

# =============================
# Configuration (loaded from the global config)
# =============================
ENABLE_API_SERVER = config.get("app", "enable_api_server", default=True)
API_HOST = config.get("app", "api_host", default="127.0.0.1")
API_PORT = config.get("app", "api_port", default=5000)
RUN_MODE = config.get("app", "run_mode", default="auto")
VOICE_RECOGNIZER_TIMEOUT = config.get("stt", "timeout", default=3)
TEMP_DIR = config.get("app", "temp_dir", default="temp_audio")

os.makedirs(TEMP_DIR, exist_ok=True)

# Shared speech-recognizer instance (project component).
recognizer = get_voice_recognizer()

# =============================
# Global mutable status snapshot, served verbatim by GET /api/status.
# =============================
current_status = {
    "is_listening": False,
    "is_tts_playing": False,
    "last_command_result": None,
    "timestamp": int(time.time())
}


class APIServer:
    """RESTful API server for Web / Mobile / IoT clients of the AI assistant.

    Every request is ultimately routed through ``core.handler``, which decides
    per-source behaviour (e.g. whether local TTS playback happens).
    """

    def __init__(self):
        self.app = Flask(__name__)
        CORS(self.app)  # allow cross-origin requests (Electron / web UI)
        self.server = None   # werkzeug server, created lazily in start()
        self.thread = None   # background thread running serve_forever()
        self.running = False
        self._add_routes()
        logger.debug("🔧 APIServer 初始化完成")

    # ------------------------------------------------------------- utilities

    def _update_status(self, **kwargs) -> None:
        """Merge *kwargs* into the global status dict and refresh the timestamp."""
        current_status.update(kwargs)
        current_status["timestamp"] = int(time.time())

    def _determine_source(self) -> str:
        """Classify the client from the ``X-Client-Type`` request header.

        :return: one of ``'web'``, ``'mobile'``, ``'local'``, or ``'api'`` as
                 the fallback when the header is absent or unrecognized.
        """
        client_type = request.headers.get("X-Client-Type", "").lower().strip()
        mapping = {
            "web": ["web", "browser"],
            "mobile": ["mobile", "android", "ios"],
            "local": ["raspberry", "local-device", "pi", "desktop"],
        }
        for src, keywords in mapping.items():
            if any(k in client_type for k in keywords):
                return src
        return "api"

    def _should_play_tts(self, source: str) -> bool:
        """Only local physical devices get automatic TTS playback."""
        return source == "local"

    def _handle_text(self, text: str, source: str) -> Dict[str, Any]:
        """Shared text pipeline used by /api/text/query and /api/voice/upload.

        Extracted into a plain method so the voice route does not have to fake
        a JSON request: Flask's ``request.json`` is a read-only property, so
        the previous ``request.json = {...}`` re-dispatch hack raised at
        runtime.

        :param text: recognized or typed user text (already validated non-empty)
        :param source: client source as returned by :meth:`_determine_source`
        :return: JSON-serializable response payload
        """
        result = handle_user_input(user_text=text, source=source)
        response_data: Dict[str, Any] = {
            "success": result.get("success", False),
            "response_to_user": result.get("response_to_user", ""),
        }
        # Only expose the audio URL when this source triggers local TTS.
        if self._should_play_tts(source) and result.get("tts_audio_url"):
            response_data["tts_audio_url"] = result["tts_audio_url"]
        details = result.get("details")
        if details is not None:
            response_data["details"] = details
        return response_data

    # ----------------------------------------------------------------- routes

    def _add_routes(self):
        """Register all API routes."""
        self._add_health_route()
        self._add_status_route()
        self._add_text_query_route()
        self._add_voice_upload_route()
        self._add_tts_audio_route()

    def _add_health_route(self):
        @self.app.route('/api/health', methods=['GET'])
        def health():
            return jsonify({
                "status": "ok",
                "mode": RUN_MODE,
                "running": True,
                "timestamp": int(time.time())
            })

    def _add_status_route(self):
        @self.app.route('/api/status', methods=['GET'])
        def status():
            # Copy so concurrent handler updates cannot race serialization.
            return jsonify(current_status.copy())

    def _add_text_query_route(self):
        @self.app.route('/api/text/query', methods=['POST'])
        def text_query():
            data: Dict = request.get_json() or {}
            text = data.get("text", "").strip()
            if not text:
                return jsonify({"error": "缺少文本内容"}), 400
            source = self._determine_source()
            logger.info(f"📩 [{source}] 文本请求: '{text}'")
            try:
                return jsonify(self._handle_text(text, source))
            except Exception as e:
                logger.exception(f"❌ 处理文本请求失败: {text}")
                return jsonify({
                    "success": False,
                    "error": "内部服务错误",
                    "message": str(e)
                }), 500

    def _add_voice_upload_route(self):
        @self.app.route('/api/voice/upload', methods=['POST'])
        def voice_upload():
            source = self._determine_source()
            # NOTE(review): the old "local only" 403 gate is removed.
            # Recognition runs server-side for every client; web clients were
            # getting 403 here merely because they send "X-Client-Type: web".
            # TTS playback is still restricted per-source inside the handler.
            audio_path = None
            session_id = request.form.get('session_id',
                                          f"upload_{int(time.time())}")
            try:
                # Accept either a multipart file or a base64 form field.
                if 'file' in request.files:
                    file = request.files['file']
                    if not file.filename:
                        return jsonify({"error": "上传的文件名为空"}), 400
                    ext = os.path.splitext(file.filename)[1] or ".wav"
                    filename = secure_filename(
                        f"{session_id}_{int(time.time())}{ext}")
                    file_path = os.path.join(TEMP_DIR, filename)
                    file.save(file_path)
                    audio_path = file_path
                elif 'audio_base64' in request.form:
                    # Strip a possible "data:audio/...;base64," prefix.
                    b64_str = request.form['audio_base64'].split(",")[-1]
                    raw_data = base64.b64decode(b64_str)
                    # secure_filename: session_id comes from the client.
                    file_path = os.path.join(
                        TEMP_DIR, secure_filename(f"{session_id}.wav"))
                    with open(file_path, 'wb') as f:
                        f.write(raw_data)
                    audio_path = file_path
                else:
                    return jsonify(
                        {"error": "请提供 'file' 或 'audio_base64' 字段"}), 400

                # Speech recognition (status flag flipped around the call).
                self._update_status(is_listening=True)
                logger.debug(f"🎤 正在识别语音文件: {audio_path}")
                try:
                    recognized_text = recognizer.listen_and_recognize(
                        audio_file=audio_path,
                        timeout=VOICE_RECOGNIZER_TIMEOUT
                    )
                finally:
                    self._update_status(is_listening=False)

                if not recognized_text:
                    logger.warning("⚠️ 语音识别未获取到有效文本")
                    return jsonify({
                        "success": False,
                        "error": "语音识别失败",
                        "response_to_user": "抱歉,我没听清,请再说一遍。"
                    }), 400

                logger.info(f"👂 识别结果: '{recognized_text}'")
                # Reuse the shared pipeline instead of mutating request.json
                # (read-only in Flask) and re-dispatching to text_query.
                response_data = self._handle_text(recognized_text, source)
                # The web UI displays this field; it was previously missing.
                response_data["recognized_text"] = recognized_text
                return jsonify(response_data)
            except Exception as e:
                logger.exception("🎙️ 语音上传处理出错")
                return jsonify({
                    "success": False,
                    "error": "语音处理异常",
                    "detail": str(e)
                }), 500
            finally:
                # Best-effort cleanup of the temporary audio file.
                if audio_path and os.path.exists(audio_path):
                    try:
                        os.remove(audio_path)
                        logger.debug(f"🗑️ 已删除临时语音文件: {audio_path}")
                    except OSError:
                        pass

    def _add_tts_audio_route(self):
        @self.app.route('/api/tts/audio', methods=['GET'])
        def tts_audio():
            filename = request.args.get('file')
            if not filename:
                return jsonify({"error": "缺少参数 'file'"}), 400
            # secure_filename blocks path traversal via e.g. "../../etc/..".
            file_path = os.path.join(TEMP_DIR, secure_filename(filename))
            if not os.path.exists(file_path):
                logger.warning(f"📁 请求的音频文件不存在: {file_path}")
                return jsonify({"error": "文件不存在"}), 404
            logger.debug(f"📥 下载 TTS 音频: {file_path}")
            return send_file(file_path, mimetype="audio/mpeg")

    # -------------------------------------------------------------- lifecycle

    def start(self, host=None, port=None):
        """Start the API server in a background daemon thread (non-blocking).

        :param host: bind address (defaults to ``API_HOST``)
        :param port: port number (defaults to ``API_PORT``)
        """
        host = host or API_HOST
        port = port or API_PORT
        if self.running:
            logger.warning("⚠️ API 服务器已在运行,忽略重复启动")
            return

        def run():
            try:
                self.server = make_server(host, port, self.app)
                logger.info(f"🌐 API 服务已启动 → http://{host}:{port} (模式: {RUN_MODE})")
                self.running = True
                self.server.serve_forever()
            except Exception as e:
                # A shutdown() call makes serve_forever return normally, so an
                # exception after stop() flipped self.running is expected noise.
                if self.running:
                    logger.error(f"🚨 API 服务意外终止: {e}")
                else:
                    logger.debug("🛑 API 服务已正常关闭")

        self.thread = threading.Thread(target=run, daemon=True)
        self.thread.start()

    def stop(self):
        """Shut the server down safely and join the worker thread (max 3s)."""
        if not self.running:
            return
        logger.info("🛑 正在关闭 API 服务...")
        try:
            # Explicit None check instead of relying on AttributeError.
            if self.server is not None:
                self.server.shutdown()
            else:
                logger.warning("⚠️ server 对象尚未初始化,跳过 shutdown")
        except Exception as e:
            logger.error(f"❌ shutdown 出错: {e}")
        self.running = False
        if self.thread:
            self.thread.join(timeout=3)
            if self.thread.is_alive():
                logger.warning("⚠️ API 线程未能及时退出")
        logger.info("✅ API 服务已关闭")


# ================
# Global singleton
# ================
_api_server_instance = None


def get_api_server() -> APIServer:
    """Return the process-wide :class:`APIServer` singleton."""
    global _api_server_instance
    if _api_server_instance is None:
        _api_server_instance = APIServer()
    return _api_server_instance


# Convenience direct import
__all__ = ['get_api_server']
10-29
def _add_voice_upload_route(self): @self.app.route('/api/voice/upload', methods=['POST']) def voice_upload(): # 1. 获取客户端信息 ctx = self._parse_client_info() source, mode, session_id = ctx["source"], ctx["mode"], ctx["session_id"] # 2. 权限判断:仅允许 local 类设备上传语音 if "local" not in source: return jsonify({ "error": "语音上传功能仅限本地设备使用", "hint": "请设置 Header: X-Client-Type: local 或 body 中指定 'source': 'local'" }), 403 # 3. 创建临时路径 timestamp = int(time.time()) raw_filename = request.form.get('filename', f"upload_{timestamp}.wav") ext = os.path.splitext(raw_filename)[1] or ".wav" filename = secure_filename(f"{session_id}_{timestamp}{ext}") file_path = os.path.join(TEMP_DIR, filename) audio_path = None try: # 4. 接收音频数据 if 'file' in request.files: file = request.files['file'] if not file.filename: return jsonify({"error": "上传的文件名为空"}), 400 file.save(file_path) audio_path = file_path elif 'audio_base64' in request.form: b64_str = request.form['audio_base64'].split(",")[-1] raw_data = base64.b64decode(b64_str) with open(file_path, 'wb') as f: f.write(raw_data) audio_path = file_path else: return jsonify({"error": "请提供 'file' 或 'audio_base64' 字段"}), 400 # 5. 开始识别 self._update_status(is_listening=True) logger.debug(f"🎤 正在识别语音文件: {audio_path}") try: recognized_text = recognizer.listen_and_recognize( audio_file=audio_path, timeout=VOICE_RECOGNIZER_TIMEOUT ) finally: self._update_status(is_listening=False) if not recognized_text: logger.warning("⚠️ 语音识别未获取到有效文本") return jsonify({ "success": False, "error": "语音识别失败", "response_to_user": "抱歉,我没听清,请再说一遍。" }), 400 logger.info(f"👂 识别结果: '{recognized_text}'") # 6. 复用 text_query 逻辑(推荐方式:提取为公共函数) request.json = {"text": recognized_text, "source": source, "mode": mode, "session_id": session_id} return self.app.view_functions['text_query']() except Exception as e: logger.exception("🎙️ 语音上传处理出错") return jsonify({ "success": False, "error": "语音处理异常", "detail": str(e) }), 500 finally: # 7. 
清理临时文件 if audio_path and os.path.exists(audio_path): self._safe_remove(audio_path) 这是语音接口 import { useState, useRef } from 'react'; import { uploadApi } from '../services/apiClient'; import { FaMicrophone, FaStopCircle } from 'react-icons/fa'; export default function VoiceRecorder() { const [recording, setRecording] = useState(false); const [transcript, setTranscript] = useState(''); const [response, setResponse] = useState(''); const [audioUrl, setAudioUrl] = useState<string | null>(null); const mediaRecorderRef = useRef<MediaRecorder | null>(null); const audioChunksRef = useRef<BlobPart[]>([]); const [mode] = useState<'interactive' | 'auto'>('interactive'); const startRecording = async () => { try { const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); mediaRecorderRef.current = new MediaRecorder(stream); audioChunksRef.current = []; mediaRecorderRef.current.ondataavailable = (e) => { audioChunksRef.current.push(e.data); }; mediaRecorderRef.current.onstop = async () => { const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' }); await uploadAudio(audioBlob); }; mediaRecorderRef.current.start(); setRecording(true); setTranscript(''); setResponse(''); setAudioUrl(null); } catch (err: any) { alert('无法访问麦克风:' + err.message); } }; const stopRecording = () => { if (mediaRecorderRef.current && recording) { mediaRecorderRef.current.stop(); mediaRecorderRef.current.stream.getTracks().forEach(track => track.stop()); setRecording(false); } }; const uploadAudio = async (audioBlob: Blob) => { const formData = new FormData(); formData.append('file', audioBlob, 'recording.webm'); formData.append('mode', mode); formData.append('source', 'web'); formData.append('session_id', `web_${Date.now()}`); try { const res = await uploadApi.post('/api/voice/upload', formData, { headers: { 'Content-Type': 'multipart/form-data', 'X-Client-Type': 'web', }, }); const data = res.data; if (data.success) { setTranscript(data.recognized_text || '语音已识别'); 
setResponse(data.response_to_user); if (data.tts_audio_url) { const url = data.tts_audio_url.startsWith('http') ? data.tts_audio_url : `${import.meta.env.VITE_API_BASE}${data.tts_audio_url}`; setAudioUrl(url); if (mode === 'auto' && data.should_play_tts) { const audio = new Audio(url); audio.play().catch(console.warn); } } if (data.details?.expect_follow_up) { setTimeout(() => { startRecording(); }, 1000); } } else { setTranscript('识别失败'); setResponse(data.error || '语音处理失败'); } } catch (err: any) { setResponse('上传失败:' + err.message); } }; return ( <div className="widget-card"> <h3 className="widget-title">🎙️ 语音助手(支持追问)</h3> <button onClick={recording ? stopRecording : startRecording} className={`record-button ${recording ? 'stop-button' : ''}`} > {recording ? <FaStopCircle /> : <FaMicrophone />} {recording ? '停止录音' : '开始录音'} </button> {recording && ( <div className="recording-indicator"> <div style={{ width: '12px', height: '12px', backgroundColor: '#dc2626', borderRadius: '50%' }}></div> 正在录音... </div> )} {transcript && ( <p className="response-box response-user"> <strong>你说:</strong> {transcript} </p> )} {response && ( <p className="response-box response-ai"> <strong>AI 回复:</strong> {response} </p> )} {audioUrl && ( <div className="mt-4"> <audio controls src={audioUrl} className="audio-player"> 您的浏览器不支持音频播放。 </audio> </div> )} </div> ); } 这是语音功能UI,分析一下403的问题
最新发布
10-31
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额 3.43,前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值