# E:\AI_System\main.py (E-drive-only edition)
import os
import sys
import traceback
import time
import logging
import signal
import json
from pathlib import Path
from core.config import config
from core.command_listener import start_command_listener
from agent.model_manager import ModelManager
from agent.cognitive_architecture import CognitiveSystem
from utils.path_utils import normalize_path, clean_path_cache

# ====================== Base path configuration ======================
# Every file operation is confined to the E: drive.
# NOTE: the trailing backslash matters -- on Windows, Path("E:") is a
# *drive-relative* path that resolves against the drive's current
# directory, not the drive root.
BASE_DRIVE = "E:\\"  # fixed to the E: drive
PROJECT_ROOT = Path(__file__).parent.resolve()  # project root directory (on E:)
WORKSPACE_PATH = Path(BASE_DRIVE) / "AI_Workspace"  # workspace root path

# Make sure the workspace exists before anything else runs.
WORKSPACE_PATH.mkdir(parents=True, exist_ok=True)
# ====================== Logging configuration ======================
def setup_logging() -> logging.Logger:
    """Configure the logging system; all log files live on the E: drive.

    Returns:
        The "Main" child logger on success, or a bare fallback logger
        if configuration fails entirely (startup must never crash here).
    """
    # Log directory (must be on the E: drive).
    logs_dir = WORKSPACE_PATH / "logs"
    try:
        logs_dir.mkdir(parents=True, exist_ok=True)

        # Resolve the configured log level (defaults to INFO on any
        # unknown value).
        log_level_name = config.get("log_level", "INFO").upper()
        log_level_mapping = {
            "DEBUG": logging.DEBUG,
            "INFO": logging.INFO,
            "WARNING": logging.WARNING,
            "ERROR": logging.ERROR,
            "CRITICAL": logging.CRITICAL,
        }
        log_level = log_level_mapping.get(log_level_name, logging.INFO)

        log_format = "%(asctime)s - [%(levelname)s] - %(name)s - %(message)s"
        formatter = logging.Formatter(log_format)

        # Reset the root logger so repeated calls don't stack handlers.
        root_logger = logging.getLogger()
        root_logger.setLevel(log_level)
        for handler in root_logger.handlers[:]:
            root_logger.removeHandler(handler)

        # File handler (on the E: drive); fall back to console-only if
        # the file cannot be created.
        log_file = logs_dir / f"system_{time.strftime('%Y%m%d_%H%M%S')}.log"
        try:
            file_handler = logging.FileHandler(log_file, encoding='utf-8')
            file_handler.setFormatter(formatter)
            root_logger.addHandler(file_handler)
        except Exception as e:
            print(f"⚠️ 无法创建日志文件: {e}")

        # Console handler is always attached.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        root_logger.addHandler(console_handler)

        main_logger = root_logger.getChild("Main")
        main_logger.info(f"日志系统初始化完成 (级别: {log_level_name})")
        main_logger.info(f"日志文件位置: {log_file}")
        return main_logger
    except Exception as e:
        # Last-resort fallback: return a usable logger no matter what.
        print(f"🔥 日志配置失败: {e}")
        return logging.getLogger("FallbackLogger")
# ====================== Model path resolution ======================
def get_model_paths(model_name: str) -> list:
    """Return every candidate location for *model_name* (E: drive only),
    ordered from most to least preferred."""
    return [
        WORKSPACE_PATH / "AI_Models" / model_name,    # preferred location
        Path(BASE_DRIVE) / "AI_Models" / model_name,  # fallback 1
        Path(BASE_DRIVE) / "Models" / model_name,     # fallback 2
        WORKSPACE_PATH / "models" / model_name        # project-bundled models
    ]
def find_valid_model_path(model_name: str, logger: logging.Logger) -> Path:
    """Locate an existing path for *model_name* on the E: drive.

    Raises:
        FileNotFoundError: when none of the candidate locations exist.
    """
    possible_paths = get_model_paths(model_name)
    for path in possible_paths:
        if path.exists():
            logger.info(f"✅ 找到模型路径: {path}")
            return path
    # No candidate existed -- log every location checked, then fail.
    logger.critical(f"🛑 在E盘找不到模型: {model_name}")
    logger.critical("检查位置:")
    for path in possible_paths:
        logger.critical(f" - {path}")
    raise FileNotFoundError(f"在E盘找不到模型: {model_name}")
# ====================== Main entry point ======================
def main():
    """Program entry point; every file operation stays on the E: drive."""
    # Clear stale cached path lookups before touching the filesystem.
    clean_path_cache()

    logger = setup_logging()

    # Startup banner.
    logger.info("=" * 50)
    logger.info(f"🌟 启动AI系统 - 弹性星型架构")
    logger.info(f"🚀 工作空间: {WORKSPACE_PATH}")
    logger.info(f"📂 项目目录: {PROJECT_ROOT}")
    logger.info(f"🐍 Python版本: {sys.version}")
    logger.info(f"🖥️ 操作系统: {sys.platform}")
    logger.info("=" * 50)

    # --- Model manager --------------------------------------------------
    try:
        model_manager_config = config.get("model_manager", {})
        # Model cache directory (on the E: drive).
        cache_dir = WORKSPACE_PATH / "model_cache"
        cache_dir.mkdir(parents=True, exist_ok=True)
        model_manager = ModelManager(
            config=config,
            cache_dir=str(cache_dir),
            use_gpu=model_manager_config.get("use_gpu", True),
            max_models_in_memory=model_manager_config.get("max_models_in_memory", 3)
        )
        logger.info("✅ 模型管理器初始化完成")
    except Exception as e:
        logger.error(f"❌ 模型管理器初始化失败: {e}", exc_info=True)
        sys.exit(1)

    # --- Base model (searched on the E: drive only) ---------------------
    base_model_key = "TEXT_BASE"
    model_settings = config.get("model_settings", {})
    base_model_info = model_settings.get(base_model_key, {})
    model_type = base_model_info.get("type", "text")
    model_name = base_model_info.get("name", "Qwen2-7B")  # default model name
    try:
        model_path = find_valid_model_path(model_name, logger)
        # Register, then load.
        if model_manager.register_model(base_model_key, str(model_path), model_type):
            success, model = model_manager.load_model(base_model_key)
            if success:
                logger.info(f"✅ 基础模型加载成功: {base_model_key}")
            else:
                logger.error(f"❌ 模型加载失败: {base_model_key}")
                raise RuntimeError(f"模型加载失败: {base_model_key}")
        else:
            logger.error(f"❌ 模型注册失败: {base_model_key}")
            raise RuntimeError(f"模型注册失败: {base_model_key}")
    except Exception as e:
        logger.error(f"❌ 模型初始化失败: {e}", exc_info=True)
        sys.exit(1)

    # --- Cognitive system -----------------------------------------------
    try:
        cognitive_config = config.get("cognitive_config", {})
        # State storage for the cognitive system (on the E: drive).
        state_dir = WORKSPACE_PATH / "system_state"
        state_dir.mkdir(parents=True, exist_ok=True)
        cognitive_config["state_dir"] = str(state_dir)
        cognitive_system = CognitiveSystem(
            name="认知系统",
            model_manager=model_manager,
            config=cognitive_config
        )
        if cognitive_system.initialize():
            logger.info("✅ 认知系统初始化成功")
        else:
            raise RuntimeError("认知系统初始化失败")
    except Exception as e:
        logger.error(f"❌ 认知系统初始化失败: {e}", exc_info=True)
        sys.exit(1)

    def command_handler(command: str) -> dict:
        """Route one user command; returns a response dict."""
        logger.info(f"🔄 处理命令: {command}")
        # Built-in commands handled locally.
        if command.lower() in ["exit", "quit", "stop"]:
            return {"action": "shutdown", "message": "正在关闭系统..."}
        if command.lower() in ["status", "health"]:
            return cognitive_system.get_system_status()
        try:
            # Everything else goes through the cognitive system.
            return cognitive_system.process_command(command)
        except Exception as e:
            logger.error(f"❌ 命令处理错误: {e}", exc_info=True)
            return {"error": f"处理命令时出错: {e}"}

    def shutdown_handler():
        """Save state, shut every subsystem down, then exit the process."""
        logger.info("🛑 关闭系统中...")
        try:
            if cognitive_system:
                logger.info("💾 保存认知系统状态...")
                cognitive_system.save_state()
            if cognitive_system:
                logger.info("🛑 关闭认知系统...")
                cognitive_system.shutdown()
            if model_manager:
                logger.info("🛑 关闭模型管理器...")
                model_manager.shutdown()
            # BUG FIX: 'command_listener' is a closure variable from main(),
            # so the old `'command_listener' in locals()` test was always
            # False here and the listener was never stopped. Probe the free
            # variable directly; NameError means it was never started.
            try:
                if command_listener.running:
                    logger.info("📵 停止命令监听器...")
                    command_listener.stop()
            except NameError:
                pass
            logger.info("✅ 系统已完全关闭")
            sys.exit(0)
        except Exception as e:
            logger.error(f"❌ 关闭过程中出错: {e}", exc_info=True)
            sys.exit(1)

    def signal_handler(sig, frame):
        """Translate SIGINT/SIGTERM into an orderly shutdown."""
        signals = {signal.SIGINT: "Ctrl+C", signal.SIGTERM: "终止信号"}
        logger.warning(f"⚠️ 收到 {signals.get(sig, sig)},关闭系统...")
        shutdown_handler()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Start the command listener.
    try:
        command_listener = start_command_listener(command_handler=command_handler)
        logger.info("✅ 命令监听器已启动")
    except Exception as e:
        logger.error(f"❌ 命令监听器启动失败: {e}", exc_info=True)
        shutdown_handler()
        return

    logger.info("🌟 系统准备就绪! 输入命令开始交互 ('help' 查看命令列表)")

    # --- Main loop -------------------------------------------------------
    try:
        while True:
            # Drain the command queue.
            while not command_listener.command_queue.empty():
                command = command_listener.command_queue.get()
                response = command_handler(command)
                if isinstance(response, dict) and response.get("action") == "shutdown":
                    shutdown_handler()
                    return
                if response:
                    print("\n" + ("-" * 50))
                    if isinstance(response, dict):
                        response_str = json.dumps(response, indent=2, ensure_ascii=False)
                        print(f"系统响应:\n{response_str}")
                    else:
                        print(f"系统响应: {str(response)}")
                    print("-" * 50 + "\n")
            # Sleep briefly to avoid a busy-wait spinning the CPU.
            time.sleep(0.1)
    except KeyboardInterrupt:
        logger.info("🛑 用户中断操作,关闭系统...")
        shutdown_handler()
    except Exception as e:
        logger.critical(f"🔥 主循环错误: {e}", exc_info=True)
        shutdown_handler()
if __name__ == "__main__":
    def global_exception_handler(exc_type, exc_value, exc_traceback):
        """Last-chance handler for exceptions main() does not catch."""
        if issubclass(exc_type, KeyboardInterrupt):
            # BUG FIX: delegate to the *original* hook; calling
            # sys.excepthook here would recurse into this very function
            # once it has been installed as the hook.
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return
        logging.error(
            "未捕获的全局异常",
            exc_info=(exc_type, exc_value, exc_traceback)
        )
        print(f"🔥 严重错误: {exc_type.__name__}: {exc_value}")
        sys.exit(1)

    sys.excepthook = global_exception_handler

    # Launch the main function.
    try:
        main()
    except Exception as e:
        print(f"🔥 主函数异常: {e}")
        traceback.print_exc()
        sys.exit(1)
“
2.”# E:\AI_System\agent\environment_interface.py
import logging
import time
import queue
import threading
import json
from typing import Any, Optional, Dict, List
from agent.base_module import UnifiedCognitiveModule
from core.message import Message, MessageType
class EnvironmentInterface(UnifiedCognitiveModule):
    """Environment interaction interface.

    Bridges user I/O (input/output queues) and the cognitive system via
    the module message bus; a daemon thread pumps both queues.
    """

    def __init__(
        self,
        name: str = "EnvironmentInterface",
        coordinator: Optional[Any] = None,
        config: Optional[Dict] = None
    ):
        super().__init__(name=name, coordinator=coordinator, config=config)

        # Configuration parameters.
        config = config or {}
        self.max_workers = config.get("max_workers", 4)
        self.response_timeout = config.get("response_timeout", 30.0)
        self.log_level = config.get("log_level", "INFO")

        # Logger setup (level falls back to INFO on unknown names).
        self.logger = logging.getLogger(name)
        log_level = getattr(logging, self.log_level.upper(), logging.INFO)
        self.logger.setLevel(log_level)

        # Input/output queues.
        self.input_queue = queue.Queue()
        self.output_queue = queue.Queue()

        # Background pump thread.
        self.running = True
        self.message_thread = threading.Thread(target=self._process_messages, daemon=True)
        self.message_thread.start()

    def shutdown(self) -> bool:
        """Stop the pump thread and shut the module down."""
        try:
            self.running = False
            if self.message_thread.is_alive():
                self.message_thread.join(timeout=2.0)
            super().shutdown()
            self.logger.info("🛑 环境接口已关闭")
            return True
        except Exception as e:
            self.logger.error(f"❌ 关闭失败: {str(e)}")
            return False

    def process(self, input_data: Any) -> dict:
        """Queue *input_data* (dict or command string) for processing."""
        if isinstance(input_data, dict):
            self.add_input(input_data)
            return {"status": "queued"}
        elif isinstance(input_data, str):
            self.add_input({"command": input_data})
            return {"status": "queued"}
        else:
            return {"error": "不支持的输入类型"}

    def _process_messages(self):
        """Background thread: forward user input, drain the output queue."""
        while self.running:
            try:
                # Pick up pending user input (blocks up to 0.5s).
                user_input = self.get_input(timeout=0.5)
                if user_input:
                    # Forward to the cognitive system over the bus.
                    message = Message(
                        msg_type=MessageType.EVENT,
                        sender=self.name,
                        content=user_input,
                        target="CognitiveSystem"
                    )
                    self.send_message(message)
                    # .get() guards against dicts without a 'command' key,
                    # which would otherwise raise KeyError in a debug log.
                    self.logger.debug(f"📤 发送用户输入: {str(user_input.get('command', ''))[:20]}...")

                # Drain one queued output, if any.
                if not self.output_queue.empty():
                    output = self.output_queue.get_nowait()
                    self._display_output(output)
            except Exception as e:
                self.logger.error(f"消息处理出错: {str(e)}")
                time.sleep(0.1)  # brief back-off after an error

    def handle_message(self, message: Message):
        """Dispatch an incoming bus message addressed to this module."""
        if message.target and message.target != self.name:
            return  # addressed to someone else
        self.logger.debug(f"📩 收到消息 [{message.sender}]: {message.msg_type.name}")
        # DATA and RESPONSE messages are displayed; STATUS is only logged.
        if message.msg_type == MessageType.DATA:
            self.output(message.content)
        elif message.msg_type == MessageType.RESPONSE:
            self.output(message.content)
        elif message.msg_type == MessageType.STATUS:
            self.logger.info(f"系统状态更新: {message.content}")

    def get_input(self, timeout: float = 0.5) -> Optional[Dict]:
        """Pop one input dict, or None when the queue stays empty."""
        try:
            return self.input_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def output(self, response: Any):
        """Enqueue a response for display."""
        self.output_queue.put(response)

    def _display_output(self, response: Any):
        """Pretty-print one response to stdout and the log."""
        try:
            if isinstance(response, dict):
                response.setdefault("timestamp", time.time())
                response.setdefault("source", "system")
                response_str = json.dumps(response, ensure_ascii=False, indent=2)
                print(f"<< {response_str}")
                if "message" in response:
                    self.logger.info(f"💬 系统响应: {response['message']}")
                else:
                    self.logger.info("💬 系统响应: 无内容")
            elif isinstance(response, str):
                print(f"<< {response}")
                self.logger.info(f"💬 系统响应: {response}")
            elif isinstance(response, Message):
                print(f"<< [Message from {response.sender}]: {response.content[:50]}...")
                self.logger.info(f"💬 收到消息响应: {response.content[:50]}...")
            else:
                response_str = str(response)
                print(f"<< {response_str}")
                self.logger.info(f"💬 系统响应: {response_str}")
        except Exception as e:
            self.logger.error(f"输出响应失败: {str(e)}")

    def add_input(self, input_data: dict):
        """Add one input dict, stamping default timestamp/source."""
        if not isinstance(input_data, dict):
            self.logger.error("输入数据格式错误,必须是字典")
            return
        input_data.setdefault("timestamp", time.time())
        input_data.setdefault("source", "user")
        self.input_queue.put(input_data)
        # .get() guards against dicts without a 'command' key.
        self.logger.debug(f"手动添加输入: {str(input_data.get('command', ''))[:20]}...")

    def get_health_status(self) -> dict:
        """Report this module's health snapshot."""
        return {
            "status": "running" if self.running else "stopped",
            "module": self.name,
            "queue_size": self.input_queue.qsize(),
            "last_activity": time.time(),
            "output_queue_size": self.output_queue.qsize()
        }
“
3.”# E:\AI_System\agent\model_manager.py
import os
import sys
import logging
import json
import hashlib
import gc
import time
from pathlib import Path
from typing import Dict, Any, Optional, Tuple, List
from utils.path_utils import normalize_path, is_valid_hf_id
class ModelManager:
    """AI model manager: registers models in a JSON registry, loads them
    on demand (text / image / audio), and evicts least-recently-used
    models when the in-memory limit is hit."""

    MODEL_REGISTRY_FILE = "model_registry.json"
    DEFAULT_MODEL_PATHS = {
        "TEXT_BASE": "local_models/text_base",
        "TEXT_CHAT": "local_models/text_chat",
        "IMAGE_MODEL": "local_models/image_model"
    }

    def __init__(self, config: Dict[str, Any] = None, cache_dir: str = "model_cache",
                 use_gpu: bool = True, max_models_in_memory: int = 3):
        # Logger setup (only attach a handler once per process).
        self.logger = logging.getLogger("ModelManager")
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'
            )
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        self.logger.info("🚀 初始化模型管理器...")

        # Core parameters.
        self.config = config or {}
        self.cache_dir = normalize_path(cache_dir)  # normalized path
        self.use_gpu = use_gpu
        self.max_models_in_memory = max_models_in_memory

        # Ensure the cache directory exists.
        os.makedirs(self.cache_dir, exist_ok=True)

        # Registry of known models (persisted to disk).
        self._persistent_registry = self._load_or_create_registry()
        # Models currently held in memory.
        self.loaded_models: Dict[str, Any] = {}

        # Auto-register default models from the configuration.
        self._register_default_models()
        self.logger.info(f"✅ 模型管理器初始化完成 (GPU: {'启用' if use_gpu else '禁用'})")
        self.logger.info(f"已注册模型: {list(self._persistent_registry.keys())}")

    def _load_or_create_registry(self) -> Dict[str, dict]:
        """Load the model registry from disk, creating an empty one if
        missing; returns {} on any failure."""
        try:
            registry_path = Path(normalize_path(self.MODEL_REGISTRY_FILE))
            if registry_path.exists():
                with open(registry_path, 'r', encoding='utf-8') as f:
                    registry = json.load(f)
                self.logger.info(f"📋 成功加载模型注册表: {registry_path}")
                return registry
            self.logger.warning(f"⚠️ 模型注册表不存在,创建新文件: {registry_path}")
            with open(registry_path, 'w', encoding='utf-8') as f:
                json.dump({}, f, indent=2)
            return {}
        except Exception as e:
            self.logger.error(f"❌ 处理模型注册表失败: {str(e)}")
            return {}

    def _register_default_models(self):
        """Register every model declared in the configuration, merged over
        the built-in default paths."""
        model_settings = self.config.get("model_settings", {})
        # Config-provided paths override the built-in defaults.
        default_paths = {**self.DEFAULT_MODEL_PATHS, **{
            name: info.get("path", self.DEFAULT_MODEL_PATHS.get(name, ""))
            for name, info in model_settings.items()
        }}
        for model_name, model_path in default_paths.items():
            if model_name not in self._persistent_registry:
                abs_path = normalize_path(model_path)
                model_type = model_settings.get(model_name, {}).get("type", "text")
                self.register_model(model_name, abs_path, model_type)

    def _save_registry(self):
        """Persist the registry to disk; returns True on success."""
        try:
            registry_path = normalize_path(self.MODEL_REGISTRY_FILE)
            with open(registry_path, 'w', encoding='utf-8') as f:
                json.dump(self._persistent_registry, f, indent=2, ensure_ascii=False)
            self.logger.info(f"💾 模型注册表已保存: {registry_path}")
            return True
        except Exception as e:
            self.logger.error(f"❌ 保存模型注册表失败: {str(e)}")
            return False

    def register_model(self, model_name: str, model_path: str,
                       model_type: str = "text",
                       adapter_config: Optional[dict] = None) -> bool:
        """Register a new model.

        Validates the path (local directory/file or HuggingFace ID),
        records a checksum for local models, and persists the registry.
        Returns True on success.
        """
        exists, is_local = self._check_model_exists(model_path)
        if not exists:
            self.logger.error(f"❌ 模型路径不可访问: {model_path}")
            return False

        # Checksum only makes sense for local files.
        checksum = "unknown"
        if is_local:
            try:
                checksum = self._calculate_checksum(model_path)
            except Exception as e:
                self.logger.warning(f"⚠️ 无法计算校验和: {str(e)}")
                checksum = "error"

        self._persistent_registry[model_name] = {
            "path": model_path,
            "type": model_type,
            "status": "unloaded",
            "checksum": checksum,
            "last_accessed": time.time(),
            "adapter": adapter_config,
            "is_local": is_local
        }
        self.logger.info(f"✅ 模型注册成功: {model_name} ({model_type})")
        self._save_registry()
        return True

    def _check_model_exists(self, model_path: str) -> Tuple[bool, bool]:
        """Return (exists, is_local) for *model_path*."""
        # HuggingFace model IDs are accepted without a local check.
        if is_valid_hf_id(model_path):
            self.logger.info(f"🔍 检测到HuggingFace模型ID: {model_path}")
            return True, False
        # Normalized local path first, then the raw (possibly relative) one.
        abs_path = normalize_path(model_path)
        if os.path.exists(abs_path):
            return True, True
        if os.path.exists(model_path):
            return True, True
        return False, False

    def _calculate_checksum(self, model_path: str) -> str:
        """SHA-256 checksum of a model: for a directory, hashes only the
        key weight/config files; for a single file, hashes the file."""
        abs_path = normalize_path(model_path)
        if os.path.isdir(abs_path):
            sha256 = hashlib.sha256()
            key_files = ["pytorch_model.bin", "model.safetensors", "config.json"]
            for root, _, files in os.walk(abs_path):
                for file in files:
                    if file in key_files:
                        file_path = os.path.join(root, file)
                        with open(file_path, 'rb') as f:
                            while chunk := f.read(8192):
                                sha256.update(chunk)
            return sha256.hexdigest()
        # Single model file.
        with open(abs_path, 'rb') as f:
            return hashlib.sha256(f.read()).hexdigest()

    def load_model(self, model_name: str, force_reload: bool = False) -> Tuple[bool, Any]:
        """Load a registered model into memory.

        Returns (success, model). Evicts the least-recently-used model
        when the in-memory limit would be exceeded.
        """
        if model_name not in self._persistent_registry:
            self.logger.error(f"❌ 模型未注册: {model_name}")
            return False, None

        model_info = self._persistent_registry[model_name]
        model_path = model_info["path"]
        # HuggingFace IDs must not be normalized as filesystem paths.
        abs_path = normalize_path(model_path) if model_info.get("is_local", True) else model_path

        # Already in memory and no forced reload requested.
        if model_name in self.loaded_models and not force_reload:
            self.logger.info(f"📦 模型已在内存中: {model_name}")
            model_info["last_accessed"] = time.time()
            return True, self.loaded_models[model_name]

        # Respect the memory budget.
        if len(self.loaded_models) >= self.max_models_in_memory:
            self._unload_least_recently_used()

        try:
            self.logger.info(f"🔄 加载模型: {model_name} ({model_info['type']})")
            model_type = model_info["type"]
            if model_type == "text":
                model = self._load_text_model(model_info, abs_path)
            elif model_type == "image":
                model = self._load_image_model(model_info, abs_path)
            elif model_type == "audio":
                model = self._load_audio_model(model_info, abs_path)
            else:
                self.logger.error(f"❌ 不支持的模型类型: {model_type}")
                return False, None

            # Bookkeeping after a successful load.
            self.loaded_models[model_name] = model
            model_info["status"] = "loaded"
            model_info["last_accessed"] = time.time()
            self._save_registry()
            self.logger.info(f"✅ 模型加载成功: {model_name}")
            return True, model
        except ImportError as e:
            self.logger.error(f"❌ 缺失依赖库: {str(e)}")
            return False, None
        except Exception as e:
            self.logger.error(f"❌ 模型加载失败: {model_name}, 路径: {abs_path}, 错误: {str(e)}")
            model_info["status"] = "error"
            return False, None

    def _load_text_model(self, model_info: dict, model_path: str) -> Any:
        """Load a causal-LM text model via transformers; on a path error,
        retries once with a discovered fallback path."""
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer
        except ImportError:
            self.logger.error("❌ transformers库未安装")
            raise RuntimeError("transformers not installed")

        self.logger.debug(f"🔧 加载文本模型: {model_path}")
        device = "cuda" if self.use_gpu else "cpu"
        try:
            tokenizer = AutoTokenizer.from_pretrained(model_path, cache_dir=self.cache_dir)
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                cache_dir=self.cache_dir,
                device_map=device if self.use_gpu else None
            )
            return {
                "model": model,
                "tokenizer": tokenizer,
                "info": model_info
            }
        except OSError as e:
            # Path problem: try known fallback locations once.
            self.logger.error(f"❌ 加载失败: 请检查路径 '{model_path}' 是否正确")
            fallback_path = self._try_find_model_path(model_path)
            if fallback_path:
                self.logger.warning(f"⚠️ 尝试备用路径: {fallback_path}")
                return self._load_text_model(model_info, fallback_path)
            raise
        except Exception as e:
            self.logger.error(f"❌ 加载过程中发生意外错误: {str(e)}")
            raise

    def _try_find_model_path(self, original_path: str) -> Optional[str]:
        """Probe fallback locations for a model directory; None if absent."""
        # 1. Project-local models directory.
        project_models = os.path.join(os.getcwd(), "local_models", os.path.basename(original_path))
        if os.path.exists(project_models):
            return project_models
        # 2. The manager's cache directory.
        cache_path = os.path.join(self.cache_dir, "models", os.path.basename(original_path))
        if os.path.exists(cache_path):
            return cache_path
        # 3. Sibling of the current working directory.
        parent_path = os.path.join(os.path.dirname(os.getcwd()), os.path.basename(original_path))
        if os.path.exists(parent_path):
            return parent_path
        return None

    def unload_model(self, model_name: str = None) -> bool:
        """Unload one model, or every loaded model when name is None."""
        if model_name is None:
            self.logger.info("卸载所有模型")
            for name in list(self.loaded_models.keys()):
                if not self._unload_single_model(name):
                    self.logger.error(f"❌ 卸载模型失败: {name}")
            return True
        return self._unload_single_model(model_name)

    def _unload_single_model(self, model_name: str) -> bool:
        """Release one model's memory and update the registry."""
        if model_name not in self.loaded_models:
            self.logger.warning(f"⚠️ 模型未加载: {model_name}")
            return False
        try:
            # Drop explicit references before garbage collection.
            model_data = self.loaded_models[model_name]
            if "model" in model_data:
                del model_data["model"]
            if "tokenizer" in model_data:
                del model_data["tokenizer"]
            del self.loaded_models[model_name]
            gc.collect()

            # Also release CUDA memory when running on GPU.
            if self.use_gpu:
                try:
                    import torch
                    torch.cuda.empty_cache()
                    self.logger.debug("♻️ 已清理GPU缓存")
                except ImportError:
                    pass

            if model_name in self._persistent_registry:
                self._persistent_registry[model_name]["status"] = "unloaded"
                self._save_registry()
            self.logger.info(f"🗑️ 模型已卸载: {model_name}")
            return True
        except Exception as e:
            self.logger.error(f"❌ 卸载模型失败: {model_name}, 错误: {str(e)}")
            return False

    def _unload_least_recently_used(self):
        """Evict the loaded model with the oldest last_accessed stamp."""
        if not self.loaded_models:
            return
        lru_model = None
        lru_time = float('inf')
        for model_name in self.loaded_models:
            last_accessed = self._persistent_registry[model_name].get("last_accessed", 0)
            if last_accessed < lru_time:
                lru_time = last_accessed
                lru_model = model_name
        if lru_model:
            self.logger.info(f"♻️ 卸载最近最少使用的模型: {lru_model}")
            self._unload_single_model(lru_model)

    def _load_image_model(self, model_info: dict, model_path: str) -> Any:
        """Load an image-generation pipeline via diffusers."""
        self.logger.info(f"🖼️ 加载图像模型: {model_path}")
        try:
            from diffusers import StableDiffusionPipeline
            import torch
            device = "cuda" if self.use_gpu and torch.cuda.is_available() else "cpu"
            pipeline = StableDiffusionPipeline.from_pretrained(
                model_path,
                cache_dir=self.cache_dir,
                torch_dtype=torch.float16 if device == "cuda" else torch.float32
            ).to(device)
            return {
                "pipeline": pipeline,
                "info": model_info
            }
        except ImportError:
            self.logger.error("❌ diffusers库未安装,无法加载图像模型")
            raise
        except Exception as e:
            self.logger.error(f"❌ 图像模型加载失败: {str(e)}")
            raise

    def _load_audio_model(self, model_info: dict, model_path: str) -> Any:
        """Load a speech model (processor + seq2seq) via transformers."""
        self.logger.info(f"🎵 加载音频模型: {model_path}")
        try:
            from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
            import torch
            device = "cuda" if self.use_gpu and torch.cuda.is_available() else "cpu"
            processor = AutoProcessor.from_pretrained(model_path, cache_dir=self.cache_dir)
            model = AutoModelForSpeechSeq2Seq.from_pretrained(
                model_path,
                cache_dir=self.cache_dir,
                torch_dtype=torch.float16 if device == "cuda" else torch.float32
            ).to(device)
            return {
                "model": model,
                "processor": processor,
                "info": model_info
            }
        except ImportError:
            self.logger.error("❌ transformers库未安装,无法加载音频模型")
            raise
        except Exception as e:
            self.logger.error(f"❌ 音频模型加载失败: {str(e)}")
            raise

    def shutdown(self):
        """Unload all models and shut the manager down."""
        self.logger.info("🛑 关闭模型管理器...")
        self.unload_model()  # unload everything
        self.logger.info("✅ 模型管理器已关闭")
“
4.”# E:\AI_System\agent\diagnostic_system.py
import logging
import psutil
import time
import random
class DiagnosticSystem:
    """Self-diagnostics: probes the core agent modules and samples
    system resource usage (CPU / memory / GPU / disk)."""

    def __init__(self):
        # BUG FIX: was `def init(self)`, so Python never called it and
        # self.logger was never created.
        self.logger = logging.getLogger("DiagnosticSystem")

    def check_modules(self):
        """Check the status of every core module; returns a result dict."""
        results = {
            "cognitive_system": self._check_cognitive(),
            "environment_interface": self._check_environment(),
            "affective_system": self._check_affective(),
            "system_resources": self._check_resources()
        }
        return results

    def _check_cognitive(self):
        """Probe the cognitive architecture module (import check)."""
        try:
            from .cognitive_architecture import CognitiveSystem
            # NOTE(review): assumes CognitiveSystem exposes a VERSION
            # class attribute -- confirm against cognitive_architecture.
            return {
                "status": "✅ 正常运行",
                "version": CognitiveSystem.VERSION,
                "last_heartbeat": time.time() - random.randint(1, 10)
            }
        except Exception as e:
            return {"status": "❌ 异常", "error": str(e)}

    def _check_environment(self):
        """Probe the environment interface module (import check)."""
        try:
            from .environment_interface import EnvironmentInterface
            return {
                "status": "✅ 正常运行",
                "connection": "active",
                "last_ping": time.time() - random.randint(1, 5)
            }
        except Exception as e:
            return {"status": "❌ 异常", "error": str(e)}

    def _check_affective(self):
        """Probe the affective system module (import check)."""
        try:
            from .affective_system import AffectiveSystem
            return {
                "status": "✅ 正常运行",
                "emotion_state": "neutral",
                "intensity": random.randint(1, 100)
            }
        except Exception as e:
            return {"status": "❌ 异常", "error": str(e)}

    def _check_resources(self):
        """Sample CPU / memory / GPU / disk usage via psutil."""
        try:
            return {
                "cpu": f"{psutil.cpu_percent()}%",
                "memory": f"{psutil.virtual_memory().percent}%",
                "gpu": self._get_gpu_status(),
                "disk": self._get_disk_usage()
            }
        except Exception as e:
            return {"error": f"资源检查失败: {str(e)}"}

    def _get_gpu_status(self):
        """GPU stats via gpustat; degrades to a message when unavailable."""
        try:
            import gpustat
            stats = gpustat.new_query()
            return [{
                "id": gpu.index,
                "utilization": f"{gpu.utilization}%",
                "memory": f"{gpu.memory_used}/{gpu.memory_total}MB",
                "temperature": f"{gpu.temperature}°C"
            } for gpu in stats.gpus]
        except ImportError:
            return "⚠️ gpustat 未安装"
        except Exception as e:
            return f"❌ GPU检测失败: {str(e)}"

    def _get_disk_usage(self):
        """Disk usage of the root filesystem, in whole GB."""
        try:
            disk = psutil.disk_usage('/')
            return {
                "total": f"{disk.total // (1024 ** 3)}GB",
                "used": f"{disk.used // (1024 ** 3)}GB",
                "free": f"{disk.free // (1024 ** 3)}GB",
                "percent": f"{disk.percent}%"
            }
        except Exception as e:
            return f"❌ 磁盘检测失败: {str(e)}"
“
4.organize_repo.bat 这个你让我新建的文件是干嘛的?我的仓库只需要整理 不需要删除 你能理解吗?你要是可以整理 能帮我把E:\ai_temp里面的”wechat translate 2025-08-09 141611 696.pngwechat translate 2025-08-09 170304 577.pngwechat translate 2025-08-10 024015 869.pngwechat translate 2025-08-10 115402 030.pngwechat translate 2025-08-10 120045 852.pngwechat translate 2025-08-10 121801 405.pngwechat translate 2025-08-10 121939 750.pngwechat translate 2025-08-10 164544 700.pngwechat translate 2025-08-10 164738 407.pngwechat translate 2025-08-10 183846 043.pngwechat translate 2025-08-17 014313 045.pngwechat translate 2025-08-17 014801 635.pngwechat translate 2025-08-17 014813 557.pngwechat translate 2025-08-17 014824 680.pngwechat translate 2025-08-17 014840 935.png“”wechat 2025-08-17 014823 645.pngwechat 2025-08-17 014839 552.png
wechat 2025-08-17 023912 994.png
wechat 2025-08-17 104544 487.png
wechat 2025-08-17 104802 038.png
wechat 2025-08-17 105923 918.png
wechat 2025-08-17 110036 545.png
wechat 2025-08-17 112641 042.png
wechat 2025-08-17 112711 884.png
wechat 2025-08-17 124218 228.png“”b0a50e77-4abb-47bc-b627-c8447dfc0cdb.tmpb0e7b665-9e1b-49b1-b4b6-b82ee9d9c002.tmpb6d33607-1696-4c04-80f5-38baa97cf874.tmpb34d0fed-e3a3-457c-aa5d-bc5c05196d68.tmpb982a35d-8e2b-4357-bf5a-7d8379bde852.tmpb4950b49-3ef1-452f-a121-481149d634d9.tmpb8608a9f-1605-411a-890b-f4d5ac3e13d5.tmpb069211d-c01b-4ddb-b2a4-ca0cc571b894.tmpbc66bff3-e877-4971-b1b8-bccdbd475364.tmpbc9044ce-6842-411c-bfb8-1ddb93efa2bd.tmpc1fa1b81-defb-49cb-adac-7895f1c7ef2f.tmpc17ebdcc-3e62-46f3-ad70-23fb637a01b9.tmpc83c7258-212e-42b1-8305-b91c52a39612.tmpc93d0105-dbe5-4e4b-ab16-ea9179f7659d.tmpc0453cd5-9cf4-46e3-b798-fa625d576a4c.tmpc528dfe9-0091-480a-9d19-bd2f196bf4db.tmpc880a954-fd81-4a05-a7d0-71fafb05c76c.tmpc8654c92-8e10-49c5-8e74-79fe9692a262.tmp“这些文件 都整理好吗?它们太多了,我不想删除 只想整理 我的空间完全够用
5.我发你的三个源文件 你还是改好了给我吧 你让我改我不会,其他的 都按你说的弄完了