But I really don't think the changes you made are reliable at all.
1. # E:\AI_System\agent\model_manager.py
import os
import sys
import logging
import json
import hashlib
import gc
import time
from pathlib import Path
from typing import Dict, Any, Optional, Tuple, List
from utils.path_utils import normalize_path, is_valid_hf_id
class ModelManager:
"""AI模型管理器 - 完整修复版(星型架构适配版)"""
MODEL_REGISTRY_FILE = "model_registry.json"
DEFAULT_MODEL_PATHS = {
"TEXT_BASE": "local_models/text_base",
"TEXT_CHAT": "local_models/text_chat",
"IMAGE_MODEL": "local_models/image_model"
}
def __init__(
self,
        config: Optional[Dict[str, Any]] = None,
cache_dir: str = "model_cache",
use_gpu: bool = True,
max_models_in_memory: int = 3
):
# 配置日志
self.logger = logging.getLogger("ModelManager")
self.logger.setLevel(logging.INFO)
if not self.logger.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.info("🚀 初始化模型管理器...")
# 初始化参数
self.config = config or {}
self.cache_dir = normalize_path(cache_dir) # 使用路径规范化
self.use_gpu = use_gpu
self.max_models_in_memory = max_models_in_memory
# 确保缓存目录存在
os.makedirs(self.cache_dir, exist_ok=True)
# 加载或创建注册表
self._persistent_registry = self._load_or_create_registry()
# 已加载的模型
self.loaded_models: Dict[str, Any] = {}
# 自动注册默认模型
self._register_default_models()
self.logger.info(f"✅ 模型管理器初始化完成 (GPU: {'启用' if use_gpu else '禁用'})")
self.logger.info(f"已注册模型: {list(self._persistent_registry.keys())}")
# 星型架构协调器引用
self.orchestrator = None
def set_orchestrator(self, orchestrator):
"""连接中枢协调器"""
self.orchestrator = orchestrator
def is_healthy(self) -> bool:
"""健康检查"""
return len(self._persistent_registry) > 0
def get_status(self) -> dict:
"""返回模块状态"""
loaded_models = [model for model, info in self._persistent_registry.items()
if info.get("status") == "loaded"]
return {
"status": "running",
"loaded_models": loaded_models,
"total_models": len(self._persistent_registry),
"cache_dir": self.cache_dir
}
def _load_or_create_registry(self) -> Dict[str, dict]:
"""加载或创建模型注册表"""
try:
registry_path = Path(normalize_path(self.MODEL_REGISTRY_FILE)) # 路径规范化
if registry_path.exists():
with open(registry_path, 'r', encoding='utf-8') as f:
registry = json.load(f)
self.logger.info(f"📋 成功加载模型注册表: {registry_path}")
return registry
self.logger.warning(f"⚠️ 模型注册表不存在,创建新文件: {registry_path}")
with open(registry_path, 'w', encoding='utf-8') as f:
json.dump({}, f, indent=2)
return {}
except Exception as e:
self.logger.error(f"❌ 处理模型注册表失败: {str(e)}")
return {}
def _register_default_models(self):
"""注册配置文件中的默认模型"""
model_settings = self.config.get("model_settings", {})
# 合并默认路径和配置路径
default_paths = {
**self.DEFAULT_MODEL_PATHS,
**{
name: info.get("path", self.DEFAULT_MODEL_PATHS.get(name, ""))
for name, info in model_settings.items()
}
}
# 注册模型
for model_name, model_path in default_paths.items():
if model_name not in self._persistent_registry:
abs_path = normalize_path(model_path) # 路径规范化
model_type = model_settings.get(model_name, {}).get("type", "text")
self.register_model(model_name, abs_path, model_type)
def _save_registry(self):
"""保存模型注册表"""
try:
registry_path = normalize_path(self.MODEL_REGISTRY_FILE) # 路径规范化
with open(registry_path, 'w', encoding='utf-8') as f:
json.dump(self._persistent_registry, f, indent=2, ensure_ascii=False)
self.logger.info(f"💾 模型注册表已保存: {registry_path}")
return True
except Exception as e:
self.logger.error(f"❌ 保存模型注册表失败: {str(e)}")
return False
def register_model(
self,
model_name: str,
model_path: str,
model_type: str = "text",
adapter_config: Optional[dict] = None
) -> bool:
"""注册新模型"""
# 检查模型是否存在
exists, is_local = self._check_model_exists(model_path)
if not exists:
self.logger.error(f"❌ 模型路径不可访问: {model_path}")
return False
# 计算校验和
checksum = "unknown"
if is_local:
try:
checksum = self._calculate_checksum(model_path)
except Exception as e:
self.logger.warning(f"⚠️ 无法计算校验和: {str(e)}")
checksum = "error"
# 添加到注册表
self._persistent_registry[model_name] = {
"path": model_path,
"type": model_type,
"status": "unloaded",
"checksum": checksum,
"last_accessed": time.time(),
"adapter": adapter_config,
"is_local": is_local
}
self.logger.info(f"✅ 模型注册成功: {model_name} ({model_type})")
self._save_registry()
return True
def _check_model_exists(self, model_path: str) -> Tuple[bool, bool]:
"""检查模型路径是否有效"""
# 如果是HuggingFace模型ID
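        # 注意: 只含一个 '/' 的相对本地路径(如 "local_models/text_chat")同样会通过 is_valid_hf_id,因而被当作远程 HF ID 而非本地路径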
if is_valid_hf_id(model_path):
# 使用路径工具验证
self.logger.info(f"🔍 检测到HuggingFace模型ID: {model_path}")
return True, False
# 检查本地路径
abs_path = normalize_path(model_path) # 路径规范化
if os.path.exists(abs_path):
return True, True
# 尝试相对路径
if os.path.exists(model_path):
return True, True
return False, False
def _calculate_checksum(self, model_path: str) -> str:
"""计算模型校验和"""
abs_path = normalize_path(model_path) # 路径规范化
if os.path.isdir(abs_path):
sha256 = hashlib.sha256()
key_files = ["pytorch_model.bin", "model.safetensors", "config.json"]
for root, _, files in os.walk(abs_path):
for file in files:
if file in key_files:
file_path = os.path.join(root, file)
with open(file_path, 'rb') as f:
while chunk := f.read(8192):
sha256.update(chunk)
return sha256.hexdigest()
# 单个模型文件
with open(abs_path, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def load_model(self, model_name: str, force_reload: bool = False) -> Tuple[bool, Any]:
"""加载模型到内存"""
if model_name not in self._persistent_registry:
self.logger.error(f"❌ 模型未注册: {model_name}")
return False, None
model_info = self._persistent_registry[model_name]
model_path = model_info["path"]
abs_path = normalize_path(model_path) if model_info.get("is_local", True) else model_path # 路径规范化
# 如果模型已加载且不需要强制重载
if model_name in self.loaded_models and not force_reload:
self.logger.info(f"📦 模型已在内存中: {model_name}")
model_info["last_accessed"] = time.time()
return True, self.loaded_models[model_name]
# 检查内存占用
if len(self.loaded_models) >= self.max_models_in_memory:
self._unload_least_recently_used()
# 实际加载模型
try:
self.logger.info(f"🔄 加载模型: {model_name} ({model_info['type']})")
model_type = model_info["type"]
if model_type == "text":
model = self._load_text_model(model_info, abs_path)
elif model_type == "image":
model = self._load_image_model(model_info, abs_path)
elif model_type == "audio":
model = self._load_audio_model(model_info, abs_path)
else:
self.logger.error(f"❌ 不支持的模型类型: {model_type}")
return False, None
# 更新状态
self.loaded_models[model_name] = model
model_info["status"] = "loaded"
model_info["last_accessed"] = time.time()
self._save_registry()
self.logger.info(f"✅ 模型加载成功: {model_name}")
return True, model
except ImportError as e:
self.logger.error(f"❌ 缺失依赖库: {str(e)}")
return False, None
except Exception as e:
self.logger.error(f"❌ 模型加载失败: {model_name}, 路径: {abs_path}, 错误: {str(e)}")
model_info["status"] = "error"
return False, None
def _load_text_model(self, model_info: dict, model_path: str) -> Any:
"""加载文本模型"""
try:
from transformers import AutoModelForCausalLM, AutoTokenizer
except ImportError:
self.logger.error("❌ transformers库未安装")
raise RuntimeError("transformers not installed")
self.logger.debug(f"🔧 加载文本模型: {model_path}")
device = "cuda" if self.use_gpu else "cpu"
try:
tokenizer = AutoTokenizer.from_pretrained(model_path, cache_dir=self.cache_dir)
model = AutoModelForCausalLM.from_pretrained(
model_path,
cache_dir=self.cache_dir,
device_map=device if self.use_gpu else None
)
return {
"model": model,
"tokenizer": tokenizer,
"info": model_info
}
except OSError as e:
self.logger.error(f"❌ 加载失败: 请检查路径 '{model_path}' 是否正确")
fallback_path = self._try_find_model_path(model_path)
if fallback_path:
self.logger.warning(f"⚠️ 尝试备用路径: {fallback_path}")
return self._load_text_model(model_info, fallback_path)
raise
except Exception as e:
self.logger.error(f"❌ 加载过程中发生意外错误: {str(e)}")
raise
def _try_find_model_path(self, original_path: str) -> Optional[str]:
"""尝试找到备用模型路径"""
# 1. 检查项目内的模型目录
project_models = os.path.join(os.getcwd(), "local_models", os.path.basename(original_path))
if os.path.exists(project_models):
return project_models
# 2. 检查缓存目录
cache_path = os.path.join(self.cache_dir, "models", os.path.basename(original_path))
if os.path.exists(cache_path):
return cache_path
# 3. 尝试父目录
parent_path = os.path.join(os.path.dirname(os.getcwd()), os.path.basename(original_path))
if os.path.exists(parent_path):
return parent_path
return None
def unload_model(self, model_name: str = None) -> bool:
"""卸载模型"""
if model_name is None:
self.logger.info("卸载所有模型")
for name in list(self.loaded_models.keys()):
if not self._unload_single_model(name):
self.logger.error(f"❌ 卸载模型失败: {name}")
return True
return self._unload_single_model(model_name)
def _unload_single_model(self, model_name: str) -> bool:
"""卸载单个模型"""
if model_name not in self.loaded_models:
self.logger.warning(f"⚠️ 模型未加载: {model_name}")
return False
try:
# 显式释放模型资源
            model_data = self.loaded_models[model_name]
            # 同时覆盖文本(model/tokenizer)、图像(pipeline)和音频(processor)条目
            for key in ("model", "tokenizer", "pipeline", "processor"):
                if key in model_data:
                    del model_data[key]
# 删除引用并调用垃圾回收
del self.loaded_models[model_name]
gc.collect()
# 如果使用GPU,额外清理CUDA缓存
if self.use_gpu:
try:
import torch
torch.cuda.empty_cache()
self.logger.debug("♻️ 已清理GPU缓存")
except ImportError:
pass
# 更新注册表状态
if model_name in self._persistent_registry:
self._persistent_registry[model_name]["status"] = "unloaded"
self._save_registry()
self.logger.info(f"🗑️ 模型已卸载: {model_name}")
return True
except Exception as e:
self.logger.error(f"❌ 卸载模型失败: {model_name}, 错误: {str(e)}")
return False
def _unload_least_recently_used(self):
"""卸载最近最少使用的模型"""
if not self.loaded_models:
return
# 找到最近最少使用的模型
lru_model = None
lru_time = float('inf')
for model_name in self.loaded_models:
last_accessed = self._persistent_registry[model_name].get("last_accessed", 0)
if last_accessed < lru_time:
lru_time = last_accessed
lru_model = model_name
if lru_model:
self.logger.info(f"♻️ 卸载最近最少使用的模型: {lru_model}")
self._unload_single_model(lru_model)
def _load_image_model(self, model_info: dict, model_path: str) -> Any:
"""加载图像模型"""
self.logger.info(f"🖼️ 加载图像模型: {model_path}")
try:
# 示例:使用diffusers库加载SDXL模型
from diffusers import StableDiffusionPipeline
import torch
device = "cuda" if self.use_gpu and torch.cuda.is_available() else "cpu"
pipeline = StableDiffusionPipeline.from_pretrained(
model_path,
cache_dir=self.cache_dir,
torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)
return {
"pipeline": pipeline,
"info": model_info
}
except ImportError:
self.logger.error("❌ diffusers库未安装,无法加载图像模型")
raise
except Exception as e:
self.logger.error(f"❌ 图像模型加载失败: {str(e)}")
raise
def _load_audio_model(self, model_info: dict, model_path: str) -> Any:
"""加载音频模型"""
self.logger.info(f"🎵 加载音频模型: {model_path}")
try:
# 示例:加载语音识别模型
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
import torch
device = "cuda" if self.use_gpu and torch.cuda.is_available() else "cpu"
processor = AutoProcessor.from_pretrained(model_path, cache_dir=self.cache_dir)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_path,
cache_dir=self.cache_dir,
torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)
return {
"model": model,
"processor": processor,
"info": model_info
}
except ImportError:
self.logger.error("❌ transformers库未安装,无法加载音频模型")
raise
except Exception as e:
self.logger.error(f"❌ 音频模型加载失败: {str(e)}")
raise
def shutdown(self):
"""关闭模型管理器"""
self.logger.info("🛑 关闭模型管理器...")
self.unload_model() # 卸载所有模型
# 清理缓存(可选)
self.logger.info("✅ 模型管理器已关闭")
2. # E:\AI_System\main.py
import os
import sys
import time
import logging
import signal
import json
from pathlib import Path
# 修复导入问题 - 确保根目录在路径中
sys.path.insert(0, str(Path(__file__).parent.resolve()))
try:
from core.config_system import CoreConfig
except ImportError as e:
print(f"❌ 导入错误: {e}")
sys.exit(1)
try:
from core.command_listener import start_command_listener
from agent.model_manager import ModelManager
from core.cognitive_orchestrator import CognitiveOrchestrator
from agent.environment_interface import EnvironmentInterface
from agent.diagnostic_system import DiagnosticSystem
from utils.path_utils import normalize_path, clean_path_cache
except ImportError as e:
print(f"❌ 关键模块导入失败: {e}")
sys.exit(1)
# ====================== 基础路径配置 ======================
# 确保所有操作都在E盘
BASE_DRIVE = "E:"
PROJECT_ROOT = Path(__file__).parent.resolve()
WORKSPACE_PATH = Path(BASE_DRIVE) / "AI_Workspace"
# 创建必要目录
WORKSPACE_PATH.mkdir(parents=True, exist_ok=True)
# ====================== 日志配置 ======================
def setup_logging() -> logging.Logger:
"""配置日志系统(完全在E盘操作)"""
logs_dir = WORKSPACE_PATH / "logs"
logs_dir.mkdir(parents=True, exist_ok=True)
# 配置日志级别
log_level_name = CoreConfig.get("log_level", "INFO").upper()
log_level_mapping = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL
}
log_level = log_level_mapping.get(log_level_name, logging.INFO)
# 配置日志格式
log_format = "%(asctime)s - [%(levelname)s] - %(name)s - %(message)s"
formatter = logging.Formatter(log_format)
# 配置根日志记录器
root_logger = logging.getLogger()
root_logger.setLevel(log_level)
# 移除所有现有处理器
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
# 创建日志文件(在E盘)
log_file = logs_dir / f"system_{time.strftime('%Y%m%d_%H%M%S')}.log"
try:
file_handler = logging.FileHandler(log_file, encoding='utf-8')
file_handler.setFormatter(formatter)
root_logger.addHandler(file_handler)
except Exception as e:
print(f"⚠️ 无法创建日志文件: {e}")
# 添加控制台处理器
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
# 创建主日志记录器
main_logger = root_logger.getChild("Main")
main_logger.info(f"日志系统初始化完成 (级别: {log_level_name})")
main_logger.info(f"日志文件位置: {log_file}")
return main_logger
# ====================== 模型路径处理 ======================
def get_model_paths(model_name: str) -> list:
"""返回所有可能的模型路径(仅限E盘)"""
return [
WORKSPACE_PATH / "AI_Models" / model_name,
Path(BASE_DRIVE) / "AI_Models" / model_name,
Path(BASE_DRIVE) / "Models" / model_name,
WORKSPACE_PATH / "models" / model_name
]
def find_valid_model_path(model_name: str, logger: logging.Logger) -> Path:
"""在E盘查找有效的模型路径"""
possible_paths = get_model_paths(model_name)
for path in possible_paths:
normalized_path = normalize_path(str(path))
if os.path.exists(normalized_path):
logger.info(f"✅ 找到模型路径: {normalized_path}")
return Path(normalized_path)
# 没有找到有效路径
logger.critical(f"🛑 在E盘找不到模型: {model_name}")
logger.critical("检查位置:")
for path in possible_paths:
logger.critical(f" - {path}")
raise FileNotFoundError(f"在E盘找不到模型: {model_name}")
# ====================== 主函数 ======================
def main():
"""主函数(完全E盘操作)"""
# 清理路径缓存
clean_path_cache()
# 初始化配置系统
CoreConfig.initialize()
# 设置日志
logger = setup_logging()
# 记录启动信息
logger.info("=" * 50)
logger.info(f"🌟 启动AI系统 - 弹性星型架构")
logger.info(f"🚀 工作空间: {WORKSPACE_PATH}")
logger.info(f"📂 项目目录: {PROJECT_ROOT}")
logger.info(f"🐍 Python版本: {sys.version}")
logger.info(f"🖥️ 操作系统: {sys.platform}")
logger.info("=" * 50)
# 初始化模型管理器
try:
# 获取模型管理器配置
model_manager_config = CoreConfig.get("model_manager", {})
# 模型缓存目录
cache_dir = WORKSPACE_PATH / "model_cache"
cache_dir.mkdir(parents=True, exist_ok=True)
model_manager = ModelManager(
config=model_manager_config,
cache_dir=str(cache_dir),
use_gpu=model_manager_config.get("use_gpu", True),
max_models_in_memory=model_manager_config.get("max_models_in_memory", 3)
)
logger.info("✅ 模型管理器初始化完成")
except Exception as e:
logger.error(f"❌ 模型管理器初始化失败: {e}", exc_info=True)
sys.exit(1)
# 初始化卫星模块 - 关键修复
try:
# 获取模块配置
ei_config = CoreConfig.get("environment_interface", {})
ds_config = CoreConfig.get("diagnostic_system", {})
# 创建环境接口实例
environment_interface = EnvironmentInterface(
name="环境接口",
config=ei_config
)
# 创建诊断系统实例
diagnostic_system = DiagnosticSystem(
name="诊断系统",
config=ds_config
)
logger.info("✅ 卫星模块初始化完成")
except Exception as e:
logger.error(f"❌ 卫星模块初始化失败: {e}", exc_info=True)
sys.exit(1)
# 加载基础模型(仅在E盘查找)
base_model_key = "TEXT_BASE"
# 直接从配置获取模型设置
base_model_info = CoreConfig.get("model_settings.TEXT_BASE", {})
model_type = base_model_info.get("type", "text")
model_name = base_model_info.get("name", "Qwen2-7B")
try:
# 查找模型路径
model_path = find_valid_model_path(model_name, logger)
# 注册并加载模型
if model_manager.register_model(base_model_key, str(model_path), model_type):
success, model = model_manager.load_model(base_model_key)
if success:
logger.info(f"✅ 基础模型加载成功: {base_model_key}")
else:
logger.error(f"❌ 模型加载失败: {base_model_key}")
raise RuntimeError(f"模型加载失败: {base_model_key}")
else:
logger.error(f"❌ 模型注册失败: {base_model_key}")
raise RuntimeError(f"模型注册失败: {base_model_key}")
except Exception as e:
logger.error(f"❌ 模型初始化失败: {e}", exc_info=True)
sys.exit(1)
# 初始化星型协调器
try:
# 获取认知配置
cognitive_config = CoreConfig.get("cognitive_config", {})
# 系统状态存储目录
state_dir = WORKSPACE_PATH / "system_state"
state_dir.mkdir(parents=True, exist_ok=True)
cognitive_config["state_dir"] = str(state_dir)
# 创建星型协调器
orchestrator = CognitiveOrchestrator(config=cognitive_config)
# 连接模块 - 关键步骤
if orchestrator.connect_modules():
logger.info("✅ 星型架构协调器初始化成功")
else:
logger.warning("⚠️ 模块连接存在问题,系统进入降级模式")
except Exception as e:
logger.error(f"❌ 协调器初始化失败: {e}", exc_info=True)
sys.exit(1)
# 命令处理器
def command_handler(command: str) -> dict:
"""处理用户命令"""
logger.info(f"🔄 处理命令: {command}")
# 特殊命令处理
if command.lower() in ["exit", "quit", "stop"]:
return {"action": "shutdown", "message": "正在关闭系统..."}
if command.lower() in ["status", "health"]:
return orchestrator.get_system_status()
try:
# 通过协调器处理命令
return orchestrator.process_command(command)
except Exception as e:
logger.error(f"❌ 命令处理错误: {e}", exc_info=True)
return {"error": f"处理命令时出错: {e}"}
# 系统关闭处理
def shutdown_handler():
"""系统关闭处理"""
logger.info("🛑 关闭系统中...")
try:
# 保存状态
if 'orchestrator' in locals() and orchestrator is not None:
logger.info("💾 保存系统状态...")
orchestrator.save_state()
# 关闭协调器
if 'orchestrator' in locals() and orchestrator is not None:
logger.info("🛑 关闭协调器...")
orchestrator.shutdown()
# 关闭模型管理器
if 'model_manager' in locals() and model_manager is not None:
logger.info("🛑 关闭模型管理器...")
model_manager.shutdown()
# 关闭卫星模块
if 'environment_interface' in locals() and environment_interface is not None:
logger.info("🛑 关闭环境接口...")
environment_interface.shutdown()
if 'diagnostic_system' in locals() and diagnostic_system is not None:
logger.info("🛑 关闭诊断系统...")
diagnostic_system.shutdown()
logger.info("✅ 系统已完全关闭")
sys.exit(0)
except Exception as e:
logger.error(f"❌ 关闭过程中出错: {e}", exc_info=True)
sys.exit(1)
# 信号处理
def signal_handler(sig, frame):
"""处理系统信号"""
signals = {signal.SIGINT: "Ctrl+C", signal.SIGTERM: "终止信号"}
logger.warning(f"⚠️ 收到 {signals.get(sig, sig)},关闭系统...")
shutdown_handler()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# 启动命令监听器
try:
command_listener = start_command_listener(command_handler=command_handler)
logger.info("✅ 命令监听器已启动")
except Exception as e:
logger.error(f"❌ 命令监听器启动失败: {e}", exc_info=True)
shutdown_handler()
return
logger.info("🌟 系统准备就绪! 输入命令开始交互 ('help' 查看命令列表)")
# 主循环
try:
while True:
# 休眠避免CPU占用过高
time.sleep(0.1)
# 检查是否有命令需要处理
if command_listener.command_queue.empty():
continue
# 处理命令队列
while not command_listener.command_queue.empty():
command = command_listener.command_queue.get()
response = command_handler(command)
# 处理关闭指令
if isinstance(response, dict) and response.get("action") == "shutdown":
shutdown_handler()
return
# 打印响应
if response:
print("\n" + ("-" * 50))
if isinstance(response, dict):
response_str = json.dumps(response, indent=2, ensure_ascii=False)
print(f"系统响应:\n{response_str}")
else:
print(f"系统响应: {str(response)}")
print("-" * 50 + "\n")
except KeyboardInterrupt:
logger.info("🛑 用户中断操作,关闭系统...")
shutdown_handler()
except Exception as e:
logger.critical(f"🔥 主循环错误: {e}", exc_info=True)
shutdown_handler()
# 全局异常处理
def global_exception_handler(exc_type, exc_value, exc_traceback):
"""全局异常处理器"""
    if issubclass(exc_type, KeyboardInterrupt):
        # 注意: sys.excepthook 已被替换为本函数,必须委托给内置的 sys.__excepthook__,否则会无限递归
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
# 尝试记录错误,如果日志系统尚未设置,直接打印
try:
logging.error("未捕获的全局异常", exc_info=(exc_type, exc_value, exc_traceback))
    except Exception:
pass
print(f"🔥 严重错误: {exc_type.__name__}: {exc_value}")
sys.exit(1)
if __name__ == "__main__":
sys.excepthook = global_exception_handler
try:
main()
except Exception as e:
print(f"🔥 主函数异常: {e}")
sys.exit(1)
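One detail worth flagging in main.py: sys.excepthook is reassigned to global_exception_handler, so delegating KeyboardInterrupt back through sys.excepthook would call the handler itself forever; the built-in sys.__excepthook__ is the safe fallback. A standalone sketch of that pattern:

import sys

def _hook(exc_type, exc_value, exc_tb):
    if issubclass(exc_type, KeyboardInterrupt):
        # Delegate to the original built-in hook, not sys.excepthook (which is now _hook)
        sys.__excepthook__(exc_type, exc_value, exc_tb)
        return
    print(f"Unhandled: {exc_type.__name__}: {exc_value}")

sys.excepthook = _hook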
3. E:\AI_System\model_registry.json:
{
"TEXT_BASE": {
"path": "E:\\AI_System\\AI_Models\\Qwen2-7B",
"type": "text",
"status": "unloaded",
"checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"last_accessed": 1756551316.8980067,
"adapter": null,
"is_local": true
},
"TEXT_CHAT": {
"path": "E:\\AI_System\\local_models/text_chat",
"type": "text",
"status": "unloaded",
"checksum": "unknown",
"last_accessed": 1756540612.467894,
"adapter": null,
"is_local": false
},
"IMAGE_MODEL": {
"path": "E:\\AI_System\\local_models/image_model",
"type": "image",
"status": "unloaded",
"checksum": "unknown",
"last_accessed": 1756540612.4689403,
"adapter": null,
"is_local": false
}
}
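Note that TEXT_CHAT and IMAGE_MODEL mix path separators and are marked "is_local": false even though their paths look local (see the note in _check_model_exists above). A small sketch to cross-check the registry against the filesystem, assuming the file sits at the path shown:

import json
import os

with open(r"E:\AI_System\model_registry.json", encoding="utf-8") as f:
    registry = json.load(f)

for name, info in registry.items():
    exists = os.path.exists(info["path"])
    if bool(info.get("is_local")) != exists:
        print(f"{name}: is_local={info.get('is_local')} but path exists={exists} -> {info['path']}")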
4. # E:\AI_System\utils\path_utils.py
import os
import re
import logging
from pathlib import Path
# 路径缓存
_path_cache = {}
logger = logging.getLogger("PathUtils")
def normalize_path(path: str) -> str:
"""标准化路径:处理环境变量、用户目录和相对路径"""
# 尝试从缓存获取
if path in _path_cache:
return _path_cache[path]
# 处理环境变量
if "$" in path or "%" in path:
expanded = os.path.expandvars(path)
else:
expanded = path
# 处理用户目录
if expanded.startswith("~"):
expanded = os.path.expanduser(expanded)
# 转换为绝对路径
abs_path = os.path.abspath(expanded)
# 缓存结果
_path_cache[path] = abs_path
return abs_path
def clean_path_cache():
"""清除路径缓存"""
global _path_cache
_path_cache = {}
logger.info("✅ 路径缓存已清除")
def is_valid_hf_id(model_id: str) -> bool:
"""
检查是否为有效的Hugging Face模型ID
"""
if not isinstance(model_id, str):
return False
# 检查格式:必须包含一个斜杠
if model_id.count('/') != 1:
return False
# 拆分用户名和模型名
user_part, model_part = model_id.split('/')
# 检查用户名部分
if not re.match(r"^[a-zA-Z0-9-_]{3,}$", user_part):
return False
# 检查模型名部分
if not re.match(r"^[a-zA-Z0-9-_\.]{3,}$", model_part):
return False
return True
def is_local_path(path: str) -> bool:
"""
检查路径是否为有效的本地路径
"""
try:
path = normalize_path(path)
return os.path.exists(path)
except Exception:
return False
def find_model_path(model_name: str) -> str:
"""
    在 E:\\AI_Models 目录中查找模型路径
"""
# 只在 E:\AI_Models 目录中查找
base_dir = "E:\\AI_Models"
# 尝试直接匹配模型目录
model_dir = os.path.join(base_dir, model_name)
if os.path.exists(model_dir):
return normalize_path(model_dir)
# 尝试匹配可能的变体
possible_variants = [
model_name,
model_name.replace("-", "_"),
model_name.lower(),
model_name.replace("_", "-"),
]
# 添加常见后缀变体
for variant in list(possible_variants): # 创建副本避免无限循环
if variant.endswith("-base"):
possible_variants.append(variant[:-5])
else:
possible_variants.append(variant + "-base")
possible_variants.append(variant + "-main")
# 移除重复项
possible_variants = list(set(possible_variants))
# 尝试每种可能的变体
for variant in possible_variants:
variant_path = os.path.join(base_dir, variant)
if os.path.exists(variant_path):
return normalize_path(variant_path)
# 递归搜索子目录
for root, dirs, files in os.walk(base_dir):
if model_name in dirs:
return normalize_path(os.path.join(root, model_name))
return None
def get_all_model_paths() -> dict:
"""
    获取 E:\\AI_Models 目录中的所有模型路径
"""
models_dir = "E:\\AI_Models"
model_paths = {}
if not os.path.exists(models_dir):
return model_paths
for model_dir in os.listdir(models_dir):
full_path = os.path.join(models_dir, model_dir)
if os.path.isdir(full_path):
model_paths[model_dir] = normalize_path(full_path)
return model_paths
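A quick check of these helpers, illustrating the HF-ID false positive that produced the "is_local": false registry entries above (example IDs are illustrative only):

from utils.path_utils import normalize_path, is_valid_hf_id

print(is_valid_hf_id("Qwen/Qwen2-7B"))           # True: exactly one slash, both parts valid
print(is_valid_hf_id("local_models/text_chat"))  # also True -- a relative local path with one
                                                 # slash is mistaken for a HuggingFace ID
print(normalize_path("~/models"))                # expands the user dir to an absolute path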
5. I already have all of these files. What you sent can't be dropped in as a direct replacement, so it's meaningless to me; worse, it could mislead me into wrongly overwriting my source files and losing core content. Do you understand what I mean? If you're going to send something, send complete files that can directly overwrite the originals, not low-quality, fake ones.