Task Self-Monitor

This post presents an image quality checking program implemented in C#. The program fetches the images to be checked from a database and uses an asynchronous task to display the number of remaining items in real time. The quality check covers image dimensions and total pixel count, and the hide flag in the database is updated according to the result.
```csharp
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Net;
using System.Threading.Tasks;

public class Program
{
    static readonly object lockobj = new object();

    static void Main(string[] args)
    {
        log4net.Config.XmlConfigurator.Configure();

        int c = 0;
        var makeIds = new List<int>();
        using (var db = new food_data_prod_imageQualityEntities())
        {
            makeIds = db.Make.Where(m => m.IsHide == false && m.UserId != -1).Select(m => m.Id).ToList();
        }
        var allC = makeIds.Count;

        // Long-running task that prints the remaining count once per second.
        var task = new Task(() =>
        {
            while (c < allC)
            {
                Console.WriteLine("Left:" + (allC - c));
                Task.Delay(1000).Wait();
            }
        }, TaskCreationOptions.LongRunning);

        task.Start();

        foreach (var id in makeIds)
        {
            using (var db = new food_data_prod_imageQualityEntities())
            {
                var make = db.Make.First(m => m.Id == id);
                var imageUrl = make.ImageUrl;

                var hideFlag = CheckQualityToHide(GetImageInfo, imageUrl, id).Result;
                make.IsHideByImageQuality = hideFlag;
                db.SaveChanges();

                // The lock guards the shared counter read by the progress task.
                lock (lockobj)
                {
                    c = c + 1;
                }
            }
        }
    }

    // Returns false when the image passes the quality check, true when it
    // should be hidden, and null when the image could not be inspected.
    public static async Task<bool?> CheckQualityToHide(Func<string, Tuple<int, int, int>> getImageInfo, string imageUrl, int id)
    {
        log4net.ILog log = log4net.LogManager.GetLogger("logger-name");

        try
        {
            var imageInfo = getImageInfo(imageUrl);

            // Pass when width > 1, height > 1 and the pixel count > 10.
            if (imageInfo.Item1 > 1 && imageInfo.Item2 > 1 && imageInfo.Item3 > 10)
            {
                return false;
            }
            return true;
        }
        catch (Exception ex)
        {
            log.Error("Id:" + id + ",Ex:" + ex);
            return null;
        }
    }

    // Downloads the image and returns (width, height, width * height).
    public static Tuple<int, int, int> GetImageInfo(string imageUrl)
    {
        using (var client = new WebClient())
        using (var stream = client.OpenRead(imageUrl))
        using (var pic = Image.FromStream(stream))
        {
            return new Tuple<int, int, int>(pic.Width, pic.Height, pic.Width * pic.Height);
        }
    }
}
```
```python
import yaml
import os
import threading
import time
import requests
import hashlib
import json
import base64
from typing import Dict, Any, Callable


class ConfigLoader:
    """Load and manage configuration from YAML file"""

    def __init__(self, config_path: str = "D:\\sogood\\test\\pyTools\\monitor\\monitor_config.yaml"):
        self.config_path = config_path
        self.config = self._load_config()

    def _load_config(self) -> Dict[str, Any]:
        """Load configuration from YAML file"""
        if not os.path.exists(self.config_path):
            raise FileNotFoundError(f"Config file not found: {self.config_path}")
        with open(self.config_path, 'r') as f:
            try:
                return yaml.safe_load(f)
            except yaml.YAMLError as e:
                raise ValueError(f"Invalid YAML in config file: {e}")

    def get_polling_interval(self) -> int:
        """Get timer polling interval in seconds"""
        return self.config.get('polling_interval', 60)

    def get_failure_threshold(self) -> int:
        """Get API failure threshold count"""
        return self.config.get('failure_threshold', 3)

    def get_api_endpoint(self) -> str:
        """Get API endpoint URL"""
        return self.config['api_endpoint']  # Required, no default

    def get_alert_endpoint(self) -> str:
        """Get alert notification URL"""
        return self.config['alert_endpoint']  # Required, no default

    def get_fund_codes(self) -> str:
        """Get fund codes for monitoring"""
        return self.config['fund_codes']  # Required, no default


class MonitorThread(threading.Thread):
    """Thread that runs a monitoring task at fixed intervals"""

    def __init__(self, task_id: int, config: ConfigLoader,
                 callback: Callable[[int, 'MonitorThread'], None]):
        super().__init__()
        self.task_id = task_id
        self.config = config
        self.callback = callback
        self._stop_event = threading.Event()
        self.failure_count = 0

    def run(self):
        """Main thread loop"""
        while not self._stop_event.is_set():
            try:
                self.callback(self.task_id, self)
            except Exception as e:
                print(f"Task {self.task_id} error: {e}")
                self.failure_count += 1
                if self.failure_count >= self.config.get_failure_threshold():
                    self.trigger_alert()
            time.sleep(10)  # Fixed 10 second interval

    def trigger_alert(self):
        """Send alert notification via zabbix_sender"""
        try:
            # Format current date as YYYYMMDD
            date_str = time.strftime("%Y%m%d")
            # Execute zabbix_sender command and capture output
            cmd = f"zabbix_sender -p \"10051\" -z 220.112.1.147 -s \"SOGOOD360\" -k sg.wind.api.{self.task_id} -o {date_str}-error"
            print(f"Executing: {cmd}")
            result = os.popen(cmd).read()
            print(f"Command output:\n{result}")
            print(f"Sent zabbix alert for task {self.task_id}")
        except Exception as e:
            print(f"Failed to send zabbix alert: {e}")

    def stop(self):
        """Signal thread to stop"""
        self._stop_event.set()


def generate_sign(params: dict, token: str = "AqsZ7aWd4vPas9bt") -> str:
    """Generate sign by: 1) sort params 2) base64 encode 3) MD5 with token"""
    sorted_params = sorted(params.items())
    param_str = "&".join(f"{k}={v}" for k, v in sorted_params)
    base64_str = base64.b64encode(param_str.encode()).decode()
    sign_str = base64_str + token
    return hashlib.md5(sign_str.encode()).hexdigest()


def task1_monitor(thread: MonitorThread):
    """Task 1: Node API monitoring"""
    try:
        # Prepare request params
        params = {
            "act": "60630980",
            "params": json.dumps({"type": 3}),
            "time": "",
            "token": "",
            "ip": "",
            "user_code": "",
            "m_id": ""
        }
        params["sign"] = generate_sign(params)

        # Make API call
        config = ConfigLoader()
        response = requests.post(
            config.get_api_endpoint(),
            json=params,
            timeout=10
        )

        # Check response status
        if response.status_code != 200:
            raise ValueError(f"API returned {response.status_code}")

        data = response.json()
        print(f"data: {data}")

        # Check response structure
        if not data or data.get("code") != "0" or not data.get("data"):
            raise ValueError("Invalid response data")

        # Get current date in YYYY-MM-DD format
        today = time.strftime("%Y-%m-%d")

        # Check update date
        update_date = data["data"].get("date", "")
        if update_date != today:
            raise ValueError(f"Data not updated today (last update: {update_date})")

        # Reset failure count on success
        thread.failure_count = 0
    except Exception:
        raise  # Let MonitorThread handle the failure counting


def task2_monitor(thread: MonitorThread):
    """Task 2: Stock performance monitoring"""
    try:
        # Prepare request params
        params = {
            "act": "60630960",
            "params": json.dumps({"stock_code": "NDX.GI"})
        }
        params["sign"] = generate_sign(params)

        # Make API call
        config = ConfigLoader()
        response = requests.post(
            config.get_api_endpoint(),
            json=params,
            timeout=10
        )

        # Check response status
        if response.status_code != 200:
            raise ValueError(f"API returned {response.status_code}")

        data = response.json()
        print(f"data: {data}")

        # Check response structure
        if not data or data.get("code") != "0" or not data.get("data"):
            raise ValueError("Invalid response data")

        # Get current date in YYYY-MM-DD format
        today = time.strftime("%Y-%m-%d")

        # Check update time
        update_time = data["data"].get("update_time", "")
        if not update_time.startswith(today):
            raise ValueError(f"Stock data not updated today (last update: {update_time})")

        # Reset failure count on success
        thread.failure_count = 0
    except Exception:
        raise  # Let MonitorThread handle the failure counting


def task3_monitor(thread: MonitorThread):
    """Task 3: Fund quota monitoring"""
    try:
        config = ConfigLoader()

        # Prepare request params
        params = {
            "act": "60630961",
            "params": json.dumps({"m_id": "1", "fund_codes": config.get_fund_codes()}),
            "m_id": "1"
        }
        params["sign"] = generate_sign(params)

        response = requests.post(
            config.get_api_endpoint(),
            json=params,
            timeout=10
        )

        # Check response status
        if response.status_code != 200:
            raise ValueError(f"API returned {response.status_code}")

        data = response.json()
        print(f"data3: {data}")

        # Check response structure
        if not data or data.get("code") != 0 or not data.get("data_list"):
            raise ValueError("Invalid response data")

        # Get current date in YYYY-MM-DD format
        today = time.strftime("%Y-%m-%d")

        # Check each fund's update time
        for fund in data["data_list"]:
            update_time = fund.get("update_time", "")
            if not update_time.startswith(today):
                raise ValueError(f"Fund {fund.get('fund_code')} not updated today")

        # Reset failure count on success
        thread.failure_count = 0
    except Exception:
        raise  # Let MonitorThread handle the failure counting


def task4_monitor(thread: MonitorThread):
    """Task 4: Placeholder for fourth monitoring task"""
    try:
        # Prepare request params
        params = {
            "act": "60630932",
            "params": json.dumps({"fund_code": "160140", "date": "", "type": 1})
        }
        params["sign"] = generate_sign(params)

        # Make API call
        config = ConfigLoader()
        response = requests.post(
            config.get_api_endpoint(),
            json=params,
            timeout=10
        )

        # Check response
        if response.status_code != 200:
            raise ValueError(f"API returned {response.status_code}")

        data = response.json()
        print(f"data4: {data}")
        if not data:
            raise ValueError("Empty response data")

        # Reset failure count on success
        thread.failure_count = 0
    except Exception:
        raise  # Let MonitorThread handle the failure counting


def on_timer(task_id: int, thread: MonitorThread):
    """Called by each monitor thread on its interval"""
    print(f"Task {task_id} executing at {time.strftime('%X')}")
    if task_id == 1:
        task1_monitor(thread)
    elif task_id == 2:
        task2_monitor(thread)
    elif task_id == 3:
        task3_monitor(thread)
    elif task_id == 4:
        task4_monitor(thread)


if __name__ == "__main__":
    try:
        config = ConfigLoader()
        print("Starting 4 monitor threads...")

        # Create and start threads
        threads = [
            MonitorThread(i, config, on_timer)
            for i in range(1, 5)  # Task IDs 1-4
        ]
        for t in threads:
            t.start()

        # Wait for Ctrl+C
        try:
            while True:
                time.sleep(config.get_polling_interval())
        except KeyboardInterrupt:
            print("\nStopping threads...")
            for t in threads:
                t.stop()
                t.join()
    except Exception as e:
        print(f"Configuration error: {e}")
```

Add a comment to every line of the code above.
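Because `generate_sign` joins every sorted key with `&`, empty values such as `time` and `token` still participate in the signature. A small standalone sanity check of the scheme (the parameter values here are illustrative only):

```python
import base64
import hashlib

# Illustrative params; real calls use the act codes shown above.
params = {"act": "60630980", "time": "", "token": ""}
param_str = "&".join(f"{k}={v}" for k, v in sorted(params.items()))
print(param_str)   # act=60630980&time=&token=  (empty values still present)
base64_str = base64.b64encode(param_str.encode()).decode()
sign = hashlib.md5((base64_str + "AqsZ7aWd4vPas9bt").encode()).hexdigest()
print(sign)        # 32-character hex digest stored in params["sign"]
```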
07-01
```python
import asyncio
import logging
import math
import signal
import time
from collections import defaultdict
from typing import Dict, List

# AsyncSocketReader and AsyncSocketWriter are assumed to be defined elsewhere.


class AsyncSocketManager:
    """Async socket manager - coordinates the reader and writer tasks"""

    # Batched connection parameters
    BATCH_SIZE = 250                # connections per batch
    BATCH_INTERVAL = 20.0           # interval between batches (seconds)
    MAX_RETRIES = 5                 # maximum retry count
    RECONNECT_INITIAL_DELAY = 20.0  # initial reconnect delay
    RECONNECT_BACKOFF_FACTOR = 1.5  # exponential backoff factor
    MAX_RECONNECT_DELAY = 120.0     # maximum reconnect delay (2 minutes)
    HEALTH_CHECK_INTERVAL = 5.0     # health check interval

    def __init__(self):
        self.running = True
        self._shutdown_event = asyncio.Event()
        self._cleanup_lock = asyncio.Lock()
        self.shutdown_requested = asyncio.Event()
        self.original_sigint_handler = None
        self.shutdown_requested = asyncio.Event()
        self.server_config = {'ip': 'charge.b-conn.com', 'port': 5455}
        self.total = 0
        self.writer = None
        self.reader = None
        self._active_tasks = set()
        self.connections: Dict[str, Dict] = {}
        self.batch_tasks: List[asyncio.Task] = []  # batch task list
        self._read_lock = asyncio.Lock()   # dedicated lock for read operations
        self._write_lock = asyncio.Lock()  # dedicated lock for write operations
        self._state_lock = asyncio.Lock()  # lock for state changes
        self.logger = logging.getLogger(__name__)
        self.setup_logging()
        self.connection_pool = {}  # connection pool
        # Connection status statistics
        self.stats = {
            "success": 0,
            "failed": 0,
            "pending": 0,
            "active": 0
        }
        # One dedicated state lock per connection
        self.connection_locks = defaultdict(asyncio.Lock)

    def install_signal_handlers(self):
        """Install signal handlers"""
        try:
            loop = asyncio.get_running_loop()
            # Save the original signal handler
            self.original_sigint_handler = loop.add_signal_handler(
                signal.SIGINT, self._initiate_shutdown
            )
            self.logger.info("✅ Signal handlers installed")
        except (NotImplementedError, RuntimeError):
            # May be unsupported on some platforms
            self.logger.warning("⚠️ Async signal handling is not supported on this platform")

    def _initiate_shutdown(self):
        """Start the shutdown process"""
        self.logger.info("🔄 Shutdown signal received, starting graceful shutdown...")
        self.shutdown_requested.set()

    def restore_signal_handlers(self):
        """Restore the original signal handlers"""
        try:
            loop = asyncio.get_running_loop()
            if self.original_sigint_handler is not None:
                loop.remove_signal_handler(signal.SIGINT)
                # Reinstall the default handler
                loop.add_signal_handler(signal.SIGINT, signal.default_int_handler)
        except Exception as e:
            self.logger.debug(f"Exception while restoring signal handlers: {e}")

    async def close_all_connections(self):
        """Close all socket connections asynchronously"""
        self.logger.info("Closing all socket connections...")
        self.running = False
        self._shutdown_event.set()

        # Cancel all active tasks
        for task in self._active_tasks:
            if not task.done():
                task.cancel()

        # Close all connections
        close_tasks = []
        for pileCode in list(self.connections.keys()):
            conn = self.connections[pileCode]
            if 'writer' in conn and conn['writer'] is not None:
                writer = conn['writer']
                close_tasks.append(self.close_writer(writer, pileCode))

        if close_tasks:
            await asyncio.gather(*close_tasks, return_exceptions=True)
        self.logger.info("All connections closed")

    async def close_writer(self, writer, pileCode: str):
        """Close a writer safely"""
        try:
            if not writer.is_closing():
                writer.close()
                await writer.wait_closed()
            self.logger.debug(f"Closed connection: {pileCode}")
        except Exception as e:
            self.logger.error(f"Failed to close connection: {pileCode} error: {e}")

    async def _close_single_connection(self, conn_key: str):
        """Close a single socket connection"""
        try:
            if conn_key in self.connections:
                conn_info = self.connections[conn_key]
                # The actual socket close logic
                if 'writer' in conn_info:
                    writer = conn_info['writer']
                    writer.close()
                    await writer.wait_closed()
                self.logger.debug(f"🔌 Closed connection {conn_key}")
                del self.connections[conn_key]
        except Exception as e:
            self.logger.error(f"❌ Exception while closing connection {conn_key}: {e}")

    async def batch_create_connections(self, pile_codes: List[str]):
        """Start the health monitor, then create socket connections in batches"""
        # Start the health monitor
        health_task = asyncio.create_task(
            self.connection_health_monitor(),
            name="connection_health_monitor"
        )
        self.batch_tasks.append(health_task)

        # Create socket connections in batches
        total = len(pile_codes)
        batches = math.ceil(total / self.BATCH_SIZE)
        self.total_connections = total
        self.logger.info(f"Starting batched connection creation: {total} pile codes, "
                         f"{batches} batches, {self.BATCH_SIZE} per batch")

        for batch_num in range(batches):
            if self.shutdown_requested.is_set():
                self.logger.warning("Shutdown signal set, stopping new batches")
                break

            start_idx = batch_num * self.BATCH_SIZE
            end_idx = min(start_idx + self.BATCH_SIZE, total)
            batch = pile_codes[start_idx:end_idx]
            batch_id = batch_num + 1
            self.logger.info(f"Processing batch {batch_id}/{batches} ({len(batch)} pile codes)")

            # Create the batch connection task
            task = asyncio.create_task(
                self.process_batch(batch, batch_id),
                name=f"batch_{batch_id}"
            )
            self.batch_tasks.append(task)

            # Wait between batches
            if batch_num < batches - 1:
                await asyncio.sleep(self.BATCH_INTERVAL)

        # Wait for all batches to finish
        if self.batch_tasks:
            await asyncio.gather(*self.batch_tasks)
        self.logger.info(f"All batches done! success: {self.stats['success']} failed: {self.stats['failed']}")

    async def process_batch(self, pile_codes: List[str], batch_id: int):
        """Process the connections of a single batch"""
        tasks = []
        for i, pileCode in enumerate(pile_codes):
            message_id = f"batch{batch_id}-conn{i + 1}"
            task = asyncio.create_task(
                self.socket_worker(message_id, pileCode),
                name=f"batch{batch_id}-{pileCode}"
            )
            tasks.append(task)
            self.connection_pool[pileCode] = {"status": "pending"}
            self.stats["pending"] += 1

        # Wait for every connection in the batch to finish initializing
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Tally the batch results
        for result in results:
            if isinstance(result, Exception):
                self.stats["failed"] += 1
            else:
                self.stats["success"] += 1
        self.stats["pending"] -= len(pile_codes)
        self.stats["active"] = len(self.connections)

    async def socket_worker(self, message_id: str, pileCode: str):
        """Async socket worker coroutine"""
        global logger
        retry_count = 0
        last_reconnect_time = 0
        # health_check_last = time.time()

        while self.running and not self._shutdown_event.is_set():
            try:
                # Check whether we still need to wait out the reconnect delay
                current_time = time.time()
                if last_reconnect_time > 0:
                    delay = self.calculate_reconnect_delay(retry_count)
                    if current_time - last_reconnect_time < delay:
                        await asyncio.sleep(delay - (current_time - last_reconnect_time))

                # Try to create the connection
                success = await self.create_socket_connection_with_retry(message_id, pileCode, retry_count)
                if not success:
                    retry_count = min(retry_count + 1, self.MAX_RETRIES)
                    last_reconnect_time = time.time()
                    continue

                # Reset the retry counter (connection succeeded)
                retry_count = 0
                last_reconnect_time = 0

                # Start the reader/writer tasks
                await self.start_reader_writer(message_id, pileCode)

                # Health check and status reporting
                # current_time = time.time()
                # if current_time - health_check_last > self.HEALTH_CHECK_INTERVAL:
                #     pending_count = self.stats["pending"]
                #     self.logger.info(f"pending={pending_count} retries={retry_count}")
                #     health_check_last = current_time

            except asyncio.CancelledError:
                self.logger.info(f"⚠️ Socket worker cancelled: {message_id}")
                if pileCode in self.connections:
                    conn = self.connections[pileCode]
                    if 'writer' in conn and conn['writer'] is not None:
                        writer = conn['writer']
                        try:
                            if not writer.is_closing():
                                writer.close()
                                await writer.wait_closed()
                        except Exception:
                            pass
                    if pileCode in self.connections:
                        del self.connections[pileCode]
                break
            except Exception as e:
                self.logger.error(f"❌ Socket worker error: {message_id} pile code: {pileCode} error: {e}")
                retry_count = min(retry_count + 1, self.MAX_RETRIES)
                last_reconnect_time = time.time()

                # Clean up the current connection
                if pileCode in self.connections:
                    conn = self.connections[pileCode]
                    if 'writer' in conn and conn['writer'] is not None:
                        writer = conn['writer']
                        try:
                            if not writer.is_closing():
                                writer.close()
                                await writer.wait_closed()
                        except Exception:
                            pass
                    if pileCode in self.connections:
                        del self.connections[pileCode]
                await asyncio.sleep(1)  # brief pause
            finally:
                # Update the connection pool state
                if pileCode in self.connection_pool:
                    self.connection_pool[pileCode] = {
                        "status": "retrying" if retry_count > 0 else "active",
                        "retry_count": retry_count,
                        "last_retry": last_reconnect_time
                    }

    def calculate_reconnect_delay(self, retry_count: int) -> float:
        """Compute the exponential-backoff reconnect delay"""
        delay = self.RECONNECT_INITIAL_DELAY * (self.RECONNECT_BACKOFF_FACTOR ** retry_count)
        return min(delay, self.MAX_RECONNECT_DELAY)

    def handle_task_completion(self, task: asyncio.Task):
        """Task completion callback"""
        try:
            # First check whether the task was cancelled
            if task.cancelled():
                self.logger.debug(f"Task {task.get_name()} cancelled")
                return
            if task.done() and task.exception():
                self.logger.error(f"Task failed: {task.get_name()} {task.exception()}")
                # Mark the connection for restart
                pile_code = task.get_name().split("_")[1]
                if pile_code in self.connections:
                    writer = self.connections[pile_code]["writer"]
                    if writer is not None:
                        writer.close()
                    if pile_code in self.connections:
                        del self.connections[pile_code]
        except Exception as e:
            self.logger.error(f"Task callback error: {e}")

    async def connection_health_monitor(self):
        """Connection health monitor"""
        while self.running and not self._shutdown_event.is_set():
            try:
                real_active_count = 0
                current_time = time.time()
                connections_to_restart = []

                for pile_code, conn in list(self.connections.items()):
                    # Drop connections that exceeded the retry limit
                    if conn.get("retry_count", 0) >= self.MAX_RETRIES:
                        self.logger.warning(f"❌ Connection exceeded max retries: {pile_code}")
                        await self._close_single_connection(pile_code)
                        continue

                    if conn.get("isLogin") is True:
                        real_active_count += 1

                    # Check the last activity time
                    last_activity = conn.get("last_time_stamp", 0)
                    if current_time - last_activity > 60:  # no activity for 1 minute
                        self.logger.warning(f"Pile code: {pile_code} last message: {last_activity} "
                                            f"now: {current_time} gap: {current_time - last_activity}")
                        self.logger.warning(f"🕒 Connection idle timeout: {pile_code}")
                        connections_to_restart.append(pile_code)

                    # Check the explicit restart flag
                    if conn.get("needs_restart", False):
                        connections_to_restart.append(pile_code)

                # Restart the connections that need it
                for pile_code in connections_to_restart:
                    if pile_code in self.connections:
                        conn = self.connections[pile_code]
                        if conn['isLogin'] and conn['isLogin'] is not None:
                            connections_to_restart.remove(pile_code)
                            continue
                        else:
                            await self._close_single_connection(pile_code)
                            # Restart the worker coroutine
                            self.logger.info(f"🔄 Restarting connection: {pile_code}")
                            task = asyncio.create_task(
                                self.socket_worker(f"restart-{pile_code}", pile_code),
                                name=f"restart_{pile_code}"
                            )
                            self.batch_tasks.append(task)
                            await asyncio.sleep(1)

                # Periodic report
                restart_count = len(connections_to_restart)
                self.logger.info(f"🔍 Health check: active={real_active_count} to restart={restart_count}")
                await asyncio.sleep(5)
            except Exception as e:
                self.logger.error(f"Health monitor error: {e}")
                await asyncio.sleep(10)

    async def create_socket_connection_with_retry(self, message_id: str, pileCode: str, attempt: int) -> bool:
        """Create a connection with retry support"""
        try:
            reader, writer = await asyncio.wait_for(
                asyncio.open_connection(self.server_config['ip'], self.server_config['port']),
                timeout=30.0 + min(attempt * 5, 30)  # grow the timeout with each retry
            )
            async with self.connection_locks[pileCode]:
                if pileCode not in self.connections:
                    self.connections[pileCode] = {
                        'reader': reader,
                        'writer': writer,
                        'pileCode': pileCode,
                        'message_id': message_id,
                        'retry_count': 0,
                        'last_heartbeat1': 0,
                        'last_heartbeat2': 0,
                        'last_time_stamp': 0,
                        'isLogin': False,
                        'timeout_count_login': 0,
                        'timeout_count': 0,
                        'heart_serialNum': "00 00",
                        'status': 'Offline',
                        'charging': False,
                        'priceModelList': []
                    }
            self.logger.info(f"Connected: {message_id} pile code: {pileCode}")
            return True
        except asyncio.TimeoutError:
            self.logger.warning(f"⛔ Connection timeout: {message_id} pile code: {pileCode} (attempt #{attempt + 1})")
            if pileCode in self.connections:
                conn = self.connections[pileCode]
                if 'writer' in conn and conn['writer'] is not None:
                    writer = conn['writer']
                    try:
                        if not writer.is_closing():
                            writer.close()
                            await writer.wait_closed()
                    except Exception:
                        pass
                if pileCode in self.connections:
                    del self.connections[pileCode]
            await asyncio.sleep(1)  # brief pause
        except ConnectionRefusedError:
            self.logger.warning(f"🚫 Connection refused: {message_id} pile code: {pileCode} (attempt #{attempt + 1})")
            if pileCode in self.connections:
                conn = self.connections[pileCode]
                if 'writer' in conn and conn['writer'] is not None:
                    writer = conn['writer']
                    try:
                        if not writer.is_closing():
                            writer.close()
                            await writer.wait_closed()
                    except Exception:
                        pass
                if pileCode in self.connections:
                    del self.connections[pileCode]
            await asyncio.sleep(1)  # brief pause
        except Exception as e:
            self.logger.error(f"❌ Connection error: {message_id} pile code: {pileCode} error: {e} (attempt #{attempt + 1})")
            if pileCode in self.connections:
                conn = self.connections[pileCode]
                if 'writer' in conn and conn['writer'] is not None:
                    writer = conn['writer']
                    try:
                        if not writer.is_closing():
                            writer.close()
                            await writer.wait_closed()
                    except Exception:
                        pass
                if pileCode in self.connections:
                    del self.connections[pileCode]
            await asyncio.sleep(1)  # brief pause
        return False

    async def start_reader_writer(self, message_id: str, pileCode: str):
        """Start the read/write tasks"""
        if pileCode not in self.connections:
            return
        conn = self.connections[pileCode]
        reader = conn['reader']
        writer = conn['writer']

        self.writer, self.reader = AsyncSocketWriter(
            writer, pileCode, message_id, self.total,
            self.connections, self.connection_locks, self.logger
        ), AsyncSocketReader(
            reader, pileCode, message_id,
            self.connections, self.connection_locks, self.logger
        )

        # Create the read/write tasks
        read_task = asyncio.create_task(
            self.reader.start_reading(),
            name=f"reader_{pileCode}_{message_id}"
        )
        write_task = asyncio.create_task(
            self.writer.start_write(),
            name=f"writer_{pileCode}_{message_id}"
        )

        # Add completion callbacks
        read_task.add_done_callback(self.handle_task_completion)
        write_task.add_done_callback(self.handle_task_completion)

        # Track the active tasks
        self._active_tasks.add(read_task)
        self._active_tasks.add(write_task)

        # Run the read and write tasks in parallel
        try:
            await asyncio.gather(read_task, write_task, return_exceptions=True)
        except Exception as e:
            self.logger.error(f"Reader/writer task error: {message_id} {pileCode}: {e}")
```

Any problems?
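One quick check on `calculate_reconnect_delay` is to print the backoff schedule the constants imply; a minimal standalone sketch, with the constants copied from the class above:

```python
# Constants copied from AsyncSocketManager above.
RECONNECT_INITIAL_DELAY = 20.0
RECONNECT_BACKOFF_FACTOR = 1.5
MAX_RECONNECT_DELAY = 120.0

def reconnect_delay(retry_count: int) -> float:
    """Same formula as AsyncSocketManager.calculate_reconnect_delay."""
    delay = RECONNECT_INITIAL_DELAY * (RECONNECT_BACKOFF_FACTOR ** retry_count)
    return min(delay, MAX_RECONNECT_DELAY)

for attempt in range(6):
    print(attempt, reconnect_delay(attempt))
# 0 20.0, 1 30.0, 2 45.0, 3 67.5, 4 101.25, 5 120.0 (cap reached)
```

The delay saturates at the 120-second cap on the fifth retry, which lines up with MAX_RETRIES = 5.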
11-08
# Plan: A Multi-Task Processing System

## Overview

This plan aims to create an efficient multi-task processing system that can process multiple tasks concurrently and optimize resource allocation.

## Core Features

1. **Task queue management**
   - Priority queue implementation
   - Task classification and tagging system
   - Dynamic task scheduling
2. **Resource allocation module**
   - Automatic load balancing
   - Resource monitoring and alerting
   - Intelligent resource allocation algorithm
3. **Parallel processing engine**
   - Multi-threading / multi-processing support
   - Asynchronous I/O handling
   - Task dependency resolution

## Technical Implementation

```python
import concurrent.futures
import queue
import threading


class MultiTaskSystem:
    def __init__(self, max_workers=4):
        self.task_queue = queue.PriorityQueue()
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
        self.resource_monitor = ResourceMonitor()

    def add_task(self, task, priority=1):
        """Add a task to the queue"""
        self.task_queue.put((priority, task))

    def start_processing(self):
        """Start processing tasks"""
        while not self.task_queue.empty():
            priority, task = self.task_queue.get()
            if self.resource_monitor.can_allocate(task):
                self.executor.submit(task.execute)


class ResourceMonitor:
    """Resource monitor"""
    def __init__(self):
        self.available_resources = {...}

    def can_allocate(self, task):
        """Check whether enough resources are available to run the task"""
        return all(self.available_resources[k] >= v
                   for k, v in task.resource_requirements.items())
```

## Extensions

1. **Task visualization dashboard**
   - Real-time task status display
   - Resource usage charts
   - Historical data analysis
2. **Predictive system**
   - Task completion time prediction
   - Resource demand forecasting
   - Anomalous task detection
3. **API layer**
   - RESTful API integration
   - WebSocket live updates
   - Third-party service integration

## Deployment

1. **Containerized deployment**
   - Docker image packaging
   - Kubernetes cluster support
   - Autoscaling configuration
2. **Monitoring and logging**
   - Prometheus metrics collection
   - ELK logging stack
   - Alert notification integration
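The sketch above leaves the `Task` shape and `available_resources` open (`{...}` is a placeholder). A minimal, illustrative `Task` that would satisfy `MultiTaskSystem` might look like this; the field names here are assumptions, not part of the plan:

```python
from dataclasses import dataclass, field
from typing import Callable, Dict


# order=True with all fields excluded from comparison keeps PriorityQueue
# happy when two tasks share the same priority (ties compare as equal).
@dataclass(order=True)
class Task:
    """Illustrative task shape assumed by MultiTaskSystem."""
    name: str = field(compare=False)
    action: Callable[[], None] = field(compare=False)
    # Resources this task needs, e.g. {"cpu": 1, "memory_mb": 256}
    resource_requirements: Dict[str, int] = field(default_factory=dict, compare=False)

    def execute(self):
        print(f"running {self.name}")
        self.action()


# Usage sketch (available_resources in ResourceMonitor would be filled in
# with something like {"cpu": 4, "memory_mb": 2048} beforehand):
# system = MultiTaskSystem(max_workers=2)
# system.add_task(Task("t1", lambda: print("work"), {"cpu": 1}), priority=0)
# system.start_processing()
```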
08-19
```python
# agent/autonomous_agent.py
import os
import sys
import time
import json
import logging
import traceback
import threading
import platform
import psutil
from pathlib import Path
from typing import Any, Dict, Optional, List, Callable, Tuple
from concurrent.futures import ThreadPoolExecutor, Future, TimeoutError

# Make sure the project root is on sys.path before importing core modules
BASE_DIR = Path(__file__).resolve().parent.parent.parent  # points at E:\AI_System
if str(BASE_DIR) not in sys.path:
    sys.path.insert(0, str(BASE_DIR))

# Previously broken import:
#   from core.config import system_config
# Replaced with the correct import plus an explicit instance:
from core.config import CoreConfig
system_config = CoreConfig()  # create the config instance

# Import core modules
from core.exceptions import DependencyError, SubsystemFailure, ConfigurationError
from core.metrics import MetricsCollector
from core.circuit_breaker import CircuitBreaker
from core.subsystem_registry import SubsystemRegistry

# Global thread pool
executor = ThreadPoolExecutor(max_workers=system_config.MAX_WORKERS)


class AutonomousAgent:
    def __init__(self):
        """Refactored autonomous agent core class; coordinates all subsystems"""
        self.logger = self._setup_logger()
        self.logger.info("🚀 Initializing the autonomous agent core module...")
        self._running = False
        self._background_thread = None

        # Initialization state tracking
        self.initialization_steps = []
        self._last_env_check = 0
        self._initialization_time = time.time()
        self.metrics = MetricsCollector()

        # Circuit breaker manager
        self.circuit_breakers: Dict[str, CircuitBreaker] = {}

        # Subsystem registry
        self.subsystem_registry = SubsystemRegistry()

        # Environment manager (set externally)
        self.environment = None

        # Make sure the required directories exist
        self._ensure_directories_exist()

        try:
            # Initialization steps
            self._record_step("Validate configuration")
            self._validate_configuration()
            self._record_step("Load environment variables")
            self._load_environment()
            self._record_step("Verify environment")
            self.verify_environment()
            self._record_step("Initialize core components")
            self._initialize_core_components()
            self._record_step("Initialize subsystems")
            self._initialize_subsystems()

            self.logger.info(f"✅ Autonomous agent initialized "
                             f"(took {time.time() - self._initialization_time:.2f}s)")
            self.logger.info(f"Initialization steps: {', '.join(self.initialization_steps)}")
        except Exception as e:
            self.logger.exception(f"❌ Agent initialization failed: {str(e)}")
            self.logger.error(f"Stack trace:\n{traceback.format_exc()}")
            raise RuntimeError(f"Agent initialization failed: {str(e)}") from e

    def _setup_logger(self) -> logging.Logger:
        """Configure the logger"""
        logger = logging.getLogger('AutonomousAgent')
        logger.setLevel(system_config.LOG_LEVEL)

        # Console handler
        console_handler = logging.StreamHandler()
        console_handler.setLevel(system_config.LOG_LEVEL)

        # File handler
        log_file = Path(system_config.LOG_DIR) / 'autonomous_agent.log'
        log_file.parent.mkdir(parents=True, exist_ok=True)
        file_handler = logging.FileHandler(log_file, encoding='utf-8')
        file_handler.setLevel(system_config.LOG_LEVEL)

        # Formatter
        formatter = logging.Formatter(
            '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )
        console_handler.setFormatter(formatter)
        file_handler.setFormatter(formatter)

        # Attach handlers
        logger.addHandler(console_handler)
        logger.addHandler(file_handler)
        logger.propagate = False
        return logger

    def _ensure_directories_exist(self):
        """Make sure the required directories exist"""
        required_dirs = [
            system_config.LOG_DIR,
            system_config.CONFIG_DIR,
            system_config.MODEL_CACHE_DIR
        ]
        for dir_path in required_dirs:
            try:
                if not isinstance(dir_path, Path):
                    dir_path = Path(dir_path)
                if not dir_path.exists():
                    dir_path.mkdir(parents=True, exist_ok=True)
                    self.logger.info(f"Created directory: {dir_path}")
            except Exception as e:
                self.logger.error(f"Failed to create directory {dir_path}: {str(e)}")

    def _validate_configuration(self):
        """Validate critical configuration entries"""
        required_configs = [
            'LOG_DIR', 'CONFIG_DIR', 'MODEL_CACHE_DIR',
            'MAX_WORKERS', 'AGENT_RESPONSE_TIMEOUT'
        ]
        missing = []
        for config_key in required_configs:
            if not hasattr(system_config, config_key):
                missing.append(config_key)
        if missing:
            raise ConfigurationError(f"Missing critical config entries: {', '.join(missing)}")

        # Check that config values are valid
        if system_config.MAX_WORKERS <= 0:
            raise ConfigurationError(f"Invalid MAX_WORKERS value: {system_config.MAX_WORKERS}")

    def _record_step(self, step_name: str):
        """Record an initialization step"""
        self.initialization_steps.append(step_name)
        self.logger.info(f"⏳ Step {len(self.initialization_steps)}: {step_name}")

    def _load_environment(self):
        """Load environment variables"""
        env_path = system_config.CONFIG_DIR / ".env"
        if not env_path.exists():
            self.logger.warning(f"⚠️ Environment file does not exist: {env_path}")
            return
        try:
            from dotenv import load_dotenv
            load_dotenv(env_path)
            self.logger.info(f"✅ Loaded environment file: {env_path}")
        except ImportError:
            self.logger.warning("dotenv package not installed, skipping env loading. "
                                "Install with: pip install python-dotenv")
        except Exception as e:
            self.logger.error(f"Failed to load environment variables: {str(e)}")

    def set_environment(self, env_manager):
        """Attach the environment manager"""
        self.environment = env_manager
        self.logger.info("✅ Environment manager connected")

        # Register the environment monitoring task
        if self.environment:
            self.subsystem_registry.register_task(
                "Environment monitor",
                self._monitor_environment,
                interval=system_config.get('ENVIRONMENT_MONITOR_INTERVAL', 5.0)
            )

    def start(self):
        """Start the agent's background tasks"""
        if not self._running:
            self._start_background_tasks()
            self.logger.info("🏁 Agent background tasks started")
        else:
            self.logger.warning("Agent is already running")

    def _start_background_tasks(self):
        """Start the background task thread"""
        if self._running:
            return
        self._running = True
        self._background_thread = threading.Thread(
            target=self._background_task_loop,
            daemon=True,
            name="AutonomousAgentBackgroundTasks"
        )
        self._background_thread.start()
        self.logger.info("✅ Background task thread started")

    def _background_task_loop(self):
        """Background task loop"""
        self.logger.info("🔄 Background task loop started")
        while self._running:
            try:
                start_time = time.time()

                # Run the registered periodic tasks
                self.subsystem_registry.run_periodic_tasks()

                # Adjust the sleep time dynamically
                task_time = time.time() - start_time
                sleep_time = max(0.1, system_config.AGENT_TASK_INTERVAL - task_time)
                time.sleep(sleep_time)
            except Exception as e:
                self.logger.error(f"Background task error: {str(e)}")
                self.metrics.record_error('background_task')
                time.sleep(30)  # wait longer after an error

    def verify_environment(self):
        """Verify that the runtime environment meets the requirements"""
        # Required modules
        required_modules = [
            'os', 'sys', 'logging', 'flask', 'werkzeug',
            'numpy', 'transformers', 'torch', 'psutil'
        ]

        # Required packages
        required_packages = [
            ('dotenv', 'python-dotenv'),
            ('flask_socketio', 'flask-socketio')
        ]

        missing_modules = []
        for mod in required_modules:
            try:
                __import__(mod)
            except ImportError:
                missing_modules.append(mod)

        missing_packages = []
        for import_name, pkg_name in required_packages:
            try:
                __import__(import_name)
            except ImportError:
                missing_packages.append(pkg_name)

        # Handle anything that is missing
        errors = []
        if missing_modules:
            errors.append(f"Missing Python modules: {', '.join(missing_modules)}")
        if missing_packages:
            errors.append(f"Missing Python packages: {', '.join(missing_packages)}")

        if errors:
            error_msg = "Environment verification failed:\n" + "\n".join(errors)
            self.logger.error(error_msg)
            raise DependencyError(error_msg)

        self.logger.info("✅ Environment verification passed")

    def _log_environment_status(self):
        """Log environment status information"""
        try:
            # Gather system information
            sys_info = {
                "os": platform.system(),
                "os_version": platform.version(),
                "cpu": platform.processor(),
                "cpu_cores": psutil.cpu_count(logical=False),
                "memory_total": round(psutil.virtual_memory().total / (1024 ** 3), 1),
                "memory_used": round(psutil.virtual_memory().used / (1024 ** 3), 1),
                "disk_total": round(psutil.disk_usage('/').total / (1024 ** 3), 1),
                "disk_used": round(psutil.disk_usage('/').used / (1024 ** 3), 1),
            }
            self.logger.info(
                f"📊 System status: OS={sys_info['os']} {sys_info['os_version']}, "
                f"CPU={sys_info['cpu']} ({sys_info['cpu_cores']} cores), "
                f"memory={sys_info['memory_used']}/{sys_info['memory_total']}GB, "
                f"disk={sys_info['disk_used']}/{sys_info['disk_total']}GB"
            )
        except Exception as e:
            self.logger.error(f"Failed to collect environment status: {str(e)}")
            self.metrics.record_error('environment_status')

    def _initialize_core_components(self):
        """Initialize core components that do not depend on other components"""
        self._log_environment_status()

        # Initialize circuit breakers
        self._initialize_circuit_breakers()

        # Register core tasks
        self.subsystem_registry.register_task(
            "Subsystem heartbeat check",
            self._check_subsystem_heartbeats,
            interval=system_config.get('HEARTBEAT_INTERVAL', 60.0)
        )
        self.subsystem_registry.register_task(
            "Subsystem recovery",
            self._recover_failed_subsystems,
            interval=system_config.get('RECOVERY_INTERVAL', 300.0)
        )

    def _initialize_circuit_breakers(self):
        """Initialize a circuit breaker for each subsystem"""
        # Subsystem names are kept as the original string keys used in code.
        subsystems = [
            '健康系统', '模型管理器', '记忆系统',
            '情感系统', '认知架构', '通信系统'
        ]
        for subsystem in subsystems:
            breaker = CircuitBreaker(
                failure_threshold=system_config.get('CIRCUIT_BREAKER_THRESHOLD', 5),
                recovery_timeout=system_config.get('CIRCUIT_BREAKER_TIMEOUT', 300)
            )
            self.circuit_breakers[subsystem] = breaker
            self.logger.info(f"⚡ Initialized circuit breaker for {subsystem}")

    def _initialize_subsystems(self):
        """Initialize all subsystems"""
        # Subsystem initialization order
        subsystems = [
            ('健康系统', self._create_health_system, {}),
            ('模型管理器', self._create_model_manager, {}),
            ('记忆系统', self._create_memory_system, {}),
            ('情感系统', self._create_affective_system, {}),
            ('认知架构', self._create_cognitive_architecture, {}),
            ('通信系统', self._create_communication_system, {})
        ]

        # Subsystem dependency graph
        dependencies = {
            '通信系统': ['认知架构'],
            '情感系统': ['健康系统', '记忆系统'],
            '认知架构': ['记忆系统']
        }

        for name, creator_func, kwargs in subsystems:
            try:
                # Check whether the dependencies are satisfied
                if name in dependencies:
                    missing_deps = [dep for dep in dependencies[name]
                                    if not self.subsystem_registry.get_subsystem(dep)]
                    if missing_deps:
                        self.logger.warning(f"⚠️ Subsystem {name} missing dependencies: {', '.join(missing_deps)}")
                        # Try to initialize the missing dependencies automatically
                        for dep in missing_deps:
                            self._initialize_dependency(dep)

                # Create the instance
                instance = creator_func(**kwargs)
                self.subsystem_registry.register_subsystem(name, instance)

                # Register the subsystem's periodic task
                if hasattr(instance, 'periodic_task'):
                    self.subsystem_registry.register_task(
                        f"{name} update",
                        instance.periodic_task,
                        interval=system_config.get(f'{name}_INTERVAL', 60.0)
                    )
                self.logger.info(f"✅ {name} initialized")
            except Exception as e:
                self.logger.error(f"❌ {name} initialization failed: {str(e)}")
                self.metrics.record_error(f'subsystem_init_{name.lower()}')

    def _initialize_dependency(self, subsystem_name: str):
        """Initialize a dependency subsystem"""
        creators = {
            '健康系统': self._create_health_system,
            '模型管理器': self._create_model_manager,
            '记忆系统': self._create_memory_system,
            '情感系统': self._create_affective_system,
            '认知架构': self._create_cognitive_architecture,
            '通信系统': self._create_communication_system
        }
        if subsystem_name in creators:
            try:
                instance = creators[subsystem_name]()
                self.subsystem_registry.register_subsystem(subsystem_name, instance)
                self.logger.info(f"✅ Dependency subsystem {subsystem_name} initialized")
            except Exception as e:
                self.logger.error(f"❌ Dependency subsystem {subsystem_name} initialization failed: {str(e)}")
                raise

    # Subsystem implementations (with enhancements)
    def _create_health_system(self):
        class HealthSystem:
            def __init__(self):
                self.status = "healthy"
                self.metrics: Dict[str, Any] = {}
                self.logger = logging.getLogger('HealthSystem')

            def periodic_task(self):
                """Refresh the health status"""
                try:
                    # Collect system stats
                    cpu_usage = psutil.cpu_percent()
                    mem_usage = psutil.virtual_memory().percent
                    disk_usage = psutil.disk_usage('/').percent

                    # Update the status
                    self.status = "healthy" if cpu_usage < 90 and mem_usage < 90 else "warning"
                    self.metrics = {
                        "cpu_usage": cpu_usage,
                        "mem_usage": mem_usage,
                        "disk_usage": disk_usage,
                        "timestamp": time.time()
                    }
                    self.logger.debug(f"Health status updated: {self.status}")
                except Exception as e:
                    self.logger.error(f"Health system update failed: {str(e)}")

            def record_environment_status(self, env_data: Dict[str, Any]):
                """Record environment status"""
                self.metrics['environment'] = env_data

            def get_status(self) -> Dict[str, Any]:
                return {
                    "status": self.status,
                    "metrics": self.metrics
                }

        return HealthSystem()

    def _create_model_manager(self):
        class ModelManager:
            def __init__(self):
                self.loaded_models: Dict[str, Dict[str, Any]] = {}
                self.logger = logging.getLogger('ModelManager')

            def load_model(self, model_name: str) -> bool:
                """Load a model"""
                if model_name not in self.loaded_models:
                    # Simulated model loading
                    self.logger.info(f"Loading model: {model_name}")
                    self.loaded_models[model_name] = {
                        "status": "loaded",
                        "load_time": time.time()
                    }
                    return True
                return False

            def periodic_task(self):
                """Model manager periodic task"""
                # Check model states
                for model_name, model_info in list(self.loaded_models.items()):
                    # Simulated model validation
                    if time.time() - model_info['load_time'] > 86400:  # 24 hours
                        self.logger.info(f"Reloading model: {model_name}")
                        model_info['load_time'] = time.time()

            def get_status(self) -> Dict[str, Any]:
                return {
                    "loaded_models": list(self.loaded_models.keys()),
                    "count": len(self.loaded_models)
                }

        return ModelManager()

    def _create_memory_system(self):
        class MemorySystem:
            def __init__(self):
                self.memories: List[Dict[str, Any]] = []
                self.last_consolidation = time.time()
                self.logger = logging.getLogger('MemorySystem')

            def periodic_task(self):
                """Consolidate memories"""
                try:
                    # Keep only the most recent 100 memories
                    if len(self.memories) > 100:
                        self.memories = self.memories[-100:]
                    self.last_consolidation = time.time()
                    self.logger.debug(f"Memory consolidation done, current count: {len(self.memories)}")
                except Exception as e:
                    self.logger.error(f"Memory consolidation failed: {str(e)}")

            def add_memory(self, memory: str):
                """Add a memory"""
                self.memories.append({
                    "content": memory,
                    "timestamp": time.time()
                })

            def get_status(self) -> Dict[str, Any]:
                return {
                    "memory_count": len(self.memories),
                    "last_consolidation": self.last_consolidation
                }

        return MemorySystem()

    def _create_affective_system(self):
        class AffectiveSystem:
            def __init__(self):
                self.mood = "neutral"
                self.energy = 100
                self.logger = logging.getLogger('AffectiveSystem')

            def periodic_task(self):
                """Affective growth"""
                try:
                    # Recover energy over time
                    self.energy = min(100, self.energy + 1)
                    self.logger.debug(f"Affect updated: energy={self.energy}, mood={self.mood}")
                except Exception as e:
                    self.logger.error(f"Affective system update failed: {str(e)}")

            def update_mood(self, interaction: str):
                """Update the mood based on an interaction"""
                if "positive" in interaction:
                    self.mood = "happy"
                elif "negative" in interaction:
                    self.mood = "sad"

            def get_status(self) -> Dict[str, Any]:
                return {
                    "mood": self.mood,
                    "energy": self.energy
                }

        return AffectiveSystem()

    def _create_cognitive_architecture(self):
        class CognitiveArchitecture:
            def __init__(self):
                self.current_task: Optional[str] = None
                self.task_history: List[Dict[str, Any]] = []
                self.logger = logging.getLogger('CognitiveArchitecture')

            def start_task(self, task: str):
                """Start a new task"""
                self.logger.info(f"Starting task: {task}")
                self.current_task = task
                self.task_history.append({
                    "task": task,
                    "start_time": time.time(),
                    "status": "in_progress"
                })

            def complete_task(self, result: Any):
                """Complete the current task"""
                if self.current_task:
                    for task in reversed(self.task_history):
                        if task["task"] == self.current_task and task["status"] == "in_progress":
                            task["status"] = "completed"
                            task["result"] = result
                            task["end_time"] = time.time()
                            self.logger.info(f"Completed task: {task['task']}")
                            break
                    self.current_task = None

            def periodic_task(self):
                """Cognitive architecture periodic task"""
                # Prune stale tasks; keep in-progress tasks from the last hour
                now = time.time()
                self.task_history = [
                    t for t in self.task_history
                    if t['status'] == 'completed' or (now - t['start_time']) < 3600
                ]

            def get_status(self) -> Dict[str, Any]:
                return {
                    "current_task": self.current_task,
                    "task_count": len(self.task_history),
                    "completed_tasks": sum(1 for t in self.task_history if t["status"] == "completed")
                }

        return CognitiveArchitecture()

    def _create_communication_system(self):
        class CommunicationSystem:
            def __init__(self):
                self.message_queue: List[Dict[str, Any]] = []
                self.processed_count = 0
                self.logger = logging.getLogger('CommunicationSystem')

            def process_input(self, user_input: str, user_id: str = "default") -> str:
                """Process user input"""
                try:
                    # Simulated processing logic
                    response = f"Processed your message: '{user_input}' (user: {user_id})"

                    # Record the processing
                    self.processed_count += 1
                    self.logger.info(f"Processed message: '{user_input[:30]}...' (user: {user_id})")
                    return response
                except Exception as e:
                    self.logger.error(f"Message processing failed: {str(e)}")
                    return "Error while processing the message"

            def periodic_task(self):
                """Communication system periodic task"""
                # Trim the message queue
                if len(self.message_queue) > 100:
                    self.message_queue = self.message_queue[-100:]
                    self.logger.debug("Trimmed message queue")

            def check_heartbeat(self) -> bool:
                """Heartbeat check"""
                return True

            def get_status(self) -> Dict[str, Any]:
                return {
                    "queue_size": len(self.message_queue),
                    "processed_count": self.processed_count
                }

        return CommunicationSystem()

    def process_input(self, user_input: str, user_id: str = "default") -> Dict[str, Any]:
        """Process user input (via the communication system)"""
        # Fetch the communication system
        comm_system = self.subsystem_registry.get_subsystem('通信系统')
        if not comm_system:
            self.logger.error("Communication system not initialized, using fallback handling")
            self.metrics.record_error('communication_system_inactive')
            return {"response": "The system is under maintenance, please try again later"}

        # Check the circuit breaker state
        breaker = self.circuit_breakers.get('通信系统')
        if breaker and breaker.is_open():
            self.logger.warning("Communication system circuit breaker is open")
            self.metrics.record_error('communication_circuit_open')
            return {"response": "The system is busy, please try again later"}

        try:
            # Wrap the call with the circuit breaker
            def process_wrapper():
                return comm_system.process_input(user_input, user_id)

            if breaker:
                response = breaker.call(process_wrapper)
            else:
                response = process_wrapper()

            # Run through the thread pool asynchronously
            future = executor.submit(lambda: response)
            result = future.result(timeout=system_config.AGENT_RESPONSE_TIMEOUT)

            # Record the success
            self.metrics.record_success('process_input')
            return {"response": result}
        except TimeoutError:
            self.logger.warning("Input processing timed out")
            self.metrics.record_timeout('process_input')
            if breaker:
                breaker.record_failure()
            return {"error": "Processing timed out, please retry"}
        except Exception as e:
            self.logger.error(f"Input processing failed: {str(e)}")
            self.metrics.record_error('process_input')
            if breaker:
                breaker.record_failure()
            return {"error": "Processing failed, please try again later"}

    def _monitor_environment(self):
        """Monitor the environment state"""
        try:
            if self.environment and hasattr(self.environment, 'get_state'):
                # Use the real environment manager to read the state
                env_state = self.environment.get_state()
                self.logger.info(
                    f"🌡️ Environment: temperature={env_state.get('temperature', 'unknown')}℃, "
                    f"humidity={env_state.get('humidity', 'unknown')}%, "
                    f"light={env_state.get('light_level', 'unknown')}%"
                )
                # Record into the health system (if available)
                health_system = self.subsystem_registry.get_subsystem('健康系统')
                if health_system and hasattr(health_system, 'record_environment_status'):
                    health_system.record_environment_status(env_state)
            else:
                # Fall back to the built-in monitoring
                cpu_usage = psutil.cpu_percent()
                mem_usage = psutil.virtual_memory().percent
                disk_usage = psutil.disk_usage('/').percent
                self.logger.info(
                    f"📊 System monitor: CPU={cpu_usage}%, "
                    f"memory={mem_usage}%, "
                    f"disk={disk_usage}%"
                )
                # Record into the health system
                health_system = self.subsystem_registry.get_subsystem('健康系统')
                if health_system and hasattr(health_system, 'record_environment_status'):
                    health_system.record_environment_status({
                        "cpu_usage": cpu_usage,
                        "mem_usage": mem_usage,
                        "disk_usage": disk_usage
                    })
        except Exception as e:
            self.logger.error(f"Environment monitoring failed: {str(e)}")
            self.metrics.record_error('environment_monitoring')

    def _check_subsystem_heartbeats(self):
        """Check subsystem heartbeats"""
        for name, subsystem in self.subsystem_registry.subsystems.items():
            if hasattr(subsystem, 'check_heartbeat'):
                try:
                    if not subsystem.check_heartbeat():
                        self.logger.warning(f"⚠️ Subsystem {name} heartbeat check failed")
                        self._handle_subsystem_error(name)
                    else:
                        self.logger.debug(f"✅ Subsystem {name} heartbeat OK")
                except Exception as e:
                    self.logger.error(f"Subsystem {name} heartbeat check error: {str(e)}")
                    self._handle_subsystem_error(name)
                    self.metrics.record_error(f'heartbeat_{name.lower()}')

    def _handle_subsystem_error(self, name: str):
        """Handle a subsystem error"""
        breaker = self.circuit_breakers.get(name)
        if breaker:
            breaker.record_failure()
            if breaker.is_open():
                self.logger.critical(f"🚨 Subsystem {name} tripped its circuit breaker after repeated errors!")
                self.metrics.record_event('circuit_breaker', name)

    def _recover_failed_subsystems(self):
        """Try to recover failed subsystems"""
        for name, breaker in self.circuit_breakers.items():
            if breaker.is_open() and breaker.should_try_recovery():
                self.logger.info(f"🔄 Trying to recover subsystem: {name}")
                try:
                    # Try to reinitialize the subsystem
                    self._reinitialize_subsystem(name)
                    breaker.record_success()
                    self.logger.info(f"✅ Subsystem {name} recovered")
                    self.metrics.record_event('subsystem_recovered', name)
                except Exception as e:
                    self.logger.error(f"Subsystem {name} recovery failed: {str(e)}")
                    breaker.record_failure()
                    self.metrics.record_error(f'recovery_{name.lower()}')

    def _reinitialize_subsystem(self, name: str):
        """Reinitialize a subsystem"""
        creators = {
            '健康系统': self._create_health_system,
            '模型管理器': self._create_model_manager,
            '记忆系统': self._create_memory_system,
            '情感系统': self._create_affective_system,
            '认知架构': self._create_cognitive_architecture,
            '通信系统': self._create_communication_system
        }
        if name in creators:
            # Try to shut down the existing instance first
            old_instance = self.subsystem_registry.get_subsystem(name)
            if old_instance and hasattr(old_instance, 'shutdown'):
                try:
                    old_instance.shutdown()
                    self.logger.info(f"Shut down old instance: {name}")
                except Exception as e:
                    self.logger.warning(f"Failed to shut down old instance: {str(e)}")

            # Create the new instance
            instance = creators[name]()
            self.subsystem_registry.register_subsystem(name, instance)
        else:
            raise SubsystemFailure(f"Unknown subsystem: {name}")

    def get_status(self) -> Dict[str, Any]:
        """Build the agent status report"""
        status_data = {
            "uptime": time.time() - self._initialization_time,
            "running": self._running,
            "metrics": self.metrics.get_metrics(),
            "subsystems": {}
        }

        # Add subsystem status
        for name, subsystem in self.subsystem_registry.subsystems.items():
            if hasattr(subsystem, 'get_status'):
                status_data['subsystems'][name] = subsystem.get_status()

        # Add circuit breaker status
        status_data['circuit_breakers'] = {}
        for name, breaker in self.circuit_breakers.items():
            status_data['circuit_breakers'][name] = breaker.get_status()

        return status_data

    def shutdown(self):
        """Shut down the agent"""
        self.logger.info("🛑 Shutting down the agent...")
        self._running = False

        # Stop the thread pool
        executor.shutdown(wait=False)

        # Wait for the background thread
        if self._background_thread and self._background_thread.is_alive():
            self._background_thread.join(timeout=5.0)
            if self._background_thread.is_alive():
                self.logger.warning("Background thread did not exit cleanly")

        # Shut down all subsystems
        for name, subsystem in self.subsystem_registry.subsystems.items():
            if hasattr(subsystem, 'shutdown'):
                try:
                    subsystem.shutdown()
                    self.logger.info(f"Shut down subsystem: {name}")
                except Exception as e:
                    self.logger.error(f"Failed to shut down subsystem {name}: {str(e)}")

        self.logger.info("✅ Agent shut down")
```
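For completeness, a minimal driver sketch for the class above, assuming the `core.*` packages it imports are available; the input text and user id here are illustrative:

```python
# Illustrative driver; assumes the core.* packages imported above resolve.
from agent.autonomous_agent import AutonomousAgent

if __name__ == "__main__":
    agent = AutonomousAgent()        # runs the full initialization sequence
    agent.start()                    # spins up the background task thread
    try:
        result = agent.process_input("hello", user_id="demo")
        print(result)                # {"response": ...} or {"error": ...}
        print(agent.get_status())    # uptime, metrics, subsystem snapshots
    finally:
        agent.shutdown()             # stops the loop and all subsystems
```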
08-13