load transaction with until_time fails

This post documents a problem encountered during a transaction log recovery on a Sybase ASE database and how it was resolved. Restoring the log to a specified point in time raised an error, which was eventually worked around by adjusting the recovery strategy.

---------------------------------------------------------------------------

---- This article is an original work by andkylee; please respect the author's work when reposting;

---- When reposting, be sure to credit the original source: http://blog.youkuaiyun.com/andkylee

--- 2010-06-28 17:42:40

---- Keywords: ase dump transaction load until transaction log recovery error retrieve a row via its RID

----------------------------------------------------------------------------

An error occurred while testing load tran with until_time.

 

First, dump the transaction log: dump tran andkylee to "d:/andkylee_tran2.dmp" with no_truncate

 

 

Then comes the database restore.

First a full database restore, then the transaction log restore below, recovering to the specified point in time.
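The intended restore sequence looks roughly like the sketch below; the full-database dump file name d:/andkylee_db.dmp is only a placeholder for illustration, and only the log dump name comes from the actual commands:

load database andkylee from "d:/andkylee_db.dmp"    -- file name assumed for illustration
go
load tran andkylee from "d:/andkylee_tran2.dmp"
     with until_time = 'Jun 28 2010 5:24:10 PM'
go
online database andkylee                             -- bring the database online after the last load
go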

But the transaction log load reports the following error:

 

1> load tran andkylee from "d:/andkylee_tran2.dmp" with until_time='Jun 28 2010
 5:24:10 PM'
2> go
Backup Server session id is:  111.  Use this value when executing the
'sp_volchanged' system stored procedure after fulfilling any volume change
request from the Backup Server.
Backup Server: 6.28.1.1: Dumpfile name 'ndkylee101790F554' section number 1
mounted on disk file 'd:/andkylee_tran2.dmp'
Backup Server: 4.58.1.1: Database andkylee: 75548 kilobytes LOADed.
Msg 623, Level 21, State 1:
Server 'TEST', Line 1:
Adaptive Server failed to retrieve a row via its RID in database 'andkylee'
because the target page is not a data page. Rid pageid = 0x16381; row num = 0x0.

Page pointer = 0x240FA000, pageno = 0, status = 0x300, ptnid = 99, indexid = 0,
level = 0.

 

 

Inspecting the contents of page pageid = 0x16381 with dbcc page shows that it is a transaction log page:

0x16381 = 91009 (decimal)

ptnid = 8, i.e. syslogs
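For reference, the page can be inspected roughly as follows. dbcc page is an undocumented command and its exact argument list varies across ASE versions, so treat the call below as an assumption rather than exact syntax; the dbid 4 is only an example value:

select db_id('andkylee')     -- look up the database id first
go
dbcc traceon(3604)           -- redirect dbcc output to the client session instead of the errorlog
go
dbcc page(4, 91009)          -- 4 = dbid returned above (example value); 91009 = 0x16381
go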

 

Without specifying a point in time, loading the entire transaction log dump actually succeeds! But that is not the result I want.
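In other words, simply loading the whole dump and then bringing the database online works (a minimal sketch):

load tran andkylee from "d:/andkylee_tran2.dmp"
go
online database andkylee
go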

 

My inference is that the transaction log dump contains a logical page error somewhere near the point in time until_time='Jun 28 2010 5:24:10 PM'.

 

 

Next, try shifting the time point to see whether the recovery can succeed.
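For example, retry the load with until_time nudged a few minutes earlier than the failing point; the shifted timestamp below is made up purely for illustration:

load tran andkylee from "d:/andkylee_tran2.dmp"
     with until_time = 'Jun 28 2010 5:20:00 PM'
go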

 

 
