future.set_result

This article describes what the set_result method of Tornado's Future object does and how it is used. When an asynchronous operation completes, calling set_result stores the result in the Future and triggers the execution of the registered callbacks.


In Tornado, the set_result method of a Future object is responsible for storing the result of an asynchronous operation in the future:

When a callback runs and the asynchronous operation inside it produces a result, it calls future.set_result(result). This triggers the future's _set_done() method, which executes the callbacks registered in future._callbacks; each such callback in turn registers the next step (for example, the coroutine runner's run method) in the IOLoop's callbacks queue via add_callback, to be executed on the next iteration of the loop.
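Schematically, the hookup described above looks like this. The sketch below is a simplified rendition of Tornado's Future (loosely following the structure of tornado 4.x's tornado/concurrent.py, omitting exception and result handling), not the verbatim source:

class Future(object):
    def __init__(self):
        self._done = False
        self._result = None
        self._callbacks = []

    def add_done_callback(self, fn):
        if self._done:
            fn(self)            # already finished: invoke immediately
        else:
            self._callbacks.append(fn)

    def set_result(self, result):
        self._result = result
        self._set_done()        # flips _done and drains _callbacks

    def _set_done(self):
        self._done = True
        for cb in self._callbacks:
            cb(self)            # e.g. schedules Runner.run via IOLoop.add_callback
        self._callbacks = None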

For example, a callback that polls until a result is available and then completes the future:

from tornado.ioloop import IOLoop

def callback(get_result, future):
    result = get_result()  # re-poll the async operation for its result
    if result:
        future.set_result(result)  # fires _set_done() and the chained callbacks
    else:
        # Not ready yet: re-register on the IOLoop and try again next iteration.
        IOLoop.instance().add_callback(callback, get_result, future)
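Putting it together, here is a minimal runnable sketch (assuming Tornado 4.x-style APIs; on_done, poll, and results are illustrative names, not Tornado APIs). A polling callback re-schedules itself on the IOLoop until data arrives, and set_result then fires the registered done-callback:

from tornado.concurrent import Future
from tornado.ioloop import IOLoop

results = []  # stands in for data produced by some async operation

def on_done(future):
    # Runs once set_result() has marked the future done.
    print("got:", future.result())
    IOLoop.current().stop()

def poll(future):
    if results:
        future.set_result(results[0])  # marks the future done, invokes on_done
    else:
        # Not ready: run again on the next IOLoop iteration.
        IOLoop.current().add_callback(poll, future)

future = Future()
future.add_done_callback(on_done)
loop = IOLoop.current()
loop.add_callback(poll, future)
loop.call_later(0.1, results.append, "hello")  # data arrives later
loop.start()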
