MAX_PATH、IN、OUT宏常量

本文详细介绍了C语言中MAX_PATH宏常量的作用,解释了其在VC++6.0环境中定义的最长全路径名长度为260。此外,还探讨了IN、OUT和INOUT宏常量的使用场景,分别用于标记输入、输出和输入输出参数。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

MAX_PATH宏常量

MAX_PATH是C语言运行时库中通过#define指令定义的一个宏常量,它定义了编译器所支持的最长全路径名的长度。

在 VC++ 6.0 中,MAX_PATH(及其 C 运行时库中的对应宏 _MAX_PATH)的值均为 260。

所属头文件:C 运行时库的 _MAX_PATH 定义于 stdlib.h;Windows API 的 MAX_PATH 则定义于 minwindef.h(通过 windows.h 间接引入)。

IN、OUT宏常量

#define OUT
#define IN
#define INOUT

这些宏都被定义为空,不会生成任何代码,仅起到逻辑标注(提示参数用途)的作用:
IN 表示输入参数,指针指向的值不会修改
OUT表示输出参数,指针指向的值会修改,且不会读
INOUT 表示输入输出参数,指针指向的值会修改,且会读取

 

import os import cv2 import base64 import json import numpy as np from channels.generic.websocket import AsyncWebsocketConsumer from django.conf import settings from ultralytics import YOLO import time import asyncio import logging import uuid import json import cv2 import base64 from index.page_4_1_auto_set import image_detect logger = logging.getLogger(__name__) class VideoDetectionConsumer(AsyncWebsocketConsumer): async def connect(self): logger.info(f"WebSocket 连接尝试: {self.scope}") await self.accept() logger.info("WebSocket 连接已建立") self.last_time = time.time() self.start_time = time.time() self.frame_count = 0 self.total_processing_time = 0 async def disconnect(self, close_code): pass async def receive(self, text_data=None, bytes_data=None): if text_data: text_data_json = json.loads(text_data) action = text_data_json.get('action') video_name = text_data_json.get('video') if action == 'start_detection': # 确保临时目录存在 temp_dir = os.path.join(settings.BASE_DIR, 'temp') if not os.path.exists(temp_dir): os.makedirs(temp_dir) video_path = os.path.join(temp_dir, video_name) # 检查视频文件是否存在 if not os.path.exists(video_path): await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'视频文件不存在: {video_path}' })) return model_path = os.path.join(settings.BASE_DIR, "C:/Users/16660/Desktop/网页搭建/Behaviewer/models/best.pt") output_video_path = os.path.join(settings.MEDIA_ROOT, 'videos', video_name) output_video_dir = os.path.dirname(output_video_path) if not os.path.exists(output_video_dir): os.makedirs(output_video_dir) # 启动视频处理任务 asyncio.create_task(self.detect_objects_in_video(model_path, video_path, output_video_path)) async def detect_objects_in_video(self, model_path, video_path, output_path): try: # 加载模型 model = YOLO(model_path) # 打开视频 cap = cv2.VideoCapture(video_path) if not cap.isOpened(): await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'无法打开视频文件: {video_path}' })) return total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) fps = 
cap.get(cv2.CAP_PROP_FPS) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 创建视频写入器 fourcc = cv2.VideoWriter_fourcc(*'mp4v') out = cv2.VideoWriter(output_path, fourcc, fps, (width, height)) frame_index = 0 while cap.isOpened(): ret, frame = cap.read() if not ret: break # 处理帧 frame_index += 1 start_time = time.time() # 目标检测 results = model(frame) annotated_frame = results[0].plot() # 计算处理时间 processing_time = time.time() - start_time self.total_processing_time += processing_time self.frame_count += 1 # 计算当前FPS current_fps = 1.0 / processing_time if processing_time > 0 else 0 # 添加FPS显示 fps_text = f"FPS: {current_fps:.2f}" cv2.putText(annotated_frame, fps_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) # 保存处理后的帧 out.write(annotated_frame) # 将处理后的帧转换为base64 _, buffer = cv2.imencode('.jpg', annotated_frame) frame_base64 = base64.b64encode(buffer).decode('utf-8') # 计算进度 progress = frame_index / total_frames # 发送处理后的帧 await self.send(text_data=json.dumps({ 'type': 'frame', 'frame': frame_base64, 'objects': len(results[0].boxes), 'fps': current_fps, 'progress': progress })) # 稍微延迟以控制发送速率 await asyncio.sleep(0.01) # 释放资源 cap.release() out.release() # 计算平均FPS avg_fps = self.frame_count / self.total_processing_time if self.total_processing_time > 0 else 0 # 发送完成消息 output_video_url = f'{settings.MEDIA_URL}videos/{os.path.basename(output_path)}' await self.send(text_data=json.dumps({ 'type': 'end', 'output_video_url': output_video_url, 'total_frames': total_frames, 'avg_fps': avg_fps, 'fps': fps })) except Exception as e: await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'处理错误: {str(e)}' })) import traceback traceback.print_exc() logger = logging.getLogger(__name__) class VideoProcessingConsumer(AsyncWebsocketConsumer): async def connect(self): logger.info("WebSocket 连接已建立") await self.accept() self.processing = False self.video_path = None self.config = None self.temp_video_path = None self.cap = 
None self.last_update_time = time.time() async def disconnect(self, close_code): logger.info("WebSocket 连接已关闭") self.processing = False # 清理资源 if self.cap: self.cap.release() logger.info("视频捕获资源已释放") # 清理临时文件 if self.temp_video_path and os.path.exists(self.temp_video_path): try: os.remove(self.temp_video_path) logger.info(f"已删除临时文件: {self.temp_video_path}") except Exception as e: logger.error(f"删除临时文件失败: {str(e)}") async def receive(self, text_data): try: data = json.loads(text_data) command = data.get('command') if command == 'start_processing': # 停止任何正在进行的处理 self.processing = False # 获取参数 video_filename = data.get('video_path') self.config = data.get('config') if not video_filename or not self.config: await self.send_error('缺少必要参数: video_path 或 config') return # 创建临时目录 (使用统一的临时目录) temp_dir = os.path.join(settings.BASE_DIR, 'temp') if not os.path.exists(temp_dir): os.makedirs(temp_dir) logger.info(f"创建临时目录: {temp_dir}") # 构建视频文件路径 self.temp_video_path = os.path.join(temp_dir, video_filename) logger.info(f"视频文件路径: {self.temp_video_path}") # 检查视频文件是否存在 if not os.path.exists(self.temp_video_path): await self.send_error(f'视频文件不存在: {self.temp_video_path}') return # 验证视频文件格式 if not self.is_valid_video_file(self.temp_video_path): await self.send_error(f'无效的视频格式: {self.temp_video_path}') return # 开始处理 self.processing = True self.last_update_time = time.time() asyncio.create_task(self.process_video()) elif command == 'stop_processing': self.processing = False await self.send_status('处理已停止') except Exception as e: logger.error(f"接收消息错误: {str(e)}") await self.send_error(f'处理错误: {str(e)}') def is_valid_video_file(self, file_path): """验证是否为有效的视频文件""" try: cap = cv2.VideoCapture(file_path) if not cap.isOpened(): return False # 检查帧数和尺寸 frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) cap.release() # 基本验证 if frame_count <= 0 or width <= 0 or height <= 0: return False return True 
except: return False async def send_error(self, message): await self.send(text_data=json.dumps({ 'type': 'error', 'message': message })) async def send_status(self, message): await self.send(text_data=json.dumps({ 'type': 'status', 'message': message })) async def send_progress(self, frame_count, total_frames, fps): """发送处理进度""" progress = int((frame_count / total_frames) * 100) await self.send(text_data=json.dumps({ 'type': 'progress', 'progress': progress, 'processed_frames': frame_count, 'fps': fps })) async def process_video(self): """异步处理视频帧并发送结果""" try: # 打开视频文件 self.cap = cv2.VideoCapture(self.temp_video_path) if not self.cap.isOpened(): await self.send_error(f"无法打开视频文件: {self.temp_video_path}") return # 获取视频信息 total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) fps = self.cap.get(cv2.CAP_PROP_FPS) width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) logger.info(f"视频信息: {total_frames}帧, {fps} FPS, {width}x{height}") # 发送初始信息 await self.send(text_data=json.dumps({ 'type': 'info', 'total_frames': total_frames, 'fps': fps })) frame_count = 0 while self.processing and self.cap.isOpened(): ret, frame = self.cap.read() if not ret: break # 处理当前帧 processed_frame, _ = image_detect( frame, self.config['roi'], currentback=self.config.get('currentback', 0), kernal_erode=self.config.get('kernal_erode', 1), kernal_dilate=self.config.get('kernal_dilate', 1), kernal_erode_2=self.config.get('kernal_erode_2', 1), min_area=self.config.get('min_area', 1), max_area=self.config.get('max_area', 10000), adjust_threshold=self.config.get('adjust_threshold', 150), feature=self.config.get('feature', 1) ) # 转换为base64 _, buffer = cv2.imencode('.jpg', processed_frame) frame_base64 = base64.b64encode(buffer).decode('utf-8') # 发送处理后的帧 await self.send(text_data=json.dumps({ 'type': 'frame', 'frame': frame_base64, 'frame_count': frame_count })) frame_count += 1 # 定期发送进度 (每秒最多1次) current_time = time.time() if current_time - 
self.last_update_time > 1.0: # 每秒更新一次 await self.send_progress(frame_count, total_frames, fps) self.last_update_time = current_time # 处理完成 if self.processing: # 发送最终进度 await self.send_progress(frame_count, total_frames, fps) # 生成最终结果图像 _, final_buffer = cv2.imencode('.jpg', processed_frame) final_image = base64.b64encode(final_buffer).decode('utf-8') await self.send(text_data=json.dumps({ 'type': 'end', 'result_image': final_image, 'processed_frames': frame_count, 'total_frames': total_frames })) except Exception as e: logger.error(f"视频处理错误: {str(e)}") await self.send_error(f'视频处理错误: {str(e)}') finally: self.processing = False if self.cap: self.cap.release() self.cap = None报错import os import cv2 import base64 import json import numpy as np from channels.generic.websocket import AsyncWebsocketConsumer from django.conf import settings from ultralytics import YOLO import time import asyncio import logging import uuid import json import cv2 import base64 from index.page_4_1_auto_set import image_detect logger = logging.getLogger(__name__) class VideoDetectionConsumer(AsyncWebsocketConsumer): async def connect(self): logger.info(f"WebSocket 连接尝试: {self.scope}") await self.accept() logger.info("WebSocket 连接已建立") self.last_time = time.time() self.start_time = time.time() self.frame_count = 0 self.total_processing_time = 0 async def disconnect(self, close_code): pass async def receive(self, text_data=None, bytes_data=None): if text_data: text_data_json = json.loads(text_data) action = text_data_json.get('action') video_name = text_data_json.get('video') if action == 'start_detection': # 确保临时目录存在 temp_dir = os.path.join(settings.BASE_DIR, 'temp') if not os.path.exists(temp_dir): os.makedirs(temp_dir) video_path = os.path.join(temp_dir, video_name) # 检查视频文件是否存在 if not os.path.exists(video_path): await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'视频文件不存在: {video_path}' })) return model_path = os.path.join(settings.BASE_DIR, 
"C:/Users/16660/Desktop/网页搭建/Behaviewer/models/best.pt") output_video_path = os.path.join(settings.MEDIA_ROOT, 'videos', video_name) output_video_dir = os.path.dirname(output_video_path) if not os.path.exists(output_video_dir): os.makedirs(output_video_dir) # 启动视频处理任务 asyncio.create_task(self.detect_objects_in_video(model_path, video_path, output_video_path)) async def detect_objects_in_video(self, model_path, video_path, output_path): try: # 加载模型 model = YOLO(model_path) # 打开视频 cap = cv2.VideoCapture(video_path) if not cap.isOpened(): await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'无法打开视频文件: {video_path}' })) return total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) fps = cap.get(cv2.CAP_PROP_FPS) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 创建视频写入器 fourcc = cv2.VideoWriter_fourcc(*'mp4v') out = cv2.VideoWriter(output_path, fourcc, fps, (width, height)) frame_index = 0 while cap.isOpened(): ret, frame = cap.read() if not ret: break # 处理帧 frame_index += 1 start_time = time.time() # 目标检测 results = model(frame) annotated_frame = results[0].plot() # 计算处理时间 processing_time = time.time() - start_time self.total_processing_time += processing_time self.frame_count += 1 # 计算当前FPS current_fps = 1.0 / processing_time if processing_time > 0 else 0 # 添加FPS显示 fps_text = f"FPS: {current_fps:.2f}" cv2.putText(annotated_frame, fps_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) # 保存处理后的帧 out.write(annotated_frame) # 将处理后的帧转换为base64 _, buffer = cv2.imencode('.jpg', annotated_frame) frame_base64 = base64.b64encode(buffer).decode('utf-8') # 计算进度 progress = frame_index / total_frames # 发送处理后的帧 await self.send(text_data=json.dumps({ 'type': 'frame', 'frame': frame_base64, 'objects': len(results[0].boxes), 'fps': current_fps, 'progress': progress })) # 稍微延迟以控制发送速率 await asyncio.sleep(0.01) # 释放资源 cap.release() out.release() # 计算平均FPS avg_fps = self.frame_count / self.total_processing_time if 
self.total_processing_time > 0 else 0 # 发送完成消息 output_video_url = f'{settings.MEDIA_URL}videos/{os.path.basename(output_path)}' await self.send(text_data=json.dumps({ 'type': 'end', 'output_video_url': output_video_url, 'total_frames': total_frames, 'avg_fps': avg_fps, 'fps': fps })) except Exception as e: await self.send(text_data=json.dumps({ 'type': 'error', 'message': f'处理错误: {str(e)}' })) import traceback traceback.print_exc() logger = logging.getLogger(__name__) class VideoProcessingConsumer(AsyncWebsocketConsumer): async def connect(self): logger.info("WebSocket 连接已建立") await self.accept() self.processing = False self.video_path = None self.config = None self.temp_video_path = None self.cap = None self.last_update_time = time.time() async def disconnect(self, close_code): logger.info("WebSocket 连接已关闭") self.processing = False # 清理资源 if self.cap: self.cap.release() logger.info("视频捕获资源已释放") # 清理临时文件 if self.temp_video_path and os.path.exists(self.temp_video_path): try: os.remove(self.temp_video_path) logger.info(f"已删除临时文件: {self.temp_video_path}") except Exception as e: logger.error(f"删除临时文件失败: {str(e)}") async def receive(self, text_data): try: data = json.loads(text_data) command = data.get('command') if command == 'start_processing': # 停止任何正在进行的处理 self.processing = False # 获取参数 video_filename = data.get('video_path') self.config = data.get('config') if not video_filename or not self.config: await self.send_error('缺少必要参数: video_path 或 config') return # 创建临时目录 (使用统一的临时目录) temp_dir = os.path.join(settings.BASE_DIR, 'temp') if not os.path.exists(temp_dir): os.makedirs(temp_dir) logger.info(f"创建临时目录: {temp_dir}") # 构建视频文件路径 self.temp_video_path = os.path.join(temp_dir, video_filename) logger.info(f"视频文件路径: {self.temp_video_path}") # 检查视频文件是否存在 if not os.path.exists(self.temp_video_path): await self.send_error(f'视频文件不存在: {self.temp_video_path}') return # 验证视频文件格式 if not self.is_valid_video_file(self.temp_video_path): await self.send_error(f'无效的视频格式: 
{self.temp_video_path}') return # 开始处理 self.processing = True self.last_update_time = time.time() asyncio.create_task(self.process_video()) elif command == 'stop_processing': self.processing = False await self.send_status('处理已停止') except Exception as e: logger.error(f"接收消息错误: {str(e)}") await self.send_error(f'处理错误: {str(e)}') def is_valid_video_file(self, file_path): """验证是否为有效的视频文件""" try: cap = cv2.VideoCapture(file_path) if not cap.isOpened(): return False # 检查帧数和尺寸 frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) cap.release() # 基本验证 if frame_count <= 0 or width <= 0 or height <= 0: return False return True except: return False async def send_error(self, message): await self.send(text_data=json.dumps({ 'type': 'error', 'message': message })) async def send_status(self, message): await self.send(text_data=json.dumps({ 'type': 'status', 'message': message })) async def send_progress(self, frame_count, total_frames, fps): """发送处理进度""" progress = int((frame_count / total_frames) * 100) await self.send(text_data=json.dumps({ 'type': 'progress', 'progress': progress, 'processed_frames': frame_count, 'fps': fps })) async def process_video(self): """异步处理视频帧并发送结果""" try: # 打开视频文件 self.cap = cv2.VideoCapture(self.temp_video_path) if not self.cap.isOpened(): await self.send_error(f"无法打开视频文件: {self.temp_video_path}") return # 获取视频信息 total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) fps = self.cap.get(cv2.CAP_PROP_FPS) width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) logger.info(f"视频信息: {total_frames}帧, {fps} FPS, {width}x{height}") # 发送初始信息 await self.send(text_data=json.dumps({ 'type': 'info', 'total_frames': total_frames, 'fps': fps })) frame_count = 0 while self.processing and self.cap.isOpened(): ret, frame = self.cap.read() if not ret: break # 处理当前帧 processed_frame, _ = image_detect( frame, 
self.config['roi'], currentback=self.config.get('currentback', 0), kernal_erode=self.config.get('kernal_erode', 1), kernal_dilate=self.config.get('kernal_dilate', 1), kernal_erode_2=self.config.get('kernal_erode_2', 1), min_area=self.config.get('min_area', 1), max_area=self.config.get('max_area', 10000), adjust_threshold=self.config.get('adjust_threshold', 150), feature=self.config.get('feature', 1) ) # 转换为base64 _, buffer = cv2.imencode('.jpg', processed_frame) frame_base64 = base64.b64encode(buffer).decode('utf-8') # 发送处理后的帧 await self.send(text_data=json.dumps({ 'type': 'frame', 'frame': frame_base64, 'frame_count': frame_count })) frame_count += 1 # 定期发送进度 (每秒最多1次) current_time = time.time() if current_time - self.last_update_time > 1.0: # 每秒更新一次 await self.send_progress(frame_count, total_frames, fps) self.last_update_time = current_time # 处理完成 if self.processing: # 发送最终进度 await self.send_progress(frame_count, total_frames, fps) # 生成最终结果图像 _, final_buffer = cv2.imencode('.jpg', processed_frame) final_image = base64.b64encode(final_buffer).decode('utf-8') await self.send(text_data=json.dumps({ 'type': 'end', 'result_image': final_image, 'processed_frames': frame_count, 'total_frames': total_frames })) except Exception as e: logger.error(f"视频处理错误: {str(e)}") await self.send_error(f'视频处理错误: {str(e)}') finally: self.processing = False if self.cap: self.cap.release() self.cap = None报错处理错误: 视频文件不存在: C:\Users\16660\Desktop\网页搭建\Behaviewer\temp\1.mp4
08-07
import gradio as gr import os import json import shutil from json_test import main_pipeline # 常量定义(全局统一路径) INPUT_DIR = r"D:\code\table-transformer-main\input_first" os.makedirs(INPUT_DIR, exist_ok=True) # 确保输入目录存在 def process_pdf(pdf_file,progress=gr.Progress()) -> tuple: """处理PDF文件的核心流程""" try: progress(0.1, desc="开始处理PDF...") # 1. 获取PDF文件名并复制到输入目录 if isinstance(pdf_file, str): pdf_name = os.path.basename(pdf_file) pdf_path = pdf_file else: pdf_name = os.path.basename(pdf_file.name) pdf_path = pdf_file.name target_pdf = os.path.join(INPUT_DIR, pdf_name) shutil.copyfile(pdf_path, target_pdf) progress(0.3, desc="调用模型解析...") # 2. 调用主处理函数生成JSON文件 json_path = main_pipeline(target_pdf) print(f"[DEBUG] JSON路径: {json_path}") # 3. 验证JSON文件是否存在 if not os.path.exists(json_path): raise FileNotFoundError(f"JSON文件未生成: {json_path}") progress(0.8, desc="生成JSON文件...") # 4. 读取JSON内容并返回(同时返回文件路径) with open(json_path, "r", encoding="utf-8") as f: json_data = json.load(f) return json_path, json_data except Exception as e: print(f"处理 PDF 失败: {e}") # 返回错误信息到前端(避免解析失败导致崩溃) return None, {"error": str(e)} def update_status(msg): """更新状态提示""" return {"value": msg, "__type__": "update"} # Gradio界面 with gr.Blocks(title="PDF智能解析系统", theme=gr.themes.Soft()) as demo: gr.Markdown("# 📑 PDF智能解析系统") gr.Markdown("上传PDF文件,自动提取结构化数据并生成JSON结果") with gr.Row(): # 输入区域 with gr.Column(scale=1): file_input = gr.File(label="上传PDF文件", file_types=[".pdf"], file_count="single") submit_btn = gr.Button("开始解析", variant="primary") # 输出区域 with gr.Column(scale=2): json_output = gr.JSON(label="解析结果预览", show_label=True) file_output = gr.File(label="下载JSON文件", interactive=False) status = gr.Textbox(label="处理状态", placeholder="等待文件上传...", interactive=False) # 按钮点击事件 submit_btn.click( fn=lambda: update_status("🔄 正在处理文件,请稍候..."), outputs=status, queue=False ).then( fn=process_pdf, inputs=file_input, outputs=[file_output, json_output], api_name="process_pdf" ).then( fn=lambda: update_status("✅ 处理完成!"), outputs=status, 
queue=False ) if __name__ == "__main__": demo.launch( server_name="127.0.0.1", server_port=7860, # 设置超时时间为 300 秒(5分钟) max_threads=4, # 可选:增加线程数 auth=None, # 可选:如果需要认证 share=True, # 关闭临时共享链接 # 关键参数:请求超时时间(秒) app_kwargs={"timeout": 1000} )
03-26
/** * @file uav_fsm_node.cpp * @brief UAV Takeoff and U-Shaped Path Flight for Simulation * @version 6.1 * @date 2023-10-15 * * 针对仿真环境的优化: * 1. 修复坐标系问题(使用ENU坐标系的数值表示) * 2. 简化起飞控制逻辑 * 3. 增强错误检测和日志输出 * 4. 优化参数默认值 * 5. 确保控制指令正确发送 */ #include <ros/ros.h> #include <geometry_msgs/PoseStamped.h> #include <geometry_msgs/TwistStamped.h> #include <mavros_msgs/CommandBool.h> #include <mavros_msgs/SetMode.h> #include <mavros_msgs/State.h> #include <mavros_msgs/CommandTOL.h> #include <mavros_msgs/PositionTarget.h> #include <mavros_msgs/TimesyncStatus.h> #include <cmath> #include <vector> #include <algorithm> #include <tf2/LinearMath/Quaternion.h> #include <tf2_geometry_msgs/tf2_geometry_msgs.h> #include <angles/angles.h> // 全局变量 mavros_msgs::State current_state; geometry_msgs::PoseStamped current_pose; geometry_msgs::TwistStamped current_vel; mavros_msgs::TimesyncStatus timesync_status; bool pose_received = false; bool vel_received = false; bool timesync_received = false; // 状态回调 void state_cb(const mavros_msgs::State::ConstPtr& msg) { current_state = *msg; } // 位置回调 void pose_cb(const geometry_msgs::PoseStamped::ConstPtr& msg) { current_pose = *msg; pose_received = true; } // 速度回调 void vel_cb(const geometry_msgs::TwistStamped::ConstPtr& msg) { current_vel = *msg; vel_received = true; } // 时间同步回调 void timesync_cb(const mavros_msgs::TimesyncStatus::ConstPtr& msg) { timesync_status = *msg; timesync_received = true; } // 计算两点之间的距离 double distance(const geometry_msgs::Point& p1, const geometry_msgs::Point& p2) { double dx = p1.x - p2.x; double dy = p1.y - p2.y; double dz = p1.z - p2.z; return std::sqrt(dx*dx + dy*dy + dz*dz); } // 计算航点与位置之间的距离 double distance_to_waypoint(const geometry_msgs::Point& position, double wx, double wy, double wz) { double dx = position.x - wx; double dy = position.y - wy; double dz = position.z - wz; return std::sqrt(dx*dx + dy*dy + dz*dz); } // 检查是否到达目标点 bool reached_target(double target_x, double target_y, double target_z, double tolerance = 0.3) { if 
(!pose_received) return false; return distance_to_waypoint(current_pose.pose.position, target_x, target_y, target_z) < tolerance; } // 创建偏航角四元数 geometry_msgs::Quaternion create_quaternion_from_yaw(double yaw_rad) { tf2::Quaternion q; q.setRPY(0, 0, yaw_rad); return tf2::toMsg(q); } // 获取当前偏航角 double get_current_yaw() { if (!pose_received) return 0.0; tf2::Quaternion q( current_pose.pose.orientation.x, current_pose.pose.orientation.y, current_pose.pose.orientation.z, current_pose.pose.orientation.w); tf2::Matrix3x3 m(q); double roll, pitch, yaw; m.getRPY(roll, pitch, yaw); return yaw; } int main(int argc, char **argv) { ros::init(argc, argv, "uav_fsm_node"); ros::NodeHandle nh; // 参数配置 - 仿真环境优化值 double flight_height = 2.0; // 飞行高度(米) - 降低高度 double u_width = 1.5; // U型宽度(米) - 缩小路径 double u_depth = 1.5; // U型深度(米) - 缩小路径 double takeoff_climb_rate = 0.8; // 起飞爬升速率(米/秒) - 降低爬升率 double max_vel = 1.0; // 最大速度(m/s) - 降低速度 double arming_tolerance = 1.0; // 解锁位置容差 - 增大容差 double state_timeout = 120.0; // 状态超时时间(秒) - 延长时间 nh.param("flight_height", flight_height, flight_height); nh.param("u_width", u_width, u_width); nh.param("u_depth", u_depth, u_depth); nh.param("takeoff_climb_rate", takeoff_climb_rate, takeoff_climb_rate); nh.param("max_vel", max_vel, max_vel); nh.param("arming_tolerance", arming_tolerance, arming_tolerance); nh.param("state_timeout", state_timeout, state_timeout); ROS_INFO("UAV Simulation Flight Control v6.1"); ROS_INFO(" - Takeoff Height: %.1f m", flight_height); ROS_INFO(" - Takeoff Climb Rate: %.1f m/s", takeoff_climb_rate); ROS_INFO(" - U-Width: %.1f m", u_width); ROS_INFO(" - U-Depth: %.1f m", u_depth); ROS_INFO(" - Max Velocity: %.1f m/s", max_vel); ROS_INFO(" - State Timeout: %.1f sec", state_timeout); // 订阅者 ros::Subscriber state_sub = nh.subscribe<mavros_msgs::State> ("mavros/state", 10, state_cb); ros::Subscriber pose_sub = nh.subscribe<geometry_msgs::PoseStamped> ("mavros/local_position/pose", 10, pose_cb); ros::Subscriber vel_sub = 
nh.subscribe<geometry_msgs::TwistStamped> ("mavros/local_position/velocity_local", 10, vel_cb); ros::Subscriber timesync_sub = nh.subscribe<mavros_msgs::TimesyncStatus> ("mavros/timesync_status", 10, timesync_cb); // 发布者 ros::Publisher local_pos_pub = nh.advertise<geometry_msgs::PoseStamped> ("mavros/setpoint_position/local", 10); ros::Publisher setpoint_raw_pub = nh.advertise<mavros_msgs::PositionTarget> ("mavros/setpoint_raw/local", 10); // 服务客户端 ros::ServiceClient arming_client = nh.serviceClient<mavros_msgs::CommandBool> ("mavros/cmd/arming"); ros::ServiceClient set_mode_client = nh.serviceClient<mavros_msgs::SetMode> ("mavros/set_mode"); ros::ServiceClient land_client = nh.serviceClient<mavros_msgs::CommandTOL> ("mavros/cmd/land"); // 设置发布频率 double control_rate = 20.0; ros::Rate rate(control_rate); double dt_val = 1.0 / control_rate; // 等待飞控连接 ROS_INFO("Waiting for FCU connection..."); int connection_attempts = 0; while (ros::ok() && !current_state.connected) { ros::spinOnce(); rate.sleep(); if (++connection_attempts % 50 == 0) { ROS_WARN("Still waiting for FCU connection..."); } } ROS_INFO("FCU connected"); // 定义航点类型 enum MovementType { FORWARD, // 机头朝向运动方向 SIDEWAYS // 侧向移动 }; struct Waypoint { double x, y, z; MovementType move_type; // 移动类型 double target_yaw; // 目标偏航角(用于侧向移动) }; // U形路径航点序列(起飞后执行的部分) std::vector<Waypoint> u_path = { // 起点(起飞后的悬停点) {0.0, 0.0, flight_height, FORWARD, 0.0}, // 向前飞行 {u_depth, 0.0, flight_height, FORWARD, 0.0}, // 向右移动 {u_depth, u_width, flight_height, FORWARD, M_PI/2}, // U型终点 {0.0, u_width, flight_height, FORWARD, M_PI}, // 降落准备点 {0.0, u_width, flight_height, FORWARD, M_PI} }; // 创建目标位置消息(用于位置控制) geometry_msgs::PoseStamped target_pose; target_pose.header.frame_id = "map"; // 创建速度控制消息 - 使用ENU坐标系(仿真环境标准) mavros_msgs::PositionTarget vel_cmd; // 关键修复:使用数值代替常量 // FRAME_LOCAL_ENU = 4, FRAME_LOCAL_NED = 1 vel_cmd.coordinate_frame = 4; // MAV_FRAME_LOCAL_ENU (z-up) ROS_INFO("Using ENU coordinate system (value 4) for simulation"); 
vel_cmd.type_mask = 0b0000011111000111 | // 忽略加速度和力 mavros_msgs::PositionTarget::IGNORE_YAW_RATE; // 预先发送设定点 - 关键修复:确保在OFFBOARD模式下持续发送设定点 ROS_INFO("Sending initial setpoints..."); ros::Time last_setpoint_time = ros::Time::now(); for (int i = 0; ros::ok() && i < 100; i++) { // 初始设定点为地面点(0,0,0) target_pose.pose.position.x = 0.0; target_pose.pose.position.y = 0.0; target_pose.pose.position.z = 0.0; target_pose.pose.orientation = create_quaternion_from_yaw(0.0); target_pose.header.stamp = ros::Time::now(); local_pos_pub.publish(target_pose); // 在初始设定点发送期间尝试进入OFFBOARD模式 if (i > 20) { mavros_msgs::SetMode offb_set_mode; offb_set_mode.request.custom_mode = "OFFBOARD"; if (set_mode_client.call(offb_set_mode) && offb_set_mode.response.mode_sent) { ROS_INFO("OFFBOARD mode enabled during initialization"); } } ros::spinOnce(); rate.sleep(); } // 准备服务请求 mavros_msgs::SetMode offb_set_mode; offb_set_mode.request.custom_mode = "OFFBOARD"; mavros_msgs::CommandBool arm_cmd; arm_cmd.request.value = true; // 飞行状态 enum FlightState { PREFLIGHT, OFFBOARD, ARMING, TAKEOFF, FLYING, LANDING, COMPLETED }; FlightState flight_state = PREFLIGHT; size_t current_waypoint = 0; ros::Time last_request = ros::Time::now(); ros::Time state_start_time = ros::Time::now(); bool landing_initiated = false; double target_altitude = flight_height; double current_yaw = 0.0; double target_yaw = 0.0; bool position_landing = false; int arming_attempts = 0; int convergence_count = 0; geometry_msgs::Point current_position; double takeoff_start_z = 0.0; double pos_error = 0.0; // 时间同步计数器 int timesync_warning_count = 0; const int MAX_TIMESYNC_WARNINGS = 10; ROS_INFO("Starting UAV simulation mission..."); while (ros::ok()) { // 获取当前状态 ros::spinOnce(); // 检查状态超时 if ((ros::Time::now() - state_start_time).toSec() > state_timeout) { ROS_ERROR("State timeout in state %d! 
Resetting to PREFLIGHT.", flight_state); flight_state = PREFLIGHT; state_start_time = ros::Time::now(); } // 记录当前位置 if (pose_received) { current_position = current_pose.pose.position; } // 状态机处理 switch (flight_state) { case PREFLIGHT: ROS_INFO_THROTTLE(1.0, "PREFLIGHT: Waiting for OFFBOARD mode"); ROS_INFO_THROTTLE(1.0, "Current mode: %s, Armed: %d", current_state.mode.c_str(), current_state.armed); if (ros::Time::now() - last_request > ros::Duration(5.0)) { if (set_mode_client.call(offb_set_mode)) { if (offb_set_mode.response.mode_sent) { ROS_INFO("OFFBOARD enabled"); flight_state = OFFBOARD; convergence_count = 0; state_start_time = ros::Time::now(); } else { ROS_WARN("OFFBOARD enable failed"); } } else { ROS_WARN("OFFBOARD service call failed"); } last_request = ros::Time::now(); } break; case OFFBOARD: ROS_INFO_THROTTLE(1.0, "OFFBOARD: Waiting for position convergence"); // 保持在地面点(0,0,0) target_pose.pose.position.x = 0.0; target_pose.pose.position.y = 0.0; target_pose.pose.position.z = 0.0; target_pose.pose.orientation = create_quaternion_from_yaw(0.0); target_pose.header.stamp = ros::Time::now(); local_pos_pub.publish(target_pose); // 计算位置误差 if (pose_received) { pos_error = distance_to_waypoint( current_position, 0.0, 0.0, 0.0 ); // 位置收敛检查 if (pos_error < 0.5) { convergence_count++; } else { convergence_count = std::max(0, convergence_count - 1); } } if (current_state.mode != "OFFBOARD") { ROS_WARN("OFFBOARD mode lost! 
Current mode: %s", current_state.mode.c_str()); flight_state = PREFLIGHT; state_start_time = ros::Time::now(); } else if (ros::Time::now() - last_request > ros::Duration(5.0)) { // 位置稳定检查 if (convergence_count > 10 && pos_error < arming_tolerance) { if (arming_client.call(arm_cmd)) { if (arm_cmd.response.success) { ROS_INFO("Armed successfully"); flight_state = TAKEOFF; if (pose_received) { takeoff_start_z = current_pose.pose.position.z; } arming_attempts = 0; state_start_time = ros::Time::now(); } else { arming_attempts++; ROS_WARN("Arming failed (attempt %d)!", arming_attempts); if (arming_attempts > 3) { ROS_ERROR("Too many arming failures. Aborting mission."); ros::shutdown(); } } } else { ROS_WARN("Arming service call failed"); } } else { ROS_WARN_THROTTLE(1.0, "Position not stable for arming: error=%.2fm, convergence=%d/%d", pos_error, convergence_count, 10); } last_request = ros::Time::now(); } break; case TAKEOFF: if (pose_received) { double current_height = current_pose.pose.position.z; double height_error = flight_height - current_height; ROS_INFO_THROTTLE(0.5, "TAKEOFF: Climbing to %.1fm (Current: %.2fm, Error: %.2fm)", flight_height, current_height, height_error); // 获取当前偏航角 current_yaw = get_current_yaw(); // 创建速度控制指令 vel_cmd.velocity.x = 0.0; vel_cmd.velocity.y = 0.0; vel_cmd.velocity.z = takeoff_climb_rate; // 直接使用固定爬升速率 vel_cmd.yaw = current_yaw; // 发布速度控制命令 vel_cmd.header.stamp = ros::Time::now(); setpoint_raw_pub.publish(vel_cmd); // 检查是否达到起飞高度 if (current_height > flight_height - 0.1) { ROS_INFO("Reached takeoff altitude: %.2f m", current_height); flight_state = FLYING; current_waypoint = 0; ROS_INFO("Starting U-shaped path flight"); state_start_time = ros::Time::now(); } } break; case FLYING: if (pose_received) { ROS_INFO_THROTTLE(1.0, "FLYING: Waypoint %zu/%zu (Current Z: %.2f)", current_waypoint+1, u_path.size(), current_pose.pose.position.z); // 获取当前偏航角 current_yaw = get_current_yaw(); // 根据移动类型计算目标偏航角 if (u_path[current_waypoint].move_type 
== FORWARD) { if (current_waypoint < u_path.size() - 1) { double dx = u_path[current_waypoint+1].x - u_path[current_waypoint].x; double dy = u_path[current_waypoint+1].y - u_path[current_waypoint].y; target_yaw = std::atan2(dy, dx); } else { target_yaw = u_path[current_waypoint].target_yaw; } } else { target_yaw = u_path[current_waypoint].target_yaw; } // 平滑偏航角过渡 double yaw_error = angles::shortest_angular_distance(current_yaw, target_yaw); double smoothed_yaw = current_yaw + 0.1 * yaw_error; // 创建位置控制指令 target_pose.pose.position.x = u_path[current_waypoint].x; target_pose.pose.position.y = u_path[current_waypoint].y; target_pose.pose.position.z = u_path[current_waypoint].z; target_pose.pose.orientation = create_quaternion_from_yaw(smoothed_yaw); // 发布位置指令 target_pose.header.stamp = ros::Time::now(); local_pos_pub.publish(target_pose); // 检查是否到达当前航点 if (reached_target( u_path[current_waypoint].x, u_path[current_waypoint].y, u_path[current_waypoint].z )) { ROS_INFO("Reached waypoint %zu: (%.1f, %.1f, %.1f)", current_waypoint, u_path[current_waypoint].x, u_path[current_waypoint].y, u_path[current_waypoint].z); if (current_waypoint < u_path.size() - 1) { current_waypoint++; ROS_INFO("Moving to waypoint %zu: (%.1f, %.1f, %.1f)", current_waypoint, u_path[current_waypoint].x, u_path[current_waypoint].y, u_path[current_waypoint].z); state_start_time = ros::Time::now(); } else { ROS_INFO("Final waypoint reached, initiating landing"); flight_state = LANDING; state_start_time = ros::Time::now(); } } } if (!current_state.armed) { ROS_ERROR("Unexpected disarm during flight!"); flight_state = OFFBOARD; state_start_time = ros::Time::now(); } break; case LANDING: ROS_INFO_THROTTLE(1.0, "LANDING: Descending... (Current Z: %.2f)", pose_received ? 
current_pose.pose.position.z : 0.0); if (!landing_initiated) { mavros_msgs::CommandTOL land_cmd_srv; land_cmd_srv.request.yaw = u_path.back().target_yaw; if (land_client.call(land_cmd_srv)) { if (land_cmd_srv.response.success) { ROS_INFO("Landing command accepted"); landing_initiated = true; } else { ROS_WARN("Land service failed, using position landing"); landing_initiated = true; position_landing = true; if (pose_received) { target_altitude = current_pose.pose.position.z; } else { target_altitude = flight_height; } } } else { ROS_WARN("Land service call failed, using position landing"); landing_initiated = true; position_landing = true; if (pose_received) { target_altitude = current_pose.pose.position.z; } else { target_altitude = flight_height; } } } if (position_landing && pose_received) { // 设置降落位置(XY保持不变) target_pose.pose.position.x = u_path.back().x; target_pose.pose.position.y = u_path.back().y; // 平滑降低高度 if (target_altitude > 0.1) { target_altitude -= 0.05 * dt_val; } else { target_altitude = 0.0; } target_pose.pose.position.z = target_altitude; // 设置偏航角 target_pose.pose.orientation = create_quaternion_from_yaw(u_path.back().target_yaw); // 发布位置指令 target_pose.header.stamp = ros::Time::now(); local_pos_pub.publish(target_pose); } // 检查是否着陆 if (pose_received && current_pose.pose.position.z < 0.2) { ROS_INFO("Landed successfully at (%.2f, %.2f, %.2f)", current_pose.pose.position.x, current_pose.pose.position.y, current_pose.pose.position.z); flight_state = COMPLETED; } break; case COMPLETED: ROS_INFO("Mission completed. Shutting down..."); ros::shutdown(); break; } rate.sleep(); } return 0; } 代码运行后无人机一直在地面附件徘徊,无法上升到设定的1m高度,帮我修改代码
08-04
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值