Notes on using rospy.get_time()

I ran into a problem with this, so I am writing it down.
rospy.get_time() returns the current ROS time as floating-point seconds (its counterpart rospy.get_rostime() returns the same clock as a rospy.Time object). To use it you must first have initialized a ROS node, and even then the returned value can be unexpected until ROS time has actually started, as the following demonstration shows:

import sys
import time

import rospy

def main():
    rospy.init_node("illustration_node")
    # Immediately after init_node(), ROS time may not have started yet
    # (with /use_sim_time set, not until the first /clock message arrives)
    print("right after init_node():", rospy.get_time())
    time.sleep(1)
    # One second later the clock is available and a valid time is returned
    print("1 second later:", rospy.get_time())
    return 0

if __name__ == '__main__':
    sys.exit(main())

The output (printed as tuples because the script was run with Python 2) is:

('right after init_node():', 0.0)
('1 second later:', 22238.047)

Even though init_node() has already returned, the first call prints 0.0. The small, non-epoch clock value (22238 s instead of Unix time) indicates the node was running with simulated time (/use_sim_time); in that mode rospy.get_time() keeps returning 0.0 until the node receives its first message on /clock. Calling rospy.get_time() before init_node() is worse still: it raises a ROSInitException complaining that time is not initialized.
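
If your code needs a valid timestamp right away (for stamping messages, measuring durations, and so on), a common pattern is to block until rospy.get_time() returns a non-zero value. Below is a minimal sketch of that pattern; the helper name wait_for_ros_time and its timeout/poll parameters are only illustrative, and it assumes rospy.init_node() has already been called:

import time
import rospy

def wait_for_ros_time(timeout=10.0, poll=0.05):
    """Block until rospy.get_time() is non-zero, i.e. until ROS time has
    started (with /use_sim_time, until the first /clock message arrives).
    Assumes rospy.init_node() has already been called."""
    deadline = time.time() + timeout
    while rospy.get_time() == 0.0 and not rospy.is_shutdown():
        if time.time() > deadline:
            raise rospy.ROSException("timed out waiting for ROS time to start")
        time.sleep(poll)  # wall-clock sleep, independent of the still-stalled ROS clock

Once this returns, rospy.get_time() (and rospy.Time.now(), which exposes the same clock as a rospy.Time object) can be used safely.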