ROS error: environment variable GAZEBO_MODE is not set when launching the intrinsic calibration program in action mode

This post describes a common problem in a Gazebo simulation setup: a launch error caused by the environment variable GAZEBO_MODE not being set. It walks through the fix, which is to set the variable permanently by adding an export line to the .bashrc file.

 

When launching the intrinsic calibration program in action mode, the following error is reported for GAZEBO_MODE:

Invalid <arg> tag: environment variable 'GAZEBO_MODE' is not set. 

Arg xml is <arg default="$(env GAZEBO_MODE)" doc="gazebo_mode type [true, false]" name="use_gazebo"/>
The traceback for the exception was written to the log file
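roslaunch resolves the $(env GAZEBO_MODE) substitution while parsing the launch file, and $(env ...) fails with exactly this message when the variable does not exist. Besides exporting the variable (the fix below), the launch file itself could tolerate an unset variable by using $(optenv ...), which falls back to a default value. A minimal sketch, assuming you are free to edit the launch file that contains the <arg> tag quoted above:

<!-- Alternative fix inside the launch file: fall back to "false" when GAZEBO_MODE is unset -->
<arg name="use_gazebo" default="$(optenv GAZEBO_MODE false)" doc="gazebo_mode type [true, false]"/>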

 

 

Solution:

cd ~

vim .bashrc

# Add the following line at the end of the file
export GAZEBO_MODE=false
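
The export only takes effect in shells that re-read .bashrc, so source the file (or open a new terminal) and verify the value before launching again. A minimal sketch; the package and launch file names in the last line are placeholders for whatever command starts your calibration program:

source ~/.bashrc
echo $GAZEBO_MODE    # should print: false

# Alternatively, set the variable for a single launch only, without editing .bashrc:
GAZEBO_MODE=false roslaunch <your_package> <your_calibration>.launch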

 
