JSON utility class for AX 2012

JSON utility class for AX 2012

One possibility is to send JSON messages to Microsoft Dynamics AX which is a lightweight data-interchange format. First rule is the table field that will store the received JSON message should be of type ‘String’ and fixed character length of 1999 which we can add it as a new field in ‘RetailTransactionTable’. In this scenario we will receive a JSON message with payment information for particular Sales order. So let’s go step by step:

Step 1

The first st

根据上述要求修改下列代码: #! /usr/bin/env python3 import cv2 as cv import numpy as np import torch import message_filters import rospy import sys import time from sensor_msgs.msg import Image from cv_bridge import CvBridge from rospkg import RosPack # get abs path from os import path # get home path from gazebo_msgs.msg import ModelStates from geometry_msgs.msg import * from pyquaternion import Quaternion as PyQuaternion # 新增全局变量 last_process_time = 0 processing_fps = 5 # 设置实时处理帧率(5-10Hz为宜) # Global variables path_yolo = path.join(path.expanduser('~'), 'yolov5') path_vision = RosPack().get_path('vision') path_weigths = path.join(path_vision, 'weigths') cam_point = (-0.44, -0.5, 1.58) height_tavolo = 0.74 dist_tavolo = None origin = None model = None model_orientation = None legoClasses = ['X1-Y1-Z2', 'X1-Y2-Z1', 'X1-Y2-Z2', 'X1-Y2-Z2-CHAMFER', 'X1-Y2-Z2-TWINFILLET', 'X1-Y3-Z2', 'X1-Y3-Z2-FILLET', 'X1-Y4-Z1', 'X1-Y4-Z2', 'X2-Y2-Z2', 'X2-Y2-Z2-FILLET'] argv = sys.argv a_show = '-show' in argv # Utility Functions def get_dist_tavolo(depth, hsv, img_draw): global dist_tavolo #color = (120,1,190) #mask = get_lego_mask(color, hsv, (5, 5, 5)) #dist_tavolo = depth[mask].max() #if dist_tavolo > 1: dist_tavolo -= height_tavolo dist_tavolo = np.nanmax(depth) def get_origin(img): global origin origin = np.array(img.shape[1::-1]) // 2 def get_lego_distance(depth): return depth.min() def get_lego_color(center, rgb): return rgb[center].tolist() def get_lego_mask(color, hsv, toll = (20, 20, 255)): thresh = np.array(color) mintoll = thresh - np.array([toll[0], toll[1], min(thresh[2]-1, toll[2])]) maxtoll = thresh + np.array(toll) return cv.inRange(hsv, mintoll, maxtoll) def getDepthAxis(height, lego): X, Y, Z = (int(x) for x in lego[1:8:3]) #Z = (0.038, 0.057) X = (0.031, 0.063) Y = (0.031, 0.063, 0.095, 0.127) rapZ = height / 0.019 - 1 pinZ = round(rapZ) rapXY = height / 0.032 pinXY = round(rapXY) errZ = abs(pinZ - rapZ) + max(pinZ - 2, 0) errXY = abs(pinXY - rapXY) + max(pinXY - 4, 0) if errZ < errXY: return pinZ, 2, pinZ == Z # pin, is ax Z, match else: if pinXY == Y: return pinXY, 1, True else: return pinXY, 0, pinXY == X def point_distorption(point, height, origin): p = dist_tavolo / (dist_tavolo - height) point = point - origin return p * point + origin def point_inverse_distortption(point, height): p = dist_tavolo / (dist_tavolo - height) point = point - origin return point / p + origin def myimshow(title, img): def mouseCB(event,x,y,a,b): print(x, y, img[y, x], "\r",end='',flush=True) print("\033[K", end='') cv.imshow(title, img) cv.setMouseCallback(title, mouseCB) cv.waitKey() # ----------------- LOCALIZATION ----------------- # def process_item(imgs, item): #images rgb, hsv, depth, img_draw = imgs #obtaining Yolo informations (class, coordinates, center) x1, y1, x2, y2, cn, cl, nm = item.values() mar = 15 x1, y1 = max(mar, x1), max(mar, y1) x2, y2 = min(rgb.shape[1]-mar, x2), min(rgb.shape[0]-mar, y2) boxMin = np.array((x1-mar, y1-mar)) x1, y1, x2, y2 = np.int0((x1, y1, x2, y2)) boxCenter = (y2 + y1) // 2, (x2 + x1) // 2 color = get_lego_color(boxCenter, rgb) hsvcolor = get_lego_color(boxCenter, hsv) sliceBox = slice(y1-mar, y2+mar), slice(x1-mar, x2+mar) #crop img with coordinate bounding box; computing all imgs l_rgb = rgb[sliceBox] l_hsv = hsv[sliceBox] if a_show: cv.rectangle(img_draw, (x1,y1),(x2,y2), color, 2) l_depth = depth[sliceBox] l_mask = get_lego_mask(hsvcolor, l_hsv) # filter mask by color l_mask = np.where(l_depth < dist_tavolo, l_mask, 0) l_depth = np.where(l_mask != 0, l_depth, dist_tavolo) #myimshow("asda", hsv) #getting lego height from camera and table l_dist = get_lego_distance(l_depth) l_height = dist_tavolo - l_dist #masking l_top_mask = cv.inRange(l_depth, l_dist-0.002, l_dist+0.002) #cv.bitwise_xor(img_draw,img_draw,img_draw, mask=cv.inRange(depth, l_dist-0.002, l_dist+0.002)) #myimshow("hmask", l_top_mask) # model detect orientation depth_borded = np.zeros(depth.shape, dtype=np.float32) depth_borded[sliceBox] = l_depth depth_image = cv.normalize( depth_borded, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U ) depth_image = cv.cvtColor(depth_image, cv.COLOR_GRAY2RGB).astype(np.uint8) #yolo in order to keep te orientation #print("Model orientation: Start...", end='\r') model_orientation.conf = 0.7 results = model_orientation(depth_image) pandino = [] pandino = results.pandas().xyxy[0].to_dict(orient="records") n = len(pandino) #print("Model orientation: Finish", n) # Adjust prediction pinN, ax, isCorrect = getDepthAxis(l_height, nm) if not isCorrect and ax == 2: if cl in (1,2,3) and pinN in (1, 2): # X1-Y2-Z*, *1,2,2-CHAMFER cl = 1 if pinN == 1 else 2 # -> Z'pinN' elif cl in (7, 8) and pinN in (1, 2): # X1-Y4-Z*, *1,2 cl = 7 if pinN == 1 else 8 # -> Z'pinN' elif pinN == -1: nm = "{} -> {}".format(nm, "Target") else: print("[Warning] Error in classification") elif not isCorrect: ax = 1 if cl in (0, 2, 5, 8) and pinN <= 4: # X1-Y*-Z2, *1,2,3,4 cl = (2, 5, 8)[pinN-2] # -> Y'pinN' elif cl in (1, 7) and pinN in (2, 4): # X1-Y*-Z1, *2,4 cl = 1 if pinN == 2 else 7 # -> Y'pinN' elif cl == 9 and pinN == 1: # X2-Y2-Z2 cl = 2 # -> X1 ax = 0 else: print("[Warning] Error in classification") nm = legoClasses[cl] if n != 1: print("[Warning] Classification not found") or_cn, or_cl, or_nm = ['?']*3 or_nm = ('lato', 'lato', 'sopra/sotto')[ax] else: print() #obtaining Yolo informations (class, coordinates, center) or_item = pandino[0] or_cn, or_cl, or_nm = or_item['confidence'], or_item['class'], or_item['name'] if or_nm == 'sotto': ax = 2 if or_nm == 'lato' and ax == 2: ax = 1 #--- #creating silouette top surface lego contours, hierarchy = cv.findContours(l_top_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) top_center = np.zeros(2) for cnt in contours: tmp_center, top_size, top_angle = cv.minAreaRect(cnt) top_center += np.array(tmp_center) top_center = boxMin + top_center / len(contours) #creating silouette lego contours, hierarchy = cv.findContours(l_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) if len(contours) == 0: return None cnt = contours[0] l_center, l_size, l_angle = cv.minAreaRect(cnt) l_center += boxMin if ax != 2: l_angle = top_angle l_box = cv.boxPoints((l_center, l_size, l_angle)) if l_size[0] <=3 or l_size[1] <=3: cv.drawContours(img_draw, np.int0([l_box]), 0, (0,0,0), 2) return None # filter out artifacts if a_show: cv.drawContours(img_draw, np.int0([l_box]), 0, color, 2) # silouette distorption # get vertexs distance from origin top_box = l_box.copy() vertexs_norm = [(i, np.linalg.norm(vec - origin)) for vec, i in zip(l_box, range(4))] vertexs_norm.sort(key=lambda tup: tup[1]) # get closest vertex iver = vertexs_norm[0][0] vec = l_box[iver] # distorping closest vertex if or_nm == 'sopra': l_height -= 0.019 top_box[iver] = point_distorption(l_box[iver], l_height, origin) v0 = top_box[iver] - vec # adapt adiacents veretx v1 = l_box[iver - 3] - vec # i - 3 = i+1 % 4 v2 = l_box[iver - 1] - vec top_box[iver - 3] += np.dot(v0, v2) / np.dot(v2, v2) * v2 top_box[iver - 1] += np.dot(v0, v1) / np.dot(v1, v1) * v1 l_center = (top_box[0] + top_box[2]) / 2 if a_show: cv.drawContours(img_draw, np.int0([top_box]), 0, (5,5,5), 2) cv.circle(img_draw, np.int0(top_box[iver]),1, (0,0,255),1,cv.LINE_AA) #rotation and axis drawing if or_nm in ('sopra', 'sotto', 'sopra/sotto', '?'): # fin x, y directions (dirX, dirY) dirZ = np.array((0,0,1)) if or_nm == 'sotto': dirZ = np.array((0,0,-1)) projdir = l_center - top_center if np.linalg.norm(projdir) < l_size[0] / 10: dirY = top_box[0] - top_box[1] dirX = top_box[0] - top_box[-1] if np.linalg.norm(dirY) < np.linalg.norm(dirX): dirX, dirY = dirY, dirX projdir = dirY * np.dot(dirY, projdir) edgeFar = [ver for ver in top_box if np.dot(ver - l_center, projdir) >= 0][:2] dirY = (edgeFar[0] + edgeFar[1]) / 2 - l_center dirY /= np.linalg.norm(dirY) dirY = np.array((*dirY, 0)) dirX = np.cross(dirZ, dirY) elif or_nm == "lato": # find pin direction (dirZ) edgePin = [ver for ver in top_box if np.dot(ver - l_center, l_center - top_center) >= 0][:2] dirZ = (edgePin[0] + edgePin[1]) / 2 - l_center dirZ /= np.linalg.norm(dirZ) dirZ = np.array((*dirZ, 0)) if cl == 10: if top_size[1] > top_size[0]: top_size = top_size[::-1] if top_size[0] / top_size[1] < 1.7: ax = 0 if ax == 0: vx,vy,x,y = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01) dir = np.array((vx, vy)) vertexs_distance = [abs(np.dot(ver - l_center, dir)) for ver in edgePin] iverFar = np.array(vertexs_distance).argmin() dirY = edgePin[iverFar] - edgePin[iverFar-1] dirY /= np.linalg.norm(dirY) dirY = np.array((*dirY, 0)) dirX = np.cross(dirZ, dirY) if a_show: cv.circle(img_draw, np.int0(edgePin[iverFar]), 5, (70,10,50), 1) #cv.line(img_draw, np.int0(l_center), np.int0(l_center+np.array([int(vx*100),int(vy*100)])),(0,0,255), 3) if ax == 1: dirY = np.array((0,0,1)) dirX = np.cross(dirZ, dirY) if a_show: cv.line(img_draw, *np.int0(edgePin), (255,255,0), 2) l_center = point_inverse_distortption(l_center, l_height) # post rotation extra theta = 0 if cl == 1 and ax == 1: theta = 1.715224 - np.pi / 2 if cl == 3 and or_nm == 'sotto': theta = 2.359515 - np.pi if cl == 4 and ax == 1: theta = 2.145295 - np.pi if cl == 6 and or_nm == 'sotto': theta = 2.645291 - np.pi if cl == 10 and or_nm == 'sotto': theta = 2.496793 - np.pi rotX = PyQuaternion(axis=dirX, angle=theta) dirY = rotX.rotate(dirY) dirZ = rotX.rotate(dirZ) if a_show: # draw frame lenFrame = 50 unit_z = 0.031 unit_x = 22 * 0.8039 / dist_tavolo x_to_z = lenFrame * unit_z/unit_x center = np.int0(l_center) origin_from_top = origin - l_center endX = point_distorption(lenFrame * dirX[:2], x_to_z * dirX[2], origin_from_top) frameX = (center, center + np.int0(endX)) endY = point_distorption(lenFrame * dirY[:2], x_to_z * dirY[2], origin_from_top) frameY = (center, center + np.int0(endY)) endZ = point_distorption(lenFrame * dirZ[:2], x_to_z * dirZ[2], origin_from_top) frameZ = (center, center + np.int0(endZ)) cv.line(img_draw, *frameX, (0,0,255), 2) cv.line(img_draw, *frameY, (0,255,0), 2) cv.line(img_draw, *frameZ, (255,0,0), 2) # --- # draw text if or_cl != '?': or_cn = ['SIDE', 'UP', 'DOWN'][or_cl] text = "{} {:.2f} {}".format(nm, cn, or_cn) (text_width, text_height) = cv.getTextSize(text, cv.FONT_HERSHEY_DUPLEX, 0.4, 1)[0] text_offset_x = boxCenter[1] - text_width // 2 text_offset_y = y1 - text_height box_coords = ((text_offset_x - 1, text_offset_y + 1), (text_offset_x + text_width + 1, text_offset_y - text_height - 1)) cv.rectangle(img_draw, box_coords[0], box_coords[1], (210,210,10), cv.FILLED) cv.putText(img_draw, text, (text_offset_x, text_offset_y), cv.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255), 1) def getAngle(vec, ax): vec = np.array(vec) if not vec.any(): return 0 vec = vec / np.linalg.norm(vec) wise = 1 if vec[-1] >= 0 else -1 dotclamp = max(-1, min(1, np.dot(vec, np.array(ax)))) return wise * np.arccos(dotclamp) msg = ModelStates() msg.name = nm #fov = 1.047198 #rap = np.tan(fov) #print("rap: ", rap) xyz = np.array((l_center[0], l_center[1], l_height / 2 + height_tavolo)) xyz[:2] /= rgb.shape[1], rgb.shape[0] xyz[:2] -= 0.5 xyz[:2] *= (-0.968, 0.691) xyz[:2] *= dist_tavolo / 0.84 xyz[:2] += cam_point[:2] rdirX, rdirY, rdirZ = dirX, dirY, dirZ rdirX[0] *= -1 rdirY[0] *= -1 rdirZ[0] *= -1 qz1 = PyQuaternion(axis=(0,0,1), angle=-getAngle(dirZ[:2], (1,0))) rdirZ = qz1.rotate(dirZ) qy2 = PyQuaternion(axis=(0,1,0), angle=-getAngle((rdirZ[2],rdirZ[0]), (1,0))) rdirX = qy2.rotate(qz1.rotate(rdirX)) qz3 = PyQuaternion(axis=(0,0,1), angle=-getAngle(rdirX[:2], (1,0))) rot = qz3 * qy2 * qz1 rot = rot.inverse msg.pose = Pose(Point(*xyz), Quaternion(x=rot.x,y=rot.y,z=rot.z,w=rot.w)) #pub.publish(msg) #print(msg) return msg #image processing def process_image(rgb, depth): img_draw = rgb.copy() hsv = cv.cvtColor(rgb, cv.COLOR_BGR2HSV) get_dist_tavolo(depth, hsv, img_draw) get_origin(rgb) #results collecting localization #print("Model localization: Start...",end='\r') model.conf = 0.6 results = model(rgb) pandino = results.pandas().xyxy[0].to_dict(orient="records") #print("Model localization: Finish ") # ---- if depth is not None: imgs = (rgb, hsv, depth, img_draw) results = [process_item(imgs, item) for item in pandino] # ---- msg = ModelStates() for point in results: if point is not None: msg.name.append(point.name) msg.pose.append(point.pose) pub.publish(msg) if a_show: cv.imshow("vision-results.png", img_draw) cv.waitKey() pass def process_CB(image_rgb, image_depth): global last_time t_start = time.time() if t_start - last_time < 0.2: return last_time = t_start rgb = CvBridge().imgmsg_to_cv2(image_rgb, "bgr8") depth = CvBridge().imgmsg_to_cv2(image_depth, "32FC1") process_image(rgb, depth) print("Time:", time.time() - t_start) # 打印处理时间,用于性能监控 # t_start = time.time() # #from standard message image to opencv image # rgb = CvBridge().imgmsg_to_cv2(image_rgb, "bgr8") # depth = CvBridge().imgmsg_to_cv2(image_depth, "32FC1") # process_image(rgb, depth) # print("Time:", time.time() - t_start) # rospy.signal_shutdown(0) # pass #init node function def start_node(): global pub print("Starting Node Vision 1.0") rospy.init_node('vision') print("Subscribing to camera images") #topics subscription rgb = message_filters.Subscriber("/camera/color/image_raw", Image,queue_size=1) depth = message_filters.Subscriber("/camera/depth/image_raw", Image,queue_size=1) #publisher results pub=rospy.Publisher("lego_detections", ModelStates, queue_size=1) print("Localization is starting.. ") print("(Waiting for images..)", end='\r'), print(end='\033[K') #images synchronization syncro = message_filters.TimeSynchronizer([rgb, depth], 1, reset=True) syncro.registerCallback(process_CB) #keep node always alive rospy.spin() pass def load_models(): global model, model_orientation #yolo model and weights classification print("Loading model best.pt") weight = path.join(path_weigths, 'best.pt') model = torch.hub.load(path_yolo,'custom',path=weight, source='local') #yolo model and weights orientation print("Loading model orientation.pt") weight = path.join(path_weigths, 'depth.pt') model_orientation = torch.hub.load(path_yolo,'custom',path=weight, source='local') pass if __name__ == '__main__': load_models() try: start_node() except rospy.ROSInterruptException: pass
10-23
现有一通过摄像头结合yolo获取图像进行识别定位再通过机械臂运动控制程序进行臂迹规划进行抓取的项目,但是该项目中有一个现象是当机械臂开始运动时摄像头输出画面会自动锁死图像不再实时获取图像,以下是运动控制代码#!/usr/bin/python3 import os import math import copy import json import actionlib import control_msgs.msg from controller import ArmController from gazebo_msgs.msg import ModelStates import rospy from pyquaternion import Quaternion as PyQuaternion import numpy as np from gazebo_ros_link_attacher.srv import SetStatic, SetStaticRequest, SetStaticResponse from gazebo_ros_link_attacher.srv import Attach, AttachRequest, AttachResponse PKG_PATH = os.path.dirname(os.path.abspath(__file__)) MODELS_INFO = { "X1-Y2-Z1": { "home": [0.264589, -0.293903, 0.777] }, "X2-Y2-Z2": { "home": [0.277866, -0.724482, 0.777] }, "X1-Y3-Z2": { "home": [0.268053, -0.513924, 0.777] }, "X1-Y2-Z2": { "home": [0.429198, -0.293903, 0.777] }, "X1-Y2-Z2-CHAMFER": { "home": [0.592619, -0.293903, 0.777] }, "X1-Y4-Z2": { "home": [0.108812, -0.716057, 0.777] }, "X1-Y1-Z2": { "home": [0.088808, -0.295820, 0.777] }, "X1-Y2-Z2-TWINFILLET": { "home": [0.103547, -0.501132, 0.777] }, "X1-Y3-Z2-FILLET": { "home": [0.433739, -0.507130, 0.777] }, "X1-Y4-Z1": { "home": [0.589908, -0.501033, 0.777] }, "X2-Y2-Z2-FILLET": { "home": [0.442505, -0.727271, 0.777] } } for model, model_info in MODELS_INFO.items(): pass #MODELS_INFO[model]["home"] = model_info["home"] + np.array([0.0, 0.10, 0.0]) for model, info in MODELS_INFO.items(): model_json_path = os.path.join(PKG_PATH, "..", "models", f"lego_{model}", "model.json") # make path absolute model_json_path = os.path.abspath(model_json_path) # check path exists if not os.path.exists(model_json_path): raise FileNotFoundError(f"Model file {model_json_path} not found") model_json = json.load(open(model_json_path, "r")) corners = np.array(model_json["corners"]) size_x = (np.max(corners[:, 0]) - np.min(corners[:, 0])) size_y = (np.max(corners[:, 1]) - np.min(corners[:, 1])) size_z = (np.max(corners[:, 2]) - np.min(corners[:, 2])) #print(f"{model}: {size_x:.3f} x {size_y:.3f} x {size_z:.3f}") MODELS_INFO[model]["size"] = (size_x, size_y, size_z) # Compensate for the interlocking height INTERLOCKING_OFFSET = 0.019 SAFE_X = -0.40 SAFE_Y = -0.13 SURFACE_Z = 0.774 # Resting orientation of the end effector DEFAULT_QUAT = PyQuaternion(axis=(0, 1, 0), angle=math.pi) # Resting position of the end effector DEFAULT_POS = (-0.1, -0.2, 1.2) DEFAULT_PATH_TOLERANCE = control_msgs.msg.JointTolerance() DEFAULT_PATH_TOLERANCE.name = "path_tolerance" DEFAULT_PATH_TOLERANCE.velocity = 10 def get_gazebo_model_name(model_name, vision_model_pose): """ Get the name of the model inside gazebo. It is needed for link attacher plugin. """ models = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=None) epsilon = 0.05 for gazebo_model_name, model_pose in zip(models.name, models.pose): if model_name not in gazebo_model_name: continue # Get everything inside a square of side epsilon centered in vision_model_pose ds = abs(model_pose.position.x - vision_model_pose.position.x) + abs(model_pose.position.y - vision_model_pose.position.y) if ds <= epsilon: return gazebo_model_name raise ValueError(f"Model {model_name} at position {vision_model_pose.position.x} {vision_model_pose.position.y} was not found!") def get_model_name(gazebo_model_name): return gazebo_model_name.replace("lego_", "").split("_", maxsplit=1)[0] def get_legos_pos(vision=False): #get legos position reading vision topic if vision: # 从视觉识别主题获取数据 legos = rospy.wait_for_message("/lego_detections", ModelStates, timeout=None) else: # 从传感器主题获取数据 models = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=None) legos = ModelStates() for name, pose in zip(models.name, models.pose): if "X" not in name: continue name = get_model_name(name) legos.name.append(name) legos.pose.append(pose) return [(lego_name, lego_pose) for lego_name, lego_pose in zip(legos.name, legos.pose)] def straighten(model_pose, gazebo_model_name): x = model_pose.position.x y = model_pose.position.y z = model_pose.position.z model_quat = PyQuaternion( x=model_pose.orientation.x, y=model_pose.orientation.y, z=model_pose.orientation.z, w=model_pose.orientation.w) model_size = MODELS_INFO[get_model_name(gazebo_model_name)]["size"] """ Calculate approach quaternion and target quaternion """ facing_direction = get_axis_facing_camera(model_quat) approach_angle = get_approach_angle(model_quat, facing_direction) print(f"Lego is facing {facing_direction}") print(f"Angle of approaching measures {approach_angle:.2f} deg") # Calculate approach quat approach_quat = get_approach_quat(facing_direction, approach_angle) # Get above the object controller.move_to(x, y, target_quat=approach_quat) # Calculate target quat regrip_quat = DEFAULT_QUAT if facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0): # Side target_quat = DEFAULT_QUAT pitch_angle = -math.pi/2 + 0.2 if abs(approach_angle) < math.pi/2: target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=math.pi/2) else: target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi/2) target_quat = PyQuaternion(axis=(0, 1, 0), angle=pitch_angle) * target_quat if facing_direction == (0, 1, 0): regrip_quat = PyQuaternion(axis=(0, 0, 1), angle=math.pi/2) * regrip_quat elif facing_direction == (0, 0, -1): """ Pre-positioning """ controller.move_to(z=z, target_quat=approach_quat) close_gripper(gazebo_model_name, model_size[0]) tmp_quat = PyQuaternion(axis=(0, 0, 1), angle=2*math.pi/6) * DEFAULT_QUAT controller.move_to(SAFE_X, SAFE_Y, z+0.05, target_quat=tmp_quat, z_raise=0.1) # Move to safe position controller.move_to(z=z) open_gripper(gazebo_model_name) approach_quat = tmp_quat * PyQuaternion(axis=(1, 0, 0), angle=math.pi/2) target_quat = approach_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi) # Add a yaw rotation of 180 deg regrip_quat = tmp_quat * PyQuaternion(axis=(0, 0, 1), angle=math.pi) else: target_quat = DEFAULT_QUAT target_quat = target_quat * PyQuaternion(axis=(0, 0, 1), angle=-math.pi/2) """ Grip the model """ if facing_direction == (0, 0, 1) or facing_direction == (0, 0, -1): closure = model_size[0] z = SURFACE_Z + model_size[2] / 2 elif facing_direction == (1, 0, 0): closure = model_size[1] z = SURFACE_Z + model_size[0] / 2 elif facing_direction == (0, 1, 0): closure = model_size[0] z = SURFACE_Z + model_size[1] / 2 controller.move_to(z=z, target_quat=approach_quat) close_gripper(gazebo_model_name, closure) """ Straighten model if needed """ if facing_direction != (0, 0, 1): z = SURFACE_Z + model_size[2]/2 controller.move_to(z=z+0.05, target_quat=target_quat, z_raise=0.1) controller.move(dz=-0.05) open_gripper(gazebo_model_name) # Re grip the model controller.move_to(z=z, target_quat=regrip_quat, z_raise=0.1) close_gripper(gazebo_model_name, model_size[0]) def close_gripper(gazebo_model_name, closure=0): set_gripper(0.81-closure*10) rospy.sleep(0.5) # Create dynamic joint if gazebo_model_name is not None: req = AttachRequest() req.model_name_1 = gazebo_model_name req.link_name_1 = "link" req.model_name_2 = "robot" req.link_name_2 = "wrist_3_link" attach_srv.call(req) def open_gripper(gazebo_model_name=None): set_gripper(0.0) # Destroy dynamic joint if gazebo_model_name is not None: req = AttachRequest() req.model_name_1 = gazebo_model_name req.link_name_1 = "link" req.model_name_2 = "robot" req.link_name_2 = "wrist_3_link" detach_srv.call(req) def set_model_fixed(model_name): req = AttachRequest() req.model_name_1 = model_name req.link_name_1 = "link" req.model_name_2 = "ground_plane" req.link_name_2 = "link" attach_srv.call(req) req = SetStaticRequest() print("{} TO HOME".format(model_name)) req.model_name = model_name req.link_name = "link" req.set_static = True setstatic_srv.call(req) def get_approach_quat(facing_direction, approach_angle): quat = DEFAULT_QUAT if facing_direction == (0, 0, 1): pitch_angle = 0 yaw_angle = 0 elif facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0): pitch_angle = + 0.2 if abs(approach_angle) < math.pi/2: yaw_angle = math.pi/2 else: yaw_angle = -math.pi/2 elif facing_direction == (0, 0, -1): pitch_angle = 0 yaw_angle = 0 else: raise ValueError(f"Invalid model state {facing_direction}") quat = quat * PyQuaternion(axis=(0, 1, 0), angle=pitch_angle) quat = quat * PyQuaternion(axis=(0, 0, 1), angle=yaw_angle) quat = PyQuaternion(axis=(0, 0, 1), angle=approach_angle+math.pi/2) * quat return quat def get_axis_facing_camera(quat): axis_x = np.array([1, 0, 0]) axis_y = np.array([0, 1, 0]) axis_z = np.array([0, 0, 1]) new_axis_x = quat.rotate(axis_x) new_axis_y = quat.rotate(axis_y) new_axis_z = quat.rotate(axis_z) # get angle between new_axis and axis_z angle = np.arccos(np.clip(np.dot(new_axis_z, axis_z), -1.0, 1.0)) # get if model is facing up, down or sideways if angle < np.pi / 3: return 0, 0, 1 elif angle < np.pi / 3 * 2 * 1.2: if abs(new_axis_x[2]) > abs(new_axis_y[2]): return 1, 0, 0 else: return 0, 1, 0 #else: # raise Exception(f"Invalid axis {new_axis_x}") else: return 0, 0, -1 def get_approach_angle(model_quat, facing_direction):#get gripper approach angle if facing_direction == (0, 0, 1): return model_quat.yaw_pitch_roll[0] - math.pi/2 #rotate gripper elif facing_direction == (1, 0, 0) or facing_direction == (0, 1, 0): axis_x = np.array([0, 1, 0]) axis_y = np.array([-1, 0, 0]) new_axis_z = model_quat.rotate(np.array([0, 0, 1])) #get z axis of lego # get angle between new_axis and axis_x dot = np.clip(np.dot(new_axis_z, axis_x), -1.0, 1.0) #sin angle between lego z axis and x axis in fixed frame det = np.clip(np.dot(new_axis_z, axis_y), -1.0, 1.0) #cos angle between lego z axis and x axis in fixed frame return math.atan2(det, dot) #get angle between lego z axis and x axis in fixed frame elif facing_direction == (0, 0, -1): return -(model_quat.yaw_pitch_roll[0] - math.pi/2) % math.pi - math.pi else: raise ValueError(f"Invalid model state {facing_direction}") def set_gripper(value): goal = control_msgs.msg.GripperCommandGoal() goal.command.position = value # From 0.0 to 0.8 goal.command.max_effort = -1 # # Do not limit the effort action_gripper.send_goal_and_wait(goal, rospy.Duration(10)) return action_gripper.get_result() if __name__ == "__main__": print("Initializing node of kinematics") rospy.init_node("send_joints") controller = ArmController() # Create an action client for the gripper action_gripper = actionlib.SimpleActionClient( "/gripper_controller/gripper_cmd", control_msgs.msg.GripperCommandAction ) print("Waiting for action of gripper controller") action_gripper.wait_for_server() setstatic_srv = rospy.ServiceProxy("/link_attacher_node/setstatic", SetStatic) attach_srv = rospy.ServiceProxy("/link_attacher_node/attach", Attach) detach_srv = rospy.ServiceProxy("/link_attacher_node/detach", Attach) setstatic_srv.wait_for_service() attach_srv.wait_for_service() detach_srv.wait_for_service() controller.move_to(*DEFAULT_POS, DEFAULT_QUAT) print("Waiting for detection of the models") rospy.sleep(0.5) legos = get_legos_pos(vision=True) legos.sort(reverse=True, key=lambda a: (a[1].position.x, a[1].position.y)) for model_name, model_pose in legos: open_gripper() try: model_home = MODELS_INFO[model_name]["home"] model_size = MODELS_INFO[model_name]["size"] except ValueError as e: print(f"Model name {model_name} was not recognized!") continue # Get actual model_name at model xyz coordinates try: gazebo_model_name = get_gazebo_model_name(model_name, model_pose) except ValueError as e: print(e) continue # Straighten lego straighten(model_pose, gazebo_model_name) controller.move(dz=0.15) """ Go to destination """ x, y, z = model_home z += model_size[2] / 2 +0.004 print(f"Moving model {model_name} to {x} {y} {z}") controller.move_to(x, y, target_quat=DEFAULT_QUAT * PyQuaternion(axis=[0, 0, 1], angle=math.pi / 2)) # Lower the object and release controller.move_to(x, y, z) set_model_fixed(gazebo_model_name) open_gripper(gazebo_model_name) controller.move(dz=0.15) if controller.gripper_pose[0][1] > -0.3 and controller.gripper_pose[0][0] > 0: controller.move_to(*DEFAULT_POS, DEFAULT_QUAT) # increment z in order to stack lego correctly MODELS_INFO[model_name]["home"][2] += model_size[2] - INTERLOCKING_OFFSET print("Moving to Default Position") controller.move_to(*DEFAULT_POS, DEFAULT_QUAT) open_gripper() rospy.sleep(0.4) 以下是视觉识别代码: #! /usr/bin/env python3 import cv2 as cv import numpy as np import torch import message_filters import rospy import sys import time from sensor_msgs.msg import Image from cv_bridge import CvBridge from rospkg import RosPack # get abs path from os import path # get home path from gazebo_msgs.msg import ModelStates from geometry_msgs.msg import * from pyquaternion import Quaternion as PyQuaternion bridge = CvBridge() # Global variables path_yolo = path.join(path.expanduser('~'), 'yolov5') path_vision = RosPack().get_path('vision') path_weigths = path.join(path_vision, 'weigths') cam_point = (-0.44, -0.5, 1.58) height_tavolo = 0.74 dist_tavolo = None origin = None model = None model_orientation = None legoClasses = ['X1-Y1-Z2', 'X1-Y2-Z1', 'X1-Y2-Z2', 'X1-Y2-Z2-CHAMFER', 'X1-Y2-Z2-TWINFILLET', 'X1-Y3-Z2', 'X1-Y3-Z2-FILLET', 'X1-Y4-Z1', 'X1-Y4-Z2', 'X2-Y2-Z2', 'X2-Y2-Z2-FILLET'] argv = sys.argv a_show = '-show' in argv # Utility Functions def get_dist_tavolo(depth, hsv, img_draw): global dist_tavolo #color = (120,1,190) #mask = get_lego_mask(color, hsv, (5, 5, 5)) #dist_tavolo = depth[mask].max() #if dist_tavolo > 1: dist_tavolo -= height_tavolo dist_tavolo = np.nanmax(depth) def get_origin(img): global origin origin = np.array(img.shape[1::-1]) // 2 def get_lego_distance(depth): return depth.min() def get_lego_color(center, rgb): return rgb[center].tolist() def get_lego_mask(color, hsv, toll = (20, 20, 255)): thresh = np.array(color) mintoll = thresh - np.array([toll[0], toll[1], min(thresh[2]-1, toll[2])]) maxtoll = thresh + np.array(toll) return cv.inRange(hsv, mintoll, maxtoll) def getDepthAxis(height, lego): X, Y, Z = (int(x) for x in lego[1:8:3]) #Z = (0.038, 0.057) X = (0.031, 0.063) Y = (0.031, 0.063, 0.095, 0.127) rapZ = height / 0.019 - 1 pinZ = round(rapZ) rapXY = height / 0.032 pinXY = round(rapXY) errZ = abs(pinZ - rapZ) + max(pinZ - 2, 0) errXY = abs(pinXY - rapXY) + max(pinXY - 4, 0) if errZ < errXY: return pinZ, 2, pinZ == Z # pin, is ax Z, match else: if pinXY == Y: return pinXY, 1, True else: return pinXY, 0, pinXY == X def point_distorption(point, height, origin): p = dist_tavolo / (dist_tavolo - height) point = point - origin return p * point + origin def point_inverse_distortption(point, height): p = dist_tavolo / (dist_tavolo - height) point = point - origin return point / p + origin def myimshow(title, img): def mouseCB(event,x,y,a,b): print(x, y, img[y, x], "\r",end='',flush=True) print("\033[K", end='') cv.imshow(title, img) cv.setMouseCallback(title, mouseCB) cv.waitKey() # ----------------- LOCALIZATION ----------------- # def process_item(imgs, item): #images rgb, hsv, depth, img_draw = imgs #obtaining Yolo informations (class, coordinates, center) x1, y1, x2, y2, cn, cl, nm = item.values() mar = 15 x1, y1 = max(mar, x1), max(mar, y1) x2, y2 = min(rgb.shape[1]-mar, x2), min(rgb.shape[0]-mar, y2) boxMin = np.array((x1-mar, y1-mar)) x1, y1, x2, y2 = np.int0((x1, y1, x2, y2)) boxCenter = (y2 + y1) // 2, (x2 + x1) // 2 color = get_lego_color(boxCenter, rgb) hsvcolor = get_lego_color(boxCenter, hsv) sliceBox = slice(y1-mar, y2+mar), slice(x1-mar, x2+mar) #crop img with coordinate bounding box; computing all imgs l_rgb = rgb[sliceBox] l_hsv = hsv[sliceBox] if a_show: cv.rectangle(img_draw, (x1,y1),(x2,y2), color, 2) l_depth = depth[sliceBox] l_mask = get_lego_mask(hsvcolor, l_hsv) # filter mask by color l_mask = np.where(l_depth < dist_tavolo, l_mask, 0) l_depth = np.where(l_mask != 0, l_depth, dist_tavolo) #myimshow("asda", hsv) #getting lego height from camera and table l_dist = get_lego_distance(l_depth) l_height = dist_tavolo - l_dist #masking l_top_mask = cv.inRange(l_depth, l_dist-0.002, l_dist+0.002) #cv.bitwise_xor(img_draw,img_draw,img_draw, mask=cv.inRange(depth, l_dist-0.002, l_dist+0.002)) #myimshow("hmask", l_top_mask) # model detect orientation depth_borded = np.zeros(depth.shape, dtype=np.float32) depth_borded[sliceBox] = l_depth depth_image = cv.normalize( depth_borded, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U ) depth_image = cv.cvtColor(depth_image, cv.COLOR_GRAY2RGB).astype(np.uint8) #yolo in order to keep te orientation #print("Model orientation: Start...", end='\r') model_orientation.conf = 0.7 results = model_orientation(depth_image) pandino = [] pandino = results.pandas().xyxy[0].to_dict(orient="records") n = len(pandino) #print("Model orientation: Finish", n) # Adjust prediction pinN, ax, isCorrect = getDepthAxis(l_height, nm) if not isCorrect and ax == 2: if cl in (1,2,3) and pinN in (1, 2): # X1-Y2-Z*, *1,2,2-CHAMFER cl = 1 if pinN == 1 else 2 # -> Z'pinN' elif cl in (7, 8) and pinN in (1, 2): # X1-Y4-Z*, *1,2 cl = 7 if pinN == 1 else 8 # -> Z'pinN' elif pinN == -1: nm = "{} -> {}".format(nm, "Target") else: print("[Warning] Error in classification") elif not isCorrect: ax = 1 if cl in (0, 2, 5, 8) and pinN <= 4: # X1-Y*-Z2, *1,2,3,4 cl = (2, 5, 8)[pinN-2] # -> Y'pinN' elif cl in (1, 7) and pinN in (2, 4): # X1-Y*-Z1, *2,4 cl = 1 if pinN == 2 else 7 # -> Y'pinN' elif cl == 9 and pinN == 1: # X2-Y2-Z2 cl = 2 # -> X1 ax = 0 else: print("[Warning] Error in classification") nm = legoClasses[cl] if n != 1: print("[Warning] Classification not found") or_cn, or_cl, or_nm = ['?']*3 or_nm = ('lato', 'lato', 'sopra/sotto')[ax] else: print() #obtaining Yolo informations (class, coordinates, center) or_item = pandino[0] or_cn, or_cl, or_nm = or_item['confidence'], or_item['class'], or_item['name'] if or_nm == 'sotto': ax = 2 if or_nm == 'lato' and ax == 2: ax = 1 #--- #creating silouette top surface lego contours, hierarchy = cv.findContours(l_top_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) top_center = np.zeros(2) for cnt in contours: tmp_center, top_size, top_angle = cv.minAreaRect(cnt) top_center += np.array(tmp_center) top_center = boxMin + top_center / len(contours) #creating silouette lego contours, hierarchy = cv.findContours(l_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) if len(contours) == 0: return None cnt = contours[0] l_center, l_size, l_angle = cv.minAreaRect(cnt) l_center += boxMin if ax != 2: l_angle = top_angle l_box = cv.boxPoints((l_center, l_size, l_angle)) if l_size[0] <=3 or l_size[1] <=3: cv.drawContours(img_draw, np.int0([l_box]), 0, (0,0,0), 2) return None # filter out artifacts if a_show: cv.drawContours(img_draw, np.int0([l_box]), 0, color, 2) # silouette distorption # get vertexs distance from origin top_box = l_box.copy() vertexs_norm = [(i, np.linalg.norm(vec - origin)) for vec, i in zip(l_box, range(4))] vertexs_norm.sort(key=lambda tup: tup[1]) # get closest vertex iver = vertexs_norm[0][0] vec = l_box[iver] # distorping closest vertex if or_nm == 'sopra': l_height -= 0.019 top_box[iver] = point_distorption(l_box[iver], l_height, origin) v0 = top_box[iver] - vec # adapt adiacents veretx v1 = l_box[iver - 3] - vec # i - 3 = i+1 % 4 v2 = l_box[iver - 1] - vec top_box[iver - 3] += np.dot(v0, v2) / np.dot(v2, v2) * v2 top_box[iver - 1] += np.dot(v0, v1) / np.dot(v1, v1) * v1 l_center = (top_box[0] + top_box[2]) / 2 if a_show: cv.drawContours(img_draw, np.int0([top_box]), 0, (5,5,5), 2) cv.circle(img_draw, np.int0(top_box[iver]),1, (0,0,255),1,cv.LINE_AA) #rotation and axis drawing if or_nm in ('sopra', 'sotto', 'sopra/sotto', '?'): # fin x, y directions (dirX, dirY) dirZ = np.array((0,0,1)) if or_nm == 'sotto': dirZ = np.array((0,0,-1)) projdir = l_center - top_center if np.linalg.norm(projdir) < l_size[0] / 10: dirY = top_box[0] - top_box[1] dirX = top_box[0] - top_box[-1] if np.linalg.norm(dirY) < np.linalg.norm(dirX): dirX, dirY = dirY, dirX projdir = dirY * np.dot(dirY, projdir) edgeFar = [ver for ver in top_box if np.dot(ver - l_center, projdir) >= 0][:2] dirY = (edgeFar[0] + edgeFar[1]) / 2 - l_center dirY /= np.linalg.norm(dirY) dirY = np.array((*dirY, 0)) dirX = np.cross(dirZ, dirY) elif or_nm == "lato": # find pin direction (dirZ) edgePin = [ver for ver in top_box if np.dot(ver - l_center, l_center - top_center) >= 0][:2] dirZ = (edgePin[0] + edgePin[1]) / 2 - l_center dirZ /= np.linalg.norm(dirZ) dirZ = np.array((*dirZ, 0)) if cl == 10: if top_size[1] > top_size[0]: top_size = top_size[::-1] if top_size[0] / top_size[1] < 1.7: ax = 0 if ax == 0: vx,vy,x,y = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01) dir = np.array((vx, vy)) vertexs_distance = [abs(np.dot(ver - l_center, dir)) for ver in edgePin] iverFar = np.array(vertexs_distance).argmin() dirY = edgePin[iverFar] - edgePin[iverFar-1] dirY /= np.linalg.norm(dirY) dirY = np.array((*dirY, 0)) dirX = np.cross(dirZ, dirY) if a_show: cv.circle(img_draw, np.int0(edgePin[iverFar]), 5, (70,10,50), 1) #cv.line(img_draw, np.int0(l_center), np.int0(l_center+np.array([int(vx*100),int(vy*100)])),(0,0,255), 3) if ax == 1: dirY = np.array((0,0,1)) dirX = np.cross(dirZ, dirY) if a_show: cv.line(img_draw, *np.int0(edgePin), (255,255,0), 2) l_center = point_inverse_distortption(l_center, l_height) # post rotation extra theta = 0 if cl == 1 and ax == 1: theta = 1.715224 - np.pi / 2 if cl == 3 and or_nm == 'sotto': theta = 2.359515 - np.pi if cl == 4 and ax == 1: theta = 2.145295 - np.pi if cl == 6 and or_nm == 'sotto': theta = 2.645291 - np.pi if cl == 10 and or_nm == 'sotto': theta = 2.496793 - np.pi rotX = PyQuaternion(axis=dirX, angle=theta) dirY = rotX.rotate(dirY) dirZ = rotX.rotate(dirZ) if a_show: # draw frame lenFrame = 50 unit_z = 0.031 unit_x = 22 * 0.8039 / dist_tavolo x_to_z = lenFrame * unit_z/unit_x center = np.int0(l_center) origin_from_top = origin - l_center endX = point_distorption(lenFrame * dirX[:2], x_to_z * dirX[2], origin_from_top) frameX = (center, center + np.int0(endX)) endY = point_distorption(lenFrame * dirY[:2], x_to_z * dirY[2], origin_from_top) frameY = (center, center + np.int0(endY)) endZ = point_distorption(lenFrame * dirZ[:2], x_to_z * dirZ[2], origin_from_top) frameZ = (center, center + np.int0(endZ)) cv.line(img_draw, *frameX, (0,0,255), 2) cv.line(img_draw, *frameY, (0,255,0), 2) cv.line(img_draw, *frameZ, (255,0,0), 2) # --- # draw text if or_cl != '?': or_cn = ['SIDE', 'UP', 'DOWN'][or_cl] text = "{} {:.2f} {}".format(nm, cn, or_cn) (text_width, text_height) = cv.getTextSize(text, cv.FONT_HERSHEY_DUPLEX, 0.4, 1)[0] text_offset_x = boxCenter[1] - text_width // 2 text_offset_y = y1 - text_height box_coords = ((text_offset_x - 1, text_offset_y + 1), (text_offset_x + text_width + 1, text_offset_y - text_height - 1)) cv.rectangle(img_draw, box_coords[0], box_coords[1], (210,210,10), cv.FILLED) cv.putText(img_draw, text, (text_offset_x, text_offset_y), cv.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255), 1) def getAngle(vec, ax): vec = np.array(vec) if not vec.any(): return 0 vec = vec / np.linalg.norm(vec) wise = 1 if vec[-1] >= 0 else -1 dotclamp = max(-1, min(1, np.dot(vec, np.array(ax)))) return wise * np.arccos(dotclamp) msg = ModelStates() msg.name = nm #fov = 1.047198 #rap = np.tan(fov) #print("rap: ", rap) xyz = np.array((l_center[0], l_center[1], l_height / 2 + height_tavolo)) xyz[:2] /= rgb.shape[1], rgb.shape[0] xyz[:2] -= 0.5 xyz[:2] *= (-0.968, 0.691) xyz[:2] *= dist_tavolo / 0.84 xyz[:2] += cam_point[:2] rdirX, rdirY, rdirZ = dirX, dirY, dirZ rdirX[0] *= -1 rdirY[0] *= -1 rdirZ[0] *= -1 qz1 = PyQuaternion(axis=(0,0,1), angle=-getAngle(dirZ[:2], (1,0))) rdirZ = qz1.rotate(dirZ) qy2 = PyQuaternion(axis=(0,1,0), angle=-getAngle((rdirZ[2],rdirZ[0]), (1,0))) rdirX = qy2.rotate(qz1.rotate(rdirX)) qz3 = PyQuaternion(axis=(0,0,1), angle=-getAngle(rdirX[:2], (1,0))) rot = qz3 * qy2 * qz1 rot = rot.inverse msg.pose = Pose(Point(*xyz), Quaternion(x=rot.x,y=rot.y,z=rot.z,w=rot.w)) #pub.publish(msg) #print(msg) return msg #image processing def process_image(rgb, depth): # img_draw = rgb.copy() # 创建RGB图像副本用于可视化[^4] # hsv = cv.cvtColor(rgb, cv.COLOR_BGR2HSV) # 转换为HSV色彩空间[^4] # get_dist_tavolo(depth, hsv, img_draw) # 深度信息与HSV结合处理 # get_origin(rgb) # 基于原始RGB图像确定坐标系原点 # #results collecting localization # #print("Model localization: Start...",end='\r') # model.conf = 0.6 # 设置检测置信度阈值 # results = model(rgb) # YOLO模型推理[^2] # pandino = results.pandas().xyxy[0].to_dict(orient="records") # 结果转为字典 # #print("Model localization: Finish ") # # ---- # if depth is not None: # imgs = (rgb, hsv, depth, img_draw) # 多源数据组合 # results = [process_item(imgs, item) for item in pandino] # 逐项处理 # # ---- # msg = ModelStates() # 创建ROS消息对象 # for point in results: # if point is not None: # msg.name.append(point.name) # 添加物体名称 # msg.pose.append(point.pose) # 添加位姿信息 # pub.publish(msg) #发布信息 # if a_show: # 显示控制标志 # cv.imshow("vision-results" \ # "", img_draw) # 显示结果图像 # cv.waitKey() #等待按键 # 移除原代码中的图像读取和显示等待逻辑 img_draw = rgb.copy() hsv = cv.cvtColor(rgb, cv.COLOR_BGR2HSV) # 简化深度处理(根据实际需求调整) if depth is not None: get_dist_tavolo(depth, hsv, img_draw) get_origin(rgb) # YOLO实时检测(调整置信度平衡速度精度) model.conf = 0.2 # 可降低至0.4提升速度 results = model(rgb) # 结果处理 pandino = results.pandas().xyxy[0].to_dict(orient="records") if depth is not None: imgs = (rgb, hsv, depth, img_draw) results = [process_item(imgs, item) for item in pandino] # 发布结果 msg = ModelStates() for point in results: if point is not None: msg.name.append(point.name) msg.pose.append(point.pose) pub.publish(msg) # 实时显示(可选) if a_show: cv.imshow("Camera", img_draw) cv.waitKey(25) & 0xFF # 关键修改:非阻塞等待 pass def process_CB(image_rgb, image_depth): t_start = time.time() #from standard message image to opencv image rgb = CvBridge().imgmsg_to_cv2(image_rgb, "bgr8") depth = CvBridge().imgmsg_to_cv2(image_depth, "32FC1") process_image(rgb, depth) print("Time:", time.time() - t_start) # rospy.signal_shutdown(0) pass #init node function def start_node(): global pub print("Starting Node Vision 1.0") rospy.init_node('vision') print("Subscribing to camera images") # viewer = ImageViewer() #topics subscription rgb = message_filters.Subscriber("/camera/color/image_raw", Image,queue_size=1) depth = message_filters.Subscriber("/camera/depth/image_raw", Image,queue_size=1) #publisher results pub=rospy.Publisher("lego_detections", ModelStates, queue_size=1) print("Localization is starting.. ") print("(Waiting for images..)", end='\r'), print(end='\033[K') #images synchronization syncro = message_filters.TimeSynchronizer([rgb, depth], 1, reset=True) syncro.registerCallback(process_CB) #keep node always alive rospy.spin() # Destroy OpenCV windows on shutdown cv.destroyAllWindows() pass def load_models(): global model, model_orientation #yolo model and weights classification print("Loading model best.pt") weight = path.join(path_weigths, 'best.pt') model = torch.hub.load(path_yolo,'custom',path=weight, source='local') #yolo model and weights orientation print("Loading model orientation.pt") weight = path.join(path_weigths, 'depth.pt') model_orientation = torch.hub.load(path_yolo,'custom',path=weight, source='local') pass # class ImageViewer: # def __init__(self): # # Initialize the CvBridge # self.bridge = CvBridge() # # Get parameters # self.view_image = rospy.get_param('~view_image', True) # self.input_image_topic = rospy.get_param('~input_image_topic', '/camera/color/image_raw') # # Subscribe to the image topic # self.image_sub = rospy.Subscriber(self.input_image_topic, Image, self.image_callback) # def image_callback(self, data): # if self.view_image: # # Convert the ROS Image message to OpenCV format # cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") # # Display the image using OpenCV # cv.imshow("Camera View", cv_image) # cv.waitKey(1) def image_callback(msg): """ROS图像消息回调函数""" try: # 将ROS图像消息转为OpenCV格式(BGR) cv_image = bridge.imgmsg_to_cv2(msg, "bgr8") # 调用处理函数 process_image(cv_image, None) # depth设为None except Exception as e: rospy.logerr(f"图像转换错误: {e}") if __name__ == '__main__': load_models() try: start_node() except rospy.ROSInterruptException: pass 要如何进行修改
最新发布
11-05
import math import os import struct from argparse import ArgumentParser import av import numpy as np import open3d as o3d import rosbag import yaml from sensor_msgs import point_cloud2 import subprocess from protoc.octopudata_localizationinfo_pb2 import LocalizationInfoFrame, LocalizationInfo from protoc.octopudata_trackedobject_pb2 import TrackedObjectFrame, Object, TrackedObject from protoc.octopusdata_controlcommand_pb2 import CommandFrame, ControlCommand from protoc.octopusdata_gnss_pb2 import GnssPoint, GnssPoints from protoc.octopusdata_plantrajectory_pb2 import Trajectory, TrajectoryPoint, PlanTrajectory from protoc.octopusdata_predictionobstacles_pb2 import PerceptionObstacle, \ Obstacle, PredictionTrajectory, PathPoint, PredictionObstacles from protoc.octopusdata_routingpath_pb2 import RoutingPath, Path, Point, RoutingFrames from protoc.octopusdata_vehicleinfo_pb2 import VehicleFrame, VehicleInfo av.logging.set_level(av.logging.PANIC) codec_ctx = av.codec.Codec('hevc','r') h265_code = codec_ctx.create() class Pose: def __init__(self, q0, q1, q2, q3, x, y, z): self.q0 = q0 self.q1 = q1 self.q2 = q2 self.q3 = q3 self.x = x self.y = y self.z = z def get_ts(secs, nsecs): return int(secs * 1000 + nsecs / 1000000) def get_modulo(x, y, z): return math.sqrt(x * x + y * y + z * z) def yaw_from_quaternions(w, x, y, z): a = 2 * (w * z + x * y) b = 1 - 2 * (y * y + z * z) return math.atan2(a, b) def pose_has_nan(p): return math.isnan(p.x) or math.isnan(p.y) or math.isnan(p.z) or \ math.isnan(p.q0) or math.isnan(p.q1) or math.isnan(p.q2) or \ math.isnan(p.q3) def get_t(q0, q1, q2, q3, x, y, z): aa = 1 - 2 * (q2 * q2 + q3 * q3) ab = 2 * (q1 * q2 - q0 * q3) ac = 2 * (q1 * q3 + q0 * q2) ba = 2 * (q1 * q2 + q0 * q3) bb = 1 - 2 * (q1 * q1 + q3 * q3) bc = 2 * (q2 * q3 - q0 * q1) ca = 2 * (q1 * q3 - q0 * q2) cb = 2 * (q2 * q3 + q0 * q1) cc = 1 - 2 * (q1 * q1 + q2 * q2) t = np.mat([[aa, ab, ac, x], [ba, bb, bc, y], [ca, cb, cc, z], [0, 0, 0, 1]]) return t def get_label(perception_typ, subtype): if perception_typ == 3: return 'Pedestrian' elif perception_typ == 4: return 'Bi_Tricycle' elif perception_typ == 5: if subtype == 5: return 'Truck' elif subtype == 6: return 'Bus' else: return 'Car' else: return 'unknow' def main(args): for file in os.listdir(args.input): if file.endswith('.bag'): bag_path = os.path.join(args.input, file) bag = rosbag.Bag(bag_path, "r") output_dir = os.getenv('output_dir') if not os.path.exists(os.path.join(output_dir, 'innoPtClound_A4')): os.makedirs(os.path.join(output_dir, 'innoPtClound_A4')) if not os.path.exists(os.path.join(output_dir, 'innoPtClound_B2')): os.makedirs(os.path.join(output_dir, 'innoPtClound_B2')) if not os.path.exists(os.path.join(output_dir, 'radar_track_array_0')): os.makedirs(os.path.join(output_dir, 'radar_track_array_0')) if not os.path.exists(os.path.join(output_dir, 'radar_track_array_3')): os.makedirs(os.path.join(output_dir, 'radar_track_array_3')) if not os.path.exists(os.path.join(output_dir, 'mdc_camera_instance_74')): os.makedirs(os.path.join(output_dir, 'mdc_camera_instance_74')) if not os.path.exists(os.path.join(output_dir, 'mdc_camera_instance_73')): os.makedirs(os.path.join(output_dir, 'mdc_camera_instance_73')) if not os.path.exists(os.path.join(output_dir, 'mdc_camera_instance_72')): os.makedirs(os.path.join(output_dir, 'mdc_camera_instance_72')) if not os.path.exists(os.path.join(output_dir, 'mdc_camera_instance_71')): os.makedirs(os.path.join(output_dir, 'mdc_camera_instance_71')) routes = [] controls = [] plans = [] preds = [] gnss = [] vehs = [] locs = [] objs = [] ego_pose = None has_camera_71 = False has_camera_72 = False has_camera_73 = False has_camera_74 = False has_lidar_A4 = False has_lidar_B2 = False has_radar_0 = False has_radar_3 = False lidar_num = 0 image_num = 0 radar_num = 0 for topic, msg, t in bag.read_messages(): time_stamp = int(t.to_sec() * 1000) # 以 rosbag 时间戳(t)为基准,转换为 13 位时间戳 if topic == '/innoPtClound_A4': ### 图达通 时间辍应该是13位数字,图达通雷达8位,华为96线6位 # file_path = os.path.join(output_dir, 'innoPtClound_A4', '{}.pcd'.format(int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000))) file_path = os.path.join(output_dir, 'innoPtClound_A4', '{}.pcd'.format(time_stamp)) # print(file_path) # 提取点云数据,包括 x, y, z points = list(point_cloud2.read_points(msg, field_names=["x", "y", "z", "intensity"], skip_nans=True)) if points: # 转换为 numpy 数组,添加 intensity, ring, timestamp 字段 np_points = np.array(points) # (N, 3), 包含 x, y, z # 转换为 Open3D 格式点云 pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(np_points[:, :3]) # x, y, z pcd.colors = o3d.utility.Vector3dVector(np.tile(np_points[:, 3:4] / np_points[:, 3:4].max(), (1, 3))) # 用 intensity 作为灰度颜色 o3d.io.write_point_cloud(file_path, pcd) lidar_num += 1 has_lidar_A4 = True elif topic == '/innoPtClound_B2': ### 图达通 # file_path = os.path.join(output_dir, 'innoPtClound_B2', '{}.pcd'.format(int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000))) file_path = os.path.join(output_dir, 'innoPtClound_B2', '{}.pcd'.format(time_stamp)) # print(file_path) # 提取点云数据,包括 x, y, z points = list(point_cloud2.read_points(msg, field_names=["x", "y", "z", "intensity"], skip_nans=True)) if points: # 转换为 numpy 数组,添加 intensity, ring, timestamp 字段 np_points = np.array(points) # (N, 3), 包含 x, y, z # 转换为 Open3D 格式点云 pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(np_points[:, :3]) # x, y, z pcd.colors = o3d.utility.Vector3dVector(np.tile(np_points[:, 3:4] / np_points[:, 3:4].max(), (1, 3))) # 用 intensity 作为灰度颜色 o3d.io.write_point_cloud(file_path, pcd) lidar_num += 1 has_lidar_B2 = True elif topic == 'mdc_camera_instance_74': ### 相机 时间辍应该是13位数字 # time_stamp = int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000) file_path = os.path.join(output_dir, 'mdc_camera_instance_74', '{}.jpg'.format(time_stamp)) packet = av.packet.Packet(msg.data) try: out = h265_code.decode(packet) for frame in out: if frame.format.name != 'rgb24': frame = frame.reformat(format='rgb24') img = frame.to_image() img.save(file_path) image_num += 1 has_camera_74 = True except Exception as e: print("{} frame can not trans to jpg".format(time_stamp), e) elif topic == 'mdc_camera_instance_73': ### 相机 时间辍应该是13位数字 # time_stamp = int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000) file_path = os.path.join(output_dir, 'mdc_camera_instance_73', '{}.jpg'.format(time_stamp)) packet = av.packet.Packet(msg.data) try: out = h265_code.decode(packet) for frame in out: if frame.format.name != 'rgb24': frame = frame.reformat(format='rgb24') img = frame.to_image() img.save(file_path) image_num += 1 has_camera_73 = True except Exception as e: print("{} frame can not trans to jpg".format(time_stamp), e) elif topic == 'mdc_camera_instance_72': ### 相机 # time_stamp = int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000) file_path = os.path.join(output_dir, 'mdc_camera_instance_72', '{}.jpg'.format(time_stamp)) packet = av.packet.Packet(msg.data) try: out = h265_code.decode(packet) for frame in out: if frame.format.name != 'rgb24': frame = frame.reformat(format='rgb24') img = frame.to_image() img.save(file_path) image_num += 1 has_camera_72 = True except Exception as e: print("{} frame can not trans to jpg".format(time_stamp), e) elif topic == 'mdc_camera_instance_71': ### 相机 # time_stamp = int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000) file_path = os.path.join(output_dir, 'mdc_camera_instance_71', '{}.jpg'.format(time_stamp)) packet = av.packet.Packet(msg.data) try: out = h265_code.decode(packet) for frame in out: if frame.format.name != 'rgb24': frame = frame.reformat(format='rgb24') img = frame.to_image() img.save(file_path) image_num += 1 has_camera_71 = True except Exception as e: print("{} frame can not trans to jpg".format(time_stamp), e) elif topic == '/radar_track_array_0': ### 大陆408 时间辍应该是13位数字 # file_path = os.path.join(output_dir, 'radar_track_array_0', '{}.pcd'.format(int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000))) file_path = os.path.join(output_dir, 'radar_track_array_0', '{}.pcd'.format(time_stamp)) points = [] for track in msg.trackList: x, y, z = track.x, track.y, track.z vx, vy, ax, ay = track.vx, track.vy, track.ax, track.ay rcs, snr, yawRate = track.rcs, track.snr, track.yawRate obj_id, trackType, lifetime = track.id, track.trackType, track.lifetime # 过滤无效点 if np.isnan(x) or np.isnan(y) or np.isnan(z): continue points.append([x, y, z, vx, vy, ax, ay, rcs, snr, yawRate, obj_id, trackType, lifetime]) if not points: print("没有有效点云数据") continue points = np.array(points) # **写入 PCD 文件** with open(file_path, 'w') as f: # **写入 PCD 头部** f.write("# .PCD v0.7 - Point Cloud Data file format\n") f.write("VERSION 0.7\n") f.write(f"FIELDS x y z vx vy ax ay rcs snr yawRate id trackType lifetime\n") f.write("SIZE 4 4 4 4 4 4 4 4 4 4 4 4 4\n") f.write("TYPE F F F F F F F F F F I I I\n") # F = float, I = int f.write("COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1\n") f.write(f"WIDTH {points.shape[0]}\n") f.write("HEIGHT 1\n") f.write("VIEWPOINT 0 0 0 1 0 0 0\n") f.write(f"POINTS {points.shape[0]}\n") f.write("DATA ascii\n") # **写入点云数据** np.savetxt(f, points, fmt="%.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %d %d %d") radar_num += 1 has_radar_0 = True elif topic == '/radar_track_array_3': ### 大陆408 # file_path = os.path.join(output_dir, 'radar_track_array_3', '{}.pcd'.format(int(msg.header.stamp.secs * 1000 + # msg.header.stamp.nsecs / 1000000))) file_path = os.path.join(output_dir, 'radar_track_array_3', '{}.pcd'.format(time_stamp)) points = [] for track in msg.trackList: x, y, z = track.x, track.y, track.z vx, vy, ax, ay = track.vx, track.vy, track.ax, track.ay rcs, snr, yawRate = track.rcs, track.snr, track.yawRate obj_id, trackType, lifetime = track.id, track.trackType, track.lifetime # 过滤无效点 if np.isnan(x) or np.isnan(y) or np.isnan(z): continue points.append([x, y, z, vx, vy, ax, ay, rcs, snr, yawRate, obj_id, trackType, lifetime]) if not points: print("没有有效点云数据") continue points = np.array(points) # **写入 PCD 文件** with open(file_path, 'w') as f: # **写入 PCD 头部** f.write("# .PCD v0.7 - Point Cloud Data file format\n") f.write("VERSION 0.7\n") f.write(f"FIELDS x y z vx vy ax ay rcs snr yawRate id trackType lifetime\n") f.write("SIZE 4 4 4 4 4 4 4 4 4 4 4 4 4\n") f.write("TYPE F F F F F F F F F F I I I\n") # F = float, I = int f.write("COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1\n") f.write(f"WIDTH {points.shape[0]}\n") f.write("HEIGHT 1\n") f.write("VIEWPOINT 0 0 0 1 0 0 0\n") f.write(f"POINTS {points.shape[0]}\n") f.write("DATA ascii\n") # **写入点云数据** np.savetxt(f, points, fmt="%.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %d %d %d") radar_num += 1 has_radar_3 = True elif topic == '/routing/routing_response_viz': rv = RoutingPath() rv.timestamp = int(t.secs * 1000 + t.nsecs / 1000000) rv.stamp_secs = t.secs rv.stamp_nsecs = t.nsecs mark_list = list() for mark in msg.markers: path_pb = Path() path_pb.id = mark.id point_list = [] for point in mark.points: point_pb = Point() point_pb.x = point.x point_pb.y = point.y point_pb.z = point.z point_list.append(point_pb) path_pb.path_point.extend(point_list) mark_list.append(path_pb) rv.routing_path_info.extend(mark_list) routes.append(rv) elif topic == '/holo/ControlCommand': cf = CommandFrame() cf.timestamp = int(t.secs * 1000 + t.nsecs / 1000000) cf.stamp_secs = t.secs cf.stamp_nsecs = t.nsecs cf.acceleration = msg.acceleration cf.front_wheel_angle = msg.front_wheel_angle cf.gear = msg.gear controls.append(cf) elif topic == '/planning/trajectory': tj = Trajectory() tj.timestamp = int(t.secs * 1000 + t.nsecs / 1000000) tj.stamp_secs = t.secs tj.stamp_nsecs = t.nsecs p_list = [] for point in msg.trajectory_points: p = TrajectoryPoint() p.x = point.path_point.point.x p.y = point.path_point.point.y p.z = point.path_point.point.z p.theta = point.path_point.theta p.kappa = point.path_point.kappa p.v = point.v p.a = point.a p.relative_time = point.relative_time p_list.append(p) tj.trajectory_points.extend(p_list) plans.append(tj) elif topic == '/prediction/prediction_obstacles': tr_pb = PerceptionObstacle() tr_pb.timestamp = int(msg.header.stamp.secs * 1000 + msg.header.stamp.nsecs / 1000000) tr_pb.stamp_secs = msg.header.stamp.secs tr_pb.stamp_nsecs = msg.header.stamp.nsecs obj_list = list() for obj in msg.prediction_obstacle: ob_pb = Obstacle() ob_pb.obstacle_timestamp = int(obj.timestamp * 1000) ob_pb.id = obj.perception_obstacle.id ob_pb.x = obj.perception_obstacle.position.x ob_pb.y = obj.perception_obstacle.position.y ob_pb.z = obj.perception_obstacle.position.z traj_pbs = [] for traj in obj.trajectory: traj_pb = PredictionTrajectory() points_pbs = [] for trajectory_point in traj.trajectory_points: point_pb = PathPoint() point_pb.x = trajectory_point.path_point.point.x point_pb.y = trajectory_point.path_point.point.y point_pb.z = trajectory_point.path_point.point.z point_pb.theta = trajectory_point.path_point.theta point_pb.kappa = trajectory_point.path_point.kappa point_pb.lane_id = trajectory_point.path_point.lane_id point_pb.v = trajectory_point.v point_pb.a = trajectory_point.a point_pb.relative_time = trajectory_point.relative_time points_pbs.append(point_pb) traj_pb.path_point.extend(points_pbs) traj_pbs.append(traj_pb) ob_pb.prediction_trajectory.extend(traj_pbs) obj_list.append(ob_pb) tr_pb.obstacle_info.extend(obj_list) preds.append(tr_pb) elif topic == '/inspvax': pb_loc_gnss = GnssPoint() pb_loc_gnss.stamp_secs = msg.header.stamp.secs # 1 pb_loc_gnss.stamp_nsecs = msg.header.stamp.nsecs # 2 pb_loc_gnss.timestamp = get_ts(msg.header.stamp.secs, msg.header.stamp.nsecs) pb_loc_gnss.latitude = msg.latitude # 3 pb_loc_gnss.longitude = msg.longitude # 4 pb_loc_gnss.elevation = msg.altitude gnss.append(pb_loc_gnss) elif topic == '/holo/VehicleInfoMagotan': veh_pb = VehicleFrame() veh_pb.stamp_secs = msg.timestamp.secs # 1 veh_pb.stamp_nsecs = msg.timestamp.nsecs # 2 veh_pb.timestamp = get_ts(veh_pb.stamp_secs, veh_pb.stamp_nsecs) veh_pb.gear_value = msg.gear # 4 veh_pb.vehicle_speed = msg.vehicle_speed * 3.6 # 5 veh_pb.steering_angle = msg.steering_angle # 6 veh_pb.longitude_acc = msg.longitude_acc veh_pb.lateral_acc = msg.lateral_acc veh_pb.turn_left_light = msg.turn_left_light veh_pb.turn_right_light = msg.turn_right_light veh_pb.brake = msg.brake_torque veh_pb.autonomy_status = 0 vehs.append(veh_pb) elif topic == '/localization/localization_info': lo_pb = LocalizationInfoFrame() lo_pb.timestamp = get_ts(msg.header.stamp.secs, msg.header.stamp.nsecs) lo_pb.stamp_secs = msg.header.stamp.secs lo_pb.stamp_nsecs = msg.header.stamp.nsecs lo_pb.pose_position_x = msg.pose.position.x lo_pb.pose_position_y = msg.pose.position.y lo_pb.pose_position_z = msg.pose.position.z lo_pb.pose_orientation_x = msg.pose.orientation.x lo_pb.pose_orientation_y = msg.pose.orientation.y lo_pb.pose_orientation_z = msg.pose.orientation.z lo_pb.pose_orientation_w = msg.pose.orientation.w lo_pb.pose_orientation_yaw = \ yaw_from_quaternions(msg.pose.orientation.w, msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z) lo_pb.velocity_linear = get_modulo(msg.pose.linear_velocity.x, msg.pose.linear_velocity.y, msg.pose.linear_velocity.z) lo_pb.velocity_angular = get_modulo(msg.pose.angular_velocity.x, msg.pose.angular_velocity.y, msg.pose.angular_velocity.z) lo_pb.acceleration_linear = get_modulo(msg.pose.linear_acceleration_vrf.x, msg.pose.linear_acceleration_vrf.y, msg.pose.linear_acceleration_vrf.z) lo_pb.acceleration_angular = get_modulo(msg.pose.angular_velocity_vrf.x, msg.pose.angular_velocity_vrf.y, msg.pose.angular_velocity_vrf.z) locs.append(lo_pb) ego_pose = Pose(msg.pose.orientation.w, msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.position.x, msg.pose.position.y, msg.pose.position.z) elif topic == '/perception/perception_obstacles': if ego_pose is None or pose_has_nan(ego_pose): continue tr_pb = TrackedObjectFrame() tr_pb.timestamp = get_ts(msg.header.stamp.secs, msg.header.stamp.nsecs) tr_pb.stamp_secs = msg.header.stamp.secs tr_pb.stamp_nsecs = msg.header.stamp.nsecs obj_list = list() for object in msg.perception_obstacle: ob_pb = Object() ob_pb.id = object.id ob_pb.label = get_label(object.type, object.sub_type) ob_pb.pose_position_x = object.position.x ob_pb.pose_position_y = object.position.y ob_pb.pose_position_z = object.position.z ob_pb.pose_orientation_x = 0 ob_pb.pose_orientation_y = 0 ob_pb.pose_orientation_z = math.sin(object.theta / 2) ob_pb.pose_orientation_w = math.cos(object.theta / 2) ob_pb.pose_orientation_yaw = object.theta ob_pb.dimensions_x = object.length ob_pb.dimensions_y = object.width ob_pb.dimensions_z = object.height ob_pb.speed_vector_linear_x = object.velocity.x ob_pb.speed_vector_linear_y = object.velocity.y ob_pb.speed_vector_linear_z = object.velocity.z world_obj = np.transpose(np.array([[object.position.x, object.position.y, object.position.z, 1]])) world_ego_t = get_t(ego_pose.q0, ego_pose.q1, ego_pose.q2, ego_pose.q3, ego_pose.x, ego_pose.y, ego_pose.z) try: world_ego_invt = np.linalg.pinv(world_ego_t) except Exception as err: print('pinv failed:', world_ego_t) raise err vehicle_obj = world_ego_invt * world_obj ob_pb.relative_position_x = vehicle_obj[0] ob_pb.relative_position_y = vehicle_obj[1] ob_pb.relative_position_z = vehicle_obj[2] obj_list.append(ob_pb) tr_pb.objects.extend(obj_list) objs.append(tr_pb) print(f"lidar_num : {lidar_num}") print(f"image_num : {image_num}") print(f"radar_num : {radar_num}") folders = [] if len(routes) > 0: os.makedirs(os.path.join(output_dir, 'routing_routing_response_viz')) folders.append({'folder': 'routing_routing_response_viz', 'sensor_type': 'routing_path'}) route_out = RoutingFrames() route_out.routing_frame.extend(routes) with open(os.path.join(output_dir, 'routing_routing_response_viz', 'route.pb'), "wb") as c: c.write(route_out.SerializeToString()) if len(controls) > 0: os.makedirs(os.path.join(output_dir, 'holo_ControlCommand')) folders.append({'folder': 'holo_ControlCommand', 'sensor_type': 'control'}) ctl_cmd_pb_out = ControlCommand() ctl_cmd_pb_out.command_frame.extend(controls) with open(os.path.join(output_dir, 'holo_ControlCommand', 'control.pb'), "wb") as c: c.write(ctl_cmd_pb_out.SerializeToString()) if len(plans) > 0: os.makedirs(os.path.join(output_dir, 'planning_trajectory')) folders.append({'folder': 'planning_trajectory', 'sensor_type': 'planning_trajectory'}) plan_traj_pb_out = PlanTrajectory() plan_traj_pb_out.trajectory_info.extend(plans) with open(os.path.join(output_dir, 'planning_trajectory', 'planning.pb'), "wb") as p: p.write(plan_traj_pb_out.SerializeToString()) if len(preds) > 0: os.makedirs(os.path.join(output_dir, 'prediction_prediction_obstacles')) folders.append({'folder': 'prediction_prediction_obstacles', 'sensor_type': 'predicted_objects'}) pred_obstacles_pb_out = PredictionObstacles() pred_obstacles_pb_out.perception_obstacle.extend(preds) with open(os.path.join(output_dir, 'prediction_prediction_obstacles', 'predicted.pb'), "wb") as p: p.write(pred_obstacles_pb_out.SerializeToString()) if len(gnss) > 0: os.makedirs(os.path.join(output_dir, 'inspvax')) folders.append({'folder': 'inspvax', 'sensor_type': 'gnss'}) gn_pb_out = GnssPoints() gn_pb_out.gnss_points.extend(gnss) with open(os.path.join(output_dir, 'inspvax', 'gnss.pb'), "wb") as g: g.write(gn_pb_out.SerializeToString()) if len(vehs) > 0: os.makedirs(os.path.join(output_dir, 'holo_VehicleInfoMagotan')) folders.append({'folder': 'holo_VehicleInfoMagotan', 'sensor_type': 'vehicle'}) veh_pb_out = VehicleInfo() veh_pb_out.vehicle_info.extend(vehs) with open(os.path.join(output_dir, 'holo_VehicleInfoMagotan', 'vehicle.pb'), "wb") as v: v.write(veh_pb_out.SerializeToString()) if len(locs) > 0: os.makedirs(os.path.join(output_dir, 'localization_localization_info')) folders.append({'folder': 'localization_localization_info', 'sensor_type': 'ego_tf'}) lo_pb_out = LocalizationInfo() lo_pb_out.localization_info.extend(locs) with open(os.path.join(output_dir, 'localization_localization_info', 'ego_tf.pb'), "wb") as lo: lo.write(lo_pb_out.SerializeToString()) if len(objs) > 0: os.makedirs(os.path.join(output_dir, 'perception_perception_obstacles')) folders.append({'folder': 'perception_perception_obstacles', 'sensor_type': 'object_array_vision'}) tr_pb_out = TrackedObject() tr_pb_out.tracked_object.extend(objs) with open(os.path.join(output_dir, 'perception_perception_obstacles', 'object_array_vision.pb'), "wb") as tr: tr.write(tr_pb_out.SerializeToString()) if has_camera_74: folders.append({'folder': 'mdc_camera_instance_74', 'sensor_type': 'camera'}) if has_camera_73: folders.append({'folder': 'mdc_camera_instance_73', 'sensor_type': 'camera'}) if has_camera_72: folders.append({'folder': 'mdc_camera_instance_72', 'sensor_type': 'camera'}) if has_camera_71: folders.append({'folder': 'mdc_camera_instance_71', 'sensor_type': 'camera'}) if has_lidar_A4: if args.calibration_id: folders.append({'folder': 'innoPtClound_A4', 'sensor_type': 'lidar', 'calibration_item_id': args.calibration_id}) else: folders.append({'folder': 'innoPtClound_A4', 'sensor_type': 'lidar'}) if has_lidar_B2: if args.calibration_id: folders.append({'folder': 'innoPtClound_B2', 'sensor_type': 'lidar', 'calibration_item_id': args.calibration_id}) else: folders.append({'folder': 'innoPtClound_B2', 'sensor_type': 'lidar'}) if has_radar_0: folders.append({'folder': 'radar_track_array_0', 'sensor_type': 'radar'}) if has_radar_3: folders.append({'folder': 'radar_track_array_3', 'sensor_type': 'radar'}) collect_yaml = {'folders': folders} with open(os.path.join(output_dir, "opendata_to_platform.yaml"), 'w', encoding='utf-8') as collect_file: yaml.safe_dump(collect_yaml, collect_file) with open(os.path.join(os.getenv('output_dir'), '_SUCCESS'), 'w') as f: f.write("") os.system('chmod -R a+r ${output_dir}/*') if __name__ == '__main__': parser = ArgumentParser() parser.add_argument('-i', '--input', help="input bag path", default=os.getenv('rosbag_path')) parser.add_argument('-o', '--output', default=os.getenv('output_dir'), help="result output directory, default to ./bags/") parser.add_argument('-ci', '--calibration_id', type=int) params = parser.parse_args() main(params)
07-20
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值