An auto-calibration and autonomous harvesting program built on a JAKA robot arm with a D455 camera

I spent the better part of last year on secondary development for a robot arm. The program and its logic are actually fairly simple, but for beginners there are few resources online that provide an example you can reproduce directly. Working from the JAKA secondary-development manual and the OpenCV library, I eventually got the arm, paired with a camera for detection, to perform harvesting motions.

Now that thesis writing has gotten boring, I'm revisiting the program to write up some lessons learned. Hopefully it helps first-time users who can't find a tutorial anywhere.

Everything is built on JAKA's third-party jkrc library, and every function used here is documented in the manual, so let's go straight to a piece of code that performs automatic calibration with the camera. First, drag the arm to 10 poses (ideally more than 10) where the camera can see the calibration board, and record the six joint angles at each pose (angle10l.txt in the code). With that txt file and a yaml file of camera intrinsics and distortion parameters, the code can be run directly.
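For reference, angle10l.txt holds one pose per line: six joint angles in degrees, separated by whitespace, exactly as the loading code below parses them. The first three lines of a hypothetical file might look like this (the values are made up; your recorded poses go here):

0.0    60.0   -120.0   150.0    0.0   180.0
5.0    58.0   -118.0   148.0    2.0   178.0
-5.0   62.0   -122.0   152.0   -2.0   182.0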

# -*- coding: utf-8 -*-

import cv2
import math
import numpy as np
import struct
import json

import pyrealsense2 as rs
from utils.CamOnbase_External_calibration import read_arm_pose, myRPY2R_robot, get_RT_from_chessboard
import jkrc


def pack_transfer_matrix_data(qw, qx, qy, qz, x, y, z):
    # Pack a quaternion + translation into a little-endian binary frame,
    # prefixed with a one-byte header b'A'
    feedback = b'A'
    frame_data = struct.pack("<cddddddd", feedback,
                             qw, qx, qy, qz, x, y, z)
    return frame_data

# Initialization parameters
PI = 3.1415926
# Coordinate frames
COORD_BASE = 0
COORD_JOINT = 1
COORD_TOOL = 2

IO_CABINET = 0  # cabinet panel IO
IO_TOOL = 1  # tool IO
IO_EXTEND = 2  # extended IO

# Motion modes
ABS = 0
INCR = 1


def robot_run():
    result_dict = {}
    with open('./config/eye_on_hand.json', 'w') as json_file:
        json_file.write('')  # clear any previous contents
    # Create the robot object
    robot = jkrc.RC("192.168.1.100")
    # Query the SDK version
    ret = robot.get_sdk_version()
    print("SDK version is:", ret)
    # Log in
    robot.login()
    # Power on
    robot.power_on()
    robot.enable_robot()
    print("Starting self-check----")
    print("Robot ready!")
    print("Auto-calibration started!")

    '''
    joint_pos_init must be adjusted for your setup
    '''
    joint_pos_init = [PI * 0 / 180, PI * 60 / 180, -PI * 120 / 180, PI * 150 / 180, PI * 0 / 180, PI * 180 / 180]
    robot.joint_move_extend(joint_pos_init, ABS, True, speed=1, acc=math.pi * 0.5, tol=0.1)
    # Open the camera and set its parameters
    # Camera initialization parameters
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device("123456789")  # serial number of the honeysuckle rig's D455 camera
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
    # Start the camera
    pipeline.start(config)

    # Read the recorded joint angles of the arm
    with open("config/angle10l.txt", "r") as file:
        joint_pos_degrees = [[round(float(num), 3) for num in line.strip().split()] for line in file]
    # Convert degrees to radians
    joint_pos = [[round(math.radians(angle), 3) for angle in row] for row in joint_pos_degrees]

    for i in range(len(joint_pos)):
        print(f"Arm move {i+1}")
        # Move the arm
        robot.joint_move_extend(joint_pos[i], ABS, True, speed=1, acc=1, tol=0.1)
        print(f"Calibration shot {i+1}")
        # Grab a camera frame
        frames = pipeline.wait_for_frames()
        # RGB image
        color_frame = frames.get_color_frame()
        if not color_frame:
            print("Error: Failed to capture image")
            robot.joint_move_extend(joint_pos_init, ABS, True, speed=1, acc=1, tol=0.1)
            break
        color_image = np.asanyarray(color_frame.get_data())
        # Save the image
        cv2.imwrite("./calibration/image/image{}.jpg".format(i+1), cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
        # Forward kinematics: end-effector xyzrpy at this pose
        xyzrpy = robot.kine_forward(joint_pos[i])
        xyzrpy = [float(v) / 1000 for v in list(xyzrpy[1][:3])] + list(xyzrpy[1][3:6])  # xyz from mm to m
        # Record the pose, keyed by image name
        values = xyzrpy
        result_dict["image{}.jpg".format(i+1)] = values  # note: file names start at 1, not 0

    # Write the JSON file
    with open('./config/eye_on_hand.json', 'w') as json_file:
        json.dump(result_dict, json_file, indent=2)  # indent makes the JSON easier to read
    # Return to the initial pose
    robot.joint_move_extend(joint_pos_init, ABS, True, speed=1, acc=1, tol=0.1)
    robot.disable_robot()
    print("Robot disabled")
    robot.power_off()
    print("Cabinet powered off")
    robot.logout()  # log out
    print("Calibration capture complete!!!")
    # Shut down the camera
    pipeline.stop()
    cv2.destroyAllWindows()

def calibration_run():
    # Compute the hand-eye calibration matrix
    chess_board_x_num = 7  # number of inner corners along x (width)
    chess_board_y_num = 10  # number of inner corners along y (height)
    chess_board_len = 0.015  # side length of one chessboard square, in m
    # ------------------------------------------------
    #   Camera intrinsics and distortion parameters
    # ------------------------------------------------
    # Load the camera matrix and distortion coefficients saved by the intrinsic calibration
    intrinsic_file = cv2.FileStorage("./config/intrinsic_calibration.yaml", cv2.FileStorage_READ)
    K = intrinsic_file.getNode("cameraMatrix").mat()  # intrinsic matrix
    # D = intrinsic_file.getNode("distCoeffs").mat()  # distortion coefficients
    D = np.array([0, 0, 0, 0, 0]).astype("float32")  # distortion ignored here
    intrinsic_file.release()

    pose_data = read_arm_pose('./config/eye_on_hand.json')  # arm pose relative to base at each shot
    R_all_cam2chess = []
    t_all_cam2chess = []
    R_all_base2end = []
    t_all_base2end = []
    i = 1
    save_path = './calibration/image/'
    for img_pth, arm_pose in pose_data.items():
        img_path = save_path + img_pth
        image = cv2.imread(img_path)

        R_base2end = myRPY2R_robot(arm_pose[3], arm_pose[4], arm_pose[5])
        t_base2end = np.array([arm_pose[0], arm_pose[1], arm_pose[2]])  # end-effector position in the base frame, in m

        RT_base2end = np.column_stack((R_base2end, t_base2end))
        RT_base2end = np.row_stack((RT_base2end, np.array([0, 0, 0, 1])))  # homogeneous base->end transform

        R_base2end = RT_base2end[:3, :3]
        t_base2end = RT_base2end[:3, 3]

        # Transform of the chessboard relative to the camera frame
        Rt_cam2chess, ret = get_RT_from_chessboard(image, chess_board_x_num, chess_board_y_num, K, D,
                                                   chess_board_len)
        if ret:
            print("Image {}: robot pose {}, chessboard detected!".format(i, arm_pose))
            R_all_base2end.append(R_base2end)
            t_all_base2end.append(t_base2end)
            R_all_cam2chess.append(Rt_cam2chess[:3, :3])
            t_all_cam2chess.append(Rt_cam2chess[:3, 3].reshape((3, 1)))

        else:
            print("Image {}: chessboard detection failed!".format(i))
        i += 1

    # Solve for the camera pose relative to the end-effector
    # (the inputs are the arm pose at each shot and the chessboard pose in the camera frame)
    R, T = cv2.calibrateHandEye(R_all_base2end, t_all_base2end, R_all_cam2chess, t_all_cam2chess)  # hand-eye calibration
    RT = np.column_stack((R, T))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))  # homogeneous hand-eye transform

    print("Hand-eye calibration done; parameters saved to config/CamOnHand_External_calibration.yaml")
    intrinsic_file = cv2.FileStorage("./config/CamOnHand_External_calibration.yaml", cv2.FileStorage_WRITE)
    intrinsic_file.write("poseMatrix", RT)
    intrinsic_file.release()
    # Also dump the hand-eye matrix to a txt file
    with open('config/hand_eye_mat1.txt', 'w') as file:
        # Iterate over the rows of the matrix
        for row in RT:
            # Join the row elements with tabs
            line = '\t'.join(map(str, row)) + '\n'
            # Write the line to the file
            file.write(line)

if __name__ == "__main__":
    robot_run()
    calibration_run()

The results computed by the program were quite good; localization accuracy is tied to the camera, and we shot with the D455 set to 640×480. The main point of this program is to make each calibration convenient, since manually recording poses and taking photos every time is far too tedious.
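One sanity check worth doing (a helper of my own, not part of the original scripts): for eye-in-hand calibration, chaining the arm pose, the hand-eye result, and the board pose should give nearly the same board-in-base transform for every image, so the spread of those products is a quick quality measure. Using the matrices as named in the script above (RT_base2end from each arm pose, Rt_cam2chess from each board detection, RT the saved hand-eye result):

import numpy as np

def check_handeye_consistency(RT_base2end_list, RT_cam2chess_list, RT_hand_eye):
    # The board is fixed, so base->end @ hand-eye @ cam->chess should be
    # (almost) the same 4x4 matrix for every calibration shot
    board_positions = []
    for RT_base2end, RT_cam2chess in zip(RT_base2end_list, RT_cam2chess_list):
        RT_base2chess = RT_base2end @ RT_hand_eye @ RT_cam2chess
        board_positions.append(RT_base2chess[:3, 3])
    spread = np.array(board_positions).std(axis=0)  # per-axis std, in m
    print("Board position spread across shots (m):", spread)
    return spread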

The key file CamOnbase_External_calibration.py is as follows:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from math import sin, cos
import cv2
import logging
import os
import json
from cv2 import aruco

# Read the recorded arm poses
def read_arm_pose(file_name):
    data_dict = {}
    with open(file_name, "r") as fp:
        json_data = json.load(fp)

    for js_data in json_data:
        data_dict[str(js_data)] = json_data[js_data]

    return data_dict


# Build a rotation matrix from roll-pitch-yaw Euler angles
def myRPY2R_robot(x, y, z):
    Rx = np.array([[1, 0, 0], [0, cos(x), -sin(x)], [0, sin(x), cos(x)]])
    Ry = np.array([[cos(y), 0, sin(y)], [0, 1, 0], [-sin(y), 0, cos(y)]])
    Rz = np.array([[cos(z), -sin(z), 0], [sin(z), cos(z), 0], [0, 0, 1]])
    R = Rz @ Ry @ Rx  # rotate about x first, then y, then z
    # Other multiplication orders (Rz@Rx@Ry, Rx@Ry@Rz, ...) were tried during debugging
    return R

def rmat_to_quaternion(rmat):
    rmat = np.float64(rmat)
    m11, m12, m13, m21, m22, m23, m31, m32, m33 = rmat.reshape(-1)
    # trace is the sum of the main-diagonal elements:
    # trace(rmat) = m11 + m22 + m33
    trace_rmat = m11 + m22 + m33
    if trace_rmat > 0:
        # note: qw must not be 0, otherwise this would be a pure quaternion,
        # not a rotation quaternion
        s = np.sqrt(trace_rmat + 1) * 2.0  # S = 4*qw
        inv_s = 1.0 / s
        qw = 0.25 * s
        qx = (m32 - m23) * inv_s
        qy = (m13 - m31) * inv_s
        qz = (m21 - m12) * inv_s
    elif m11 > m22 and m11 > m33:
        s = np.sqrt(1.0 + m11 - m22 - m33) * 2  # S = 4*qx
        inv_s = 1.0 / s
        qw = (m32 - m23) * inv_s
        qx = 0.25 * s
        qy = (m12 + m21) * inv_s
        qz = (m13 + m31) * inv_s
    elif m22 > m33:
        s = np.sqrt(1.0 - m11 + m22 - m33) * 2  # S = 4*qy
        inv_s = 1.0 / s
        qw = (m13 - m31) * inv_s
        qx = (m12 + m21) * inv_s
        qy = 0.25 * s
        qz = (m23 + m32) * inv_s
    else:
        s = np.sqrt(1.0 - m11 - m22 + m33) * 2  # S = 4*qz
        inv_s = 1.0 / s
        qw = (m21 - m12) * inv_s
        qx = (m13 + m31) * inv_s
        qy = (m23 + m32) * inv_s
        qz = 0.25 * s
    return qw, qx, qy, qz


# Chessboard-based pose estimation
def get_RT_from_chessboard(img, chess_board_x_num, chess_board_y_num, K, D, chess_board_len):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the inner corners of the chessboard; corners has shape (N, 1, 2)
    ret, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)
    RT = None
    if ret:
        # Flatten the corners into an (N, 2) array for the later computation
        corner_points = np.zeros((corners.shape[0], 2), dtype=np.float64)
        for i in range(corners.shape[0]):
            corner_points[i, :] = corners[i, 0, :]
        # Build the physical board coordinates; Z is 0 on the board plane
        object_points = np.zeros((3, chess_board_x_num * chess_board_y_num), dtype=np.float64)
        flag = 0
        for i in range(chess_board_y_num):
            for j in range(chess_board_x_num):
                object_points[:2, flag] = np.array([i * chess_board_len, j * chess_board_len])  # square size in m
                flag += 1

        # rvec, tvec give the board frame -> camera frame transform
        retval, rvec, tvec = cv2.solvePnP(object_points.T, corner_points, K, distCoeffs=D)

        # cv2.Rodrigues converts between rotation vectors and rotation matrices
        RT = np.column_stack(((cv2.Rodrigues(rvec))[0], tvec))
        RT = np.row_stack((RT, np.array([0, 0, 0, 1])))

    return RT, ret


# ArUco-marker-based camera pose estimation
def get_RT_from_aruco(frame, aruco_size, intrinsic, distortion):
    if frame is None:
        print("Image failed to load")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Pick a predefined dictionary
    aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_7X7_1000)
    # Initialize the detector parameters with defaults
    parameters = aruco.DetectorParameters()
    # detectMarkers returns the IDs and the four corner points of each marker
    detector = aruco.ArucoDetector(aruco_dict, parameters)
    corners, ids, rejectedImgPoints = detector.detectMarkers(gray)
    RT = None
    # note: `if ids.any() == None` is too restrictive; check `ids is not None` instead
    if ids is not None:
        # Only one marker is used for calibration, but this call can detect several
        # markers in one image, so rvec/tvec gain an extra leading dimension
        # (the marker index) compared with the chessboard case
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, aruco_size, intrinsic, distortion)
        rmat = cv2.Rodrigues(rvec[0])
        RT = np.eye(4)
        RT[:3, :3] = rmat[0]
        RT[:3, 3] = tvec[0].reshape(-1)

    return RT, ids

if __name__ == "__main__":

    os.makedirs("./log", exist_ok=True)
    # Logging setup
    logging.basicConfig(level=logging.DEBUG,  # log level
                        filename='./log/CamOnBase_external_Rt.log',
                        filemode='w',  # 'w' rewrites the log on every run; 'a' (the default) appends
                        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')

    # ------------------------------------------------
    #   Camera intrinsics and distortion parameters
    # ------------------------------------------------
    # Load the camera matrix and distortion coefficients saved by the intrinsic calibration
    intrinsic_file = cv2.FileStorage("../config/intrinsic_calibration.yaml", cv2.FileStorage_READ)
    K = intrinsic_file.getNode("cameraMatrix").mat()  # intrinsic matrix
    D = intrinsic_file.getNode("distCoeffs").mat()  # distortion coefficients
    intrinsic_file.release()

    pose_data = read_arm_pose('./robot_arm/eye_on_base.json')  # arm pose relative to base at each shot

    print("Camera intrinsics K:\n", K)
    print("Camera distortion D:\n", D)

    chess_board_x_num = 11  # number of inner corners along x (width)
    chess_board_y_num = 8  # number of inner corners along y (height)
    chess_board_len = 0.003  # side length of one chessboard square, in m

    R_all_end2base = []
    t_all_end2base = []
    R_all_cam2chess = []
    t_all_cam2chess = []
    i = 1
    for img_pth, arm_pose in pose_data.items():
        img_path = "./image/" + img_pth
        image = cv2.imread(img_path)

        R_base2end = myRPY2R_robot(arm_pose[3], arm_pose[4], arm_pose[5])
        t_base2end = np.array([arm_pose[0], arm_pose[1], arm_pose[2]])  # end-effector position in the base frame, in m

        RT_base2end = np.column_stack((R_base2end, t_base2end))
        RT_base2end = np.row_stack((RT_base2end, np.array([0, 0, 0, 1])))  # homogeneous base->end transform

        RT_end2base = np.linalg.inv(RT_base2end)  # pose of the base in the end frame, used for eye-in-hand
        R_end2base = RT_end2base[:3, :3]
        t_end2base = RT_end2base[:3, 3]

        # Transform of the chessboard relative to the camera frame
        Rt_cam2chess, ret = get_RT_from_chessboard(image, chess_board_x_num, chess_board_y_num, K, D, chess_board_len)

        if ret:
            print("Image {}: robot pose {}, chessboard detected!".format(i, arm_pose))
            R_all_end2base.append(R_end2base)
            t_all_end2base.append(t_end2base.reshape((3, 1)))

            R_all_cam2chess.append(Rt_cam2chess[:3, :3])
            t_all_cam2chess.append(Rt_cam2chess[:3, 3].reshape((3, 1)))
        else:
            print("Image {}: chessboard detection failed!".format(i))
        i += 1

    # Solve for the camera pose relative to the base
    # (for eye-to-hand calibration the inputs are the base pose in the end-effector
    # frame and the chessboard pose in the camera frame)
    R, T = cv2.calibrateHandEye(R_all_end2base, t_all_end2base, R_all_cam2chess, t_all_cam2chess)  # hand-eye calibration
    RT = np.column_stack((R, T))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))  # base -> cam transform
    print('Transform of the camera relative to the robot base frame:')
    print(RT)

    print("Hand-eye calibration done; parameters saved to config/CamOnBase_External_calibration.yaml")
    intrinsic_file = cv2.FileStorage("./config/CamOnBase_External_calibration.yaml", cv2.FileStorage_WRITE)
    intrinsic_file.write("poseMatrix", RT)
    intrinsic_file.release()
    # ------------------------------------------------
    #   Result check: in principle the transforms below should differ only slightly
    # ------------------------------------------------
    for i in range(len(R_all_end2base)):
        RT_end_to_base = np.column_stack((R_all_end2base[i], t_all_end2base[i]))
        RT_end_to_base = np.row_stack((RT_end_to_base, np.array([0, 0, 0, 1])))
        RT_cam_to_chess = np.column_stack((R_all_cam2chess[i], t_all_cam2chess[i]))
        RT_cam_to_chess = np.row_stack((RT_cam_to_chess, np.array([0, 0, 0, 1])))
        RT_base_to_cam = np.column_stack((R, T))
        RT_base_to_cam = np.row_stack((RT_base_to_cam, np.array([0, 0, 0, 1])))
        logging.info("Shot {}: base frame -> camera frame transform {}".format(i, RT_base_to_cam))

That's the whole auto-calibration program. It really is quite simple; the scheme used here is eye-in-hand calibration.
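To make the output concrete, here is a minimal sketch of how the saved matrix is used downstream (my own illustration, not part of the original scripts): a point detected in the camera frame is promoted to homogeneous coordinates, mapped into the end-effector frame by the hand-eye matrix, then into the base frame by the arm's pose from forward kinematics.

import numpy as np

def camera_point_to_base(p_cam_xyz, RT_hand_eye, RT_base2end):
    # p_cam_xyz: target position in the camera frame, in m
    # RT_hand_eye: 4x4 result of the calibration above (camera -> end frame)
    # RT_base2end: 4x4 pose of the end-effector in the base frame (forward kinematics)
    p_cam = np.append(np.asarray(p_cam_xyz, dtype=float), 1.0)  # homogeneous
    p_base = RT_base2end @ RT_hand_eye @ p_cam  # camera -> end -> base
    return p_base[:3]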

Below is the program I developed for the arm and camera to pick apples. Since this is dual-arm harvesting, two objects are instantiated. The overall logic is straightforward: a repeating cycle of reset - detect - pick - place - reset. I did try a reset - detect - pick continuously - place continuously - reset flow, but the tree shook too much, so that scheme was dropped.

# -*- coding: utf-8 -*-
# Author: Super Xiang
# Time: 2024.5.18
import time
import torch.multiprocessing
from sklearn.cluster import DBSCAN
import pyrealsense2 as rs
import cv2
from utils.model_init import yolodetect
from multiprocessing import Process
import math
from utils.robot_utils import *
import numpy as np
from sklearn.preprocessing import StandardScaler

import sys
sys.path.insert(0, r'.\jaka_driver_windows')  # Windows path to the jkrc driver
import jkrc


class Robot:
    def __init__(self, info):
        self.ip = info.get('ip')
        self.sn = info.get('sn')
        self.matrix_location = info.get('matrix_location')
        self.y_max = info.get('y_max')
        self.joint_pos_init = info.get('joint_pos_init')
        self.cluster_points_list = info.get('cluster_points_list')
        self.rpy = info.get('rpy')
        self.video_name = info.get('video_name')
        self.translation = info.get('translation')
        self.rotation_radians = info.get('rotation_radians')
        self.speed = info.get('speed')
        self.logger = info.get('logger')
        self.num_point = info.get('num_point')

        # Motion modes
        self.ABS = 0
        self.INCR = 1

    def catch(self):
        self.logger.info(f"Thread {self.video_name} started")
        # Initialize the detection model
        yolo = yolodetect()
        # DBSCAN parameters
        epsilon = 2
        min_samples = 2
        # Create the DBSCAN instance
        dbscan = DBSCAN(eps=epsilon, min_samples=min_samples)
        # Load the hand-eye calibration matrix
        hand_eye_matrix = np.loadtxt(self.matrix_location)
        # Camera initialization parameters
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device(self.sn)  # serial number of this arm's D455 camera
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
        # Start the camera
        pipeline.start(config)
        align_to_color = rs.align(rs.stream.color)
        # Create the robot object
        robot = jkrc.RC(self.ip)
        # Query the SDK version
        self.logger.info("Starting self-check")
        robot.login()
        ret = robot.get_sdk_version()
        self.logger.info(f"SDK version is: {ret}")
        self.logger.info("Powering on")
        robot.power_on()
        self.logger.info("Power on OK")
        self.logger.info("Enabling robot")
        robot.enable_robot()
        self.logger.info("Enable OK")
        self.logger.info("Resetting to initial pose")
        robot.joint_move_extend(self.joint_pos_init, self.ABS, True, speed=self.speed, acc=math.pi, tol=0.1)
        self.logger.info("Robot ready!")
        while True:
            data_points = []
            # Wait for a frame pair
            frames = pipeline.wait_for_frames()
            frames = align_to_color.process(frames)
            # Depth and RGB frames
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Depth-camera intrinsics
            depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
            # RGB frame to numpy
            color_image = np.asanyarray(color_frame.get_data())
            # Detect and get the box center points
            s_detect = time.time()
            image_plot, center_points, w = yolo.detect(color_image)
            # self.logger.info(f"Detection time: {time.time() - s_detect}")
            cv2.imshow(self.video_name, cv2.cvtColor(image_plot, cv2.COLOR_RGB2BGR))
            # Without the next three lines no window is displayed
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break

            for i, data_point in enumerate(center_points):
                count = 0
                data_point_xyz = wh2xyz(data_point, depth_frame, depth_intrin)
                if math.sqrt(data_point_xyz[0] ** 2 + data_point_xyz[1] ** 2 + data_point_xyz[2] ** 2) <= 1150:
                    data_points.append(data_point_xyz)
                    continue
                else:
                    # The depth at the box center may be unusable; step left and right
                    # along x within the detected box width until a valid point is found
                    while (10 * count) < w[i]:
                        count += 1
                        pre_point_left = [data_point_xyz[0] - 10 * count, data_point_xyz[1], data_point_xyz[2]]
                        pre_point_right = [data_point_xyz[0] + 10 * count, data_point_xyz[1], data_point_xyz[2]]
                        if math.sqrt(pre_point_left[0] ** 2 + pre_point_left[1] ** 2 + pre_point_left[2] ** 2) <= 1150:
                            data_points.append(pre_point_left)
                            break
                        elif math.sqrt(pre_point_right[0] ** 2 + pre_point_right[1] ** 2
                                       + pre_point_right[2] ** 2) <= 1150:
                            data_points.append(pre_point_right)
                            break
            # Cluster the candidate points
            if len(self.cluster_points_list) == 0:
                cluster_points_list = [point for point in data_points if
                                       math.sqrt(point[0] ** 2 + point[1] ** 2 + point[2] ** 2) != 0]
                cluster_points_list = [item for item in cluster_points_list if item[1] <= self.y_max]
                # The arm's reach is 0.954 m; with the gripper pose fixed,
                # points beyond 0.9 m are simply excluded
                cluster_points_list = [point for point in cluster_points_list
                                       if math.sqrt(point[0] ** 2 + point[1] ** 2 + point[2] ** 2) <= 900]
                if len(cluster_points_list) == 0:
                    continue
            else:
                cluster_points_list = self.cluster_points_list
            # Transform the camera-frame points into base-frame coordinates (mm)
            points_xyz = txtxyz2xyz([], cluster_points_list, hand_eye_matrix, self.translation,
                                    self.rotation_radians)
            # Standardize the data
            scaler = StandardScaler()
            data_scaled = scaler.fit_transform(points_xyz)
            # Run the clustering
            clusters = dbscan.fit_predict(data_scaled)
            # Group the base-frame points by cluster label
            points_xyz = np.array(points_xyz)
            cluster_points_list = []
            for cluster_label in np.unique(clusters):
                if cluster_label == -1:
                    continue  # ignore noise points
                cluster_points = points_xyz[clusters == cluster_label]
                cluster_points_list.append(cluster_points.tolist())
            if not cluster_points_list:
                self.logger.info("No valid cluster points; skipping this round")
                continue
            for i, points_list in enumerate(cluster_points_list):
                e_catch_time = time.time()
                if len(points_list) < self.num_point:
                    self.logger.info(f"Fewer than {self.num_point} picking points in this cluster")
                    continue
                else:
                    self.logger.info(f"Detected {len(points_list)} honeysuckle flowers")
                    self.logger.info("Starting picking")
                    cv2.imwrite("./calibration/figures/figure.jpg", cv2.cvtColor(image_plot, cv2.COLOR_RGB2BGR))
                    for j, position in enumerate(points_list):
                        self.logger.info(f"Picking point {j + 1}")
                        self.logger.info(f"Picking point coordinates: {position}")
                        # Current joint angles
                        local_joint = robot.get_joint_position()
                        # Target pose
                        end_pos = [position[0], position[1], position[2], self.rpy[0], self.rpy[1], self.rpy[2]]
                        # Target joint angles from inverse kinematics
                        end_joint = robot.kine_inverse(list(local_joint[1]), end_pos)

                        end_joint = [float('%.3f' % k) for k in list(end_joint[1])]
                        try:
                            # Move to the picking point
                            robot.joint_move_extend(end_joint, self.ABS, True, speed=self.speed, acc=math.pi, tol=0.1)
                        except Exception as e:
                            # Catch any exception and log it
                            self.logger.info(f"Error while picking point {j + 1}: {e}")
                            continue
                        else:
                            self.logger.info(f"Point {j + 1} picked")
                    self.logger.info(f"Cluster {i + 1} picked, time used: {time.time() - e_catch_time}")
                    time.sleep(1)
                    self.logger.info("Next cluster starts in 1 s")
                    continue
            continue
        # robot.disable_robot()
        # self.logger.info("Robot disabled")
        # robot.power_off()
        # self.logger.info("Cabinet powered off")
        # robot.logout()
        # self.logger.info("Picking run complete!!!")


if __name__ == "__main__":
    # Set up two separate loggers
    logger_left = setup_logger("Thread_arm_left", "./config/thread_arm_left.log")
    logger_right = setup_logger("Thread_arm_right", "./config/thread_arm_right.log")

    # Required when using Process here (spawn start method)
    torch.multiprocessing.set_start_method('spawn')
    # Right arm
    info_right = {
        'ip': '192.168.1.102',
        'sn': "",
        'matrix_location': './config/hand_eye_mat2.txt',
        'y_max': 100,
        'joint_pos_init': [-math.pi * 180 / 180, math.pi * 120 / 180, math.pi * 120 / 180,
                           math.pi * 30 / 180, math.pi * 0 / 180, math.pi * 90 / 180],
        'cluster_points_list': [],
        'rpy': [-math.pi * 0.5, math.pi * 0.5, math.pi * 0.5],
        'video_name': "Thread_arm_right",
        'translation': np.array([0.010157, -0.175661, 0.511812]),
        'rotation_radians': np.array([-math.pi * 20.451 / 180, math.pi * 59.665 / 180, math.pi * 159.552 / 180]),
        'speed': math.pi,
        'logger': logger_right,
        'num_point': 4
    }
    # Left arm
    info_left = {
        'ip': '192.168.1.100',
        'sn': "",
        'matrix_location': './config/hand_eye_mat1.txt',
        'y_max': 100,
        'joint_pos_init': [math.pi * 0 / 180, math.pi * 60 / 180, -math.pi * 120 / 180,
                           math.pi * 150 / 180, math.pi * 0 / 180, math.pi * 180 / 180],
        'cluster_points_list': [],
        'rpy': [math.pi * 0.5, 0, math.pi * 0.5],
        'video_name': "Thread_arm_left",
        'translation': np.array([-0.007354, -0.173283, 0.512925]),
        'rotation_radians': np.array([-math.pi * 90 / 180, math.pi * 0 / 180, math.pi * 90 / 180]),
        'speed': math.pi,
        'logger': logger_left,
        'num_point': 4
    }
    # Instantiate the two arms
    jaka_robot_left = Robot(info_left)
    jaka_robot_right = Robot(info_right)
    # Create the processes
    thread_arm_left = Process(target=jaka_robot_left.catch)
    thread_arm_right = Process(target=jaka_robot_right.catch)
    # Start the processes
    thread_arm_left.start()
    thread_arm_right.start()
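The clustering step is easier to see in isolation. Below is a minimal sketch with made-up base-frame points in mm (not real measurements): StandardScaler plus DBSCAN groups the two tight bunches and labels the isolated point as noise. Note that eps is tightened to 1 here purely so the toy groups separate after standardization; the script above uses eps=2.

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

# Two tight bunches of points plus one isolated point (all values made up)
points = np.array([
    [-514.2, 34.2, 527.8], [-514.2, 24.2, 500.8], [-511.0, 34.3, 519.5],
    [-458.1, -355.2, 696.4], [-458.7, -365.9, 689.2], [-458.9, -356.9, 672.2],
    [0.0, 800.0, 100.0],  # isolated point, expected to be labeled noise (-1)
])
scaled = StandardScaler().fit_transform(points)
labels = DBSCAN(eps=1, min_samples=2).fit_predict(scaled)
for label in np.unique(labels):
    if label == -1:
        continue  # skip noise
    print(f"cluster {label}:", points[labels == label].tolist())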

The code itself isn't difficult. The parameter that needs the most care is 'translation': the xyz offset of the end-effector relative to the base frame when the arm is at its reset pose, in meters (one way to obtain it is sketched below). The other important piece is the robot_utils.py file, which implements the coordinate-frame transforms for the 6-DOF arm. You can reproduce it by following the basic theory, but a lot of the tutorials online are awkward to use and feel subtly wrong, so I wrote my own; it's fairly simple.
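Rather than measuring 'translation' by hand, it can be read off the forward kinematics at the reset pose. A sketch using the same jkrc calls as the calibration script above (kine_forward returns xyz in mm, so divide by 1000; this assumes the controller is reachable at the given IP):

import math
import jkrc

robot = jkrc.RC("192.168.1.100")
robot.login()
joint_pos_init = [0, math.pi / 3, -math.pi * 2 / 3, math.pi * 5 / 6, 0, math.pi]
ret = robot.kine_forward(joint_pos_init)
# ret[1][:3] is the end-effector xyz in mm, as in the calibration script
translation = [v / 1000 for v in ret[1][:3]]
print(translation)
robot.logout()

robot_utils.py itself: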

import numpy as np
import logging
import pyrealsense2 as rs

def wh2xyz(mid_pos, dpf, dpl):
    # Deproject a pixel (u, v) into a 3D point in the camera frame,
    # using the aligned depth frame (dpf) and the depth intrinsics (dpl)
    dis = dpf.get_distance(int(mid_pos[0]), int(mid_pos[1]))
    camera_coordinate = rs.rs2_deproject_pixel_to_point(dpl, mid_pos, dis)
    x_cam = camera_coordinate[0]
    y_cam = camera_coordinate[1]
    z_cam = camera_coordinate[2]
    detections_list = [x_cam, y_cam, z_cam]
    return detections_list


def setup_logger(name, log_file, level=logging.INFO):
    """Create a logger and attach a file handler to it."""
    logger = logging.getLogger(name)
    logger.setLevel(level)

    # Create the file handler
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(level)

    # Log format
    formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)

    # Attach the handler (guard against adding it twice)
    if not logger.handlers:
        logger.addHandler(file_handler)

    return logger


def xyzrpy_to_homogeneous(camera_coordinates):
    # Take the xyz position and promote it to homogeneous coordinates
    # (any rpy part of the input is not needed here)
    x = camera_coordinates[0]
    y = camera_coordinates[1]
    z = camera_coordinates[2]
    point_cam_homogeneous = np.array([x, y, z, 1.0])
    return point_cam_homogeneous


def transform_camera_to_end_effector(camera_coordinates, hand_eye_matrix):
    # Use the hand-eye matrix to map a camera-frame point into the end-effector frame
    homogeneous_end_effector_pose = np.dot(hand_eye_matrix, camera_coordinates)

    # Drop the homogeneous coordinate
    end_effector_coordinates = homogeneous_end_effector_pose[:3]

    return end_effector_coordinates


def txtxyz2xyz(xyz_num, cluster_points_list, hand_eye_matrix, translation, rotation_radians):
    for point_end_homogeneous in cluster_points_list:  # iterate over every point in the cluster
        # Camera frame -> end-effector frame via the hand-eye matrix
        homogeneous_matrix = xyzrpy_to_homogeneous(point_end_homogeneous)
        point_end_homogeneous = np.dot(hand_eye_matrix, homogeneous_matrix)

        # Extract xyz from the homogeneous coordinates
        x = point_end_homogeneous[0]
        y = point_end_homogeneous[1]
        z = point_end_homogeneous[2] - 0.25  # fixed 0.25 m offset along z

        point_end = np.array([x, y, z, 1])

        # Build the rotation matrix R of the end frame in the base frame
        Rx = np.array([
            [1, 0, 0],
            [0, np.cos(rotation_radians[0]), -np.sin(rotation_radians[0])],
            [0, np.sin(rotation_radians[0]), np.cos(rotation_radians[0])]
        ])

        Ry = np.array([
            [np.cos(rotation_radians[1]), 0, np.sin(rotation_radians[1])],
            [0, 1, 0],
            [-np.sin(rotation_radians[1]), 0, np.cos(rotation_radians[1])]
        ])

        Rz = np.array([
            [np.cos(rotation_radians[2]), -np.sin(rotation_radians[2]), 0],
            [np.sin(rotation_radians[2]), np.cos(rotation_radians[2]), 0],
            [0, 0, 1]
        ])

        rotation_matrix = np.dot(np.dot(Rz, Ry), Rx)

        # End frame -> base frame: rotation plus the reset-pose translation
        transformation_matrix = np.vstack([np.column_stack([rotation_matrix, translation]), [0, 0, 0, 1]])

        transformed_point = np.dot(transformation_matrix, point_end)

        # Extract the transformed xyz, converted from m to mm
        x = transformed_point[0] * 1000
        y = transformed_point[1] * 1000
        z = transformed_point[2] * 1000

        xyz_num.append([x, y, z])
    return xyz_num
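Putting the chain together: a camera-frame point goes through the hand-eye matrix into the end frame, then through the reset-pose rotation and translation into the base frame, and comes out in mm, ready for kine_inverse. A usage sketch with made-up inputs (an identity hand-eye matrix just for illustration; the real one is loaded from config/hand_eye_mat1.txt):

import math
import numpy as np
from utils.robot_utils import txtxyz2xyz

point_cam = [0.05, -0.02, 0.45]  # made-up camera-frame point, in m
hand_eye_matrix = np.eye(4)  # placeholder; use the calibrated matrix in practice
translation = np.array([-0.007354, -0.173283, 0.512925])  # the left arm's reset-pose offset
rotation_radians = np.array([-math.pi / 2, 0.0, math.pi / 2])

points_base_mm = txtxyz2xyz([], [point_cam], hand_eye_matrix, translation, rotation_radians)
print(points_base_mm)  # [[x, y, z]] in the base frame, in mm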

Overall it's fairly simple, but a newcomer will still need to spend a fair amount of time on it. I ran into all sorts of problems along the way, but they all got solved in the end. It was a good experience; a year later it suddenly came back to me, so I decided to write this post as a simple record.
