ros-python中关于订阅两个话题并进行数据交互的问题

1.使用消息滤波器

核心代码

    rospy.init_node('cloud_img_process', anonymous=True)
    print('初始化')
    # im = Image.open(r'/home/ubuntu/Desktop/img_lidar_data_12.19/image/1.jpg')
    # im.show()
    sub = message_filters.Subscriber("/pcl_processed", PointCloud2)
    # sub = message_filters.Subscriber("/uv_array", Float32MultiArray, queue_size=700000)   # 获取c++输出的数组
    sub_img = message_filters.Subscriber("/sync/_img", Image)
    # print(1)
    ts = message_filters.ApproximateTimeSynchronizer([sub, sub_img], 10, 0.1, allow_headerless=True)
    ts.registerCallback(call_back)

#!/usr/bin/python
# -*- coding:utf-8 -*-
# 用于订阅c++程序中的点云数组数据,并进行投影转换,订阅点云话题以及图像话题进行处理
# 使用python2 在终端运行即可,避免ros与python3冲突
import rospy
import matplotlib.pyplot as plt  # 导入模块 matplotlib.pyplot,并简写成 plt
import numpy as np  # 导入模块 numpy,并简写成 np
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Image
import message_filters
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
import time

# import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')     # 终端执行时候需要加入,否则报错,将ros依赖的python2去除
import cv2

def cloud_img_process():
    """Initialize the ROS node and wire up approximately time-synchronized
    subscriptions to the processed point cloud and the camera image.

    Matched (cloud, image) pairs are delivered together to ``call_back``
    whenever their timestamps agree within the 0.1 s slop.
    """
    rospy.init_node('cloud_img_process', anonymous=True)
    print('初始化')

    # Message-filter subscribers buffer messages until the synchronizer
    # can pair them by timestamp.
    cloud_sub = message_filters.Subscriber("/pcl_processed", PointCloud2)
    image_sub = message_filters.Subscriber("/sync/_img", Image)

    # Queue depth 10, 0.1 s slop; allow_headerless tolerates messages
    # that carry no header stamp.
    synchronizer = message_filters.ApproximateTimeSynchronizer(
        [cloud_sub, image_sub], 10, 0.1, allow_headerless=True)
    synchronizer.registerCallback(call_back)

    rospy.spin()  # keep the process alive until the node is shut down


def call_back(data, image):
    """Synchronized callback: project lidar points into the camera frame,
    highlight the points inside a hard-coded detection box, strip background
    points by depth, and republish the annotated image on /fusion_result.

    Args:
        data:  sensor_msgs/PointCloud2 -- processed lidar cloud.
        image: sensor_msgs/Image -- camera frame; assumed 1080x1920, rgb8
               (TODO confirm against the publisher of /sync/_img).
    """
    # Create the publisher once and cache it on the function: constructing
    # a new rospy.Publisher on every callback leaks registrations and the
    # first messages after each creation may be dropped. queue_size avoids
    # the rospy "publisher without queue_size" warning.
    if not hasattr(call_back, 'image_pub'):
        call_back.image_pub = rospy.Publisher('/fusion_result', Image,
                                              queue_size=10)
    image_pub = call_back.image_pub

    points = point_cloud2.read_points_list(data)  # list of point tuples
    num = len(points)
    print(num)
    if num == 0:
        return  # empty cloud: u_ls[0] below would raise IndexError

    t1 = time.time()
    point_size = 1
    thickness = 4  # may be 0, 4 or 8
    # np.frombuffer returns a READ-ONLY view of the message buffer; without
    # the .copy() every cv2 drawing call below raises "output array is
    # read-only".
    ros_image = np.frombuffer(image.data, dtype=np.uint8).reshape(1080, 1920, -1).copy()  # rgb order

    # Project every 3-D point to pixel coordinates using the fixed
    # intrinsic/extrinsic matrix baked into the constants below.
    u_ls = []
    v_ls = []
    for i in range(num):
        z_c = 0.9994 * points[i].x - 0.0167 * points[i].y - 0.0292 * points[i].z - 0.261
        u = int((997.6261 * points[i].x - 880.0802 * points[i].y - 28.5851 * points[i].z - 270.8294) / z_c)
        v = int((497.8608 * points[i].x - 9.2752 * points[i].y - 877.5064 * points[i].z - 218.1024) / z_c)
        # Circle color encodes distance along x.
        cv2.circle(ros_image, (u, v), point_size,
                   (255 / 10 * points[i].x, 0, 255 / points[i].x * 5), thickness)
        u_ls.append(u)
        v_ls.append(v)
    print(u_ls[0], v_ls[0])

    # Keep the indices of points projected inside the yolov5 bounding box
    # (hard-coded here; one entry per box when multiple boxes are used).
    in_box = []  # renamed from `object`, which shadowed the builtin
    c1, c2 = (870, 300), (1160, 720)
    cv2.rectangle(ros_image, c1, c2, color=(0, 255, 0), thickness=3)
    for idx, (u, v) in enumerate(zip(u_ls, v_ls)):
        if 870 < u < 1160 and 300 < v < 720:
            in_box.append(idx)
    print('long:', len(in_box))

    if len(in_box):
        # Depth filter: the mean x of the nearest 20% of in-box points is
        # the foreground threshold; keep points within +-0.2 m of it.
        x_points = [points[i].x for i in in_box]
        # Use float division and clamp to >= 1: under Python 2 the original
        # integer division made np.ceil(len/5) == 0 for fewer than five
        # in-box points, causing a ZeroDivisionError.
        k = max(1, int(np.ceil(len(x_points) / 5.0)))
        x_mean = sum(sorted(x_points)[:k]) / k
        print(x_mean)
        # Redraw the surviving foreground points in green.
        for i in in_box:
            if (x_mean - 0.2) < points[i].x < (x_mean + 0.2):
                cv2.circle(ros_image, (u_ls[i], v_ls[i]), point_size, (0, 255, 0), thickness)

    t2 = time.time()
    print("runtime:{}".format(t2 - t1))

    # Republish the annotated frame as a sensor_msgs/Image.
    image_temp = Image()
    header = Header(stamp=rospy.Time.now())
    header.frame_id = 'rslidar'
    image_temp.height = 1080
    image_temp.width = 1920
    image_temp.encoding = 'rgb8'
    image_temp.data = np.asarray(ros_image).tobytes()  # tostring() is deprecated
    image_temp.header = header
    image_temp.step = 1920 * 3  # full row length in bytes
    image_pub.publish(image_temp)


# Entry point: start the node only when executed as a script.
if __name__ == '__main__':
    cloud_img_process()

2. 使用全局变量传输数据:每个话题各注册一个独立的订阅与回调函数,点云回调把数据存入全局变量,图像回调直接读取该全局变量完成融合

核心代码
注意 global 关键字的使用

def call_back1(data):
    global point_data  # 将点云变为全局变量发出,在图像callback中被调用直接使用
    point_data = data


def call_back2(image):
    global point_data

#!/usr/bin/python
# -*- coding:utf-8 -*-
# 用于订阅c++程序中的点云数组数据,并进行投影转换,订阅点云话题以及图像话题进行处理
import rospy
import matplotlib.pyplot as plt  # 导入模块 matplotlib.pyplot,并简写成 plt
import numpy as np  # 导入模块 numpy,并简写成 np
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Image
import message_filters
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
import time
import cv2


def cloud_img_process():
    """Start the node and register two independent subscriptions.

    The point-cloud callback stashes the latest cloud in a module global;
    the image callback reads that global and does the fusion, so no
    message_filters synchronizer is needed in this variant.
    """
    rospy.init_node('cloud_img_process', anonymous=True)
    print('初始化')

    # rospy keeps the subscriptions registered for the node's lifetime,
    # so the Subscriber handles do not need to be kept in locals.
    rospy.Subscriber("/pcl_processed", PointCloud2, call_back1)
    rospy.Subscriber("/sync/_img", Image, call_back2)

    rospy.spin()  # block until the node is shut down


def call_back1(data):
    """Point-cloud callback: store the latest cloud in the module-global
    ``point_data`` so the image callback (call_back2) can consume it."""
    global point_data  # hand the cloud to the image callback via a global
    point_data = data


def call_back2(image):
    """Image callback for the global-variable variant: project the most
    recent point cloud (saved by call_back1) into the frame, keep the
    points inside a hard-coded yolov5 box, strip background points by
    depth, and display the result in an OpenCV window.

    Args:
        image: sensor_msgs/Image -- camera frame; assumed 1080x1920, rgb
               (TODO confirm against the publisher of /sync/_img).
    """
    global point_data
    try:
        cloud = point_data  # latest cloud stored by call_back1
    except NameError:
        return  # image arrived before any point cloud: nothing to fuse yet

    points = point_cloud2.read_points_list(cloud)  # list of point tuples
    num = len(points)
    print(num)
    if num == 0:
        return  # empty cloud: u_ls[0] below would raise IndexError
    print(type(points[0].x))

    t1 = time.time()
    point_size = 1
    thickness = 4  # may be 0, 4 or 8
    # frombuffer yields a read-only view; cvtColor allocates a fresh
    # writable BGR image, so drawing on it afterwards is safe.
    ros_image = np.frombuffer(image.data, dtype=np.uint8).reshape(1080, 1920, -1)  # rgb order
    ros_image = cv2.cvtColor(ros_image, cv2.COLOR_RGB2BGR)

    # Project every 3-D point to pixel coordinates using the fixed
    # intrinsic/extrinsic matrix baked into the constants below.
    u_ls = []
    v_ls = []
    for p in points:
        z_c = 0.9994 * p.x - 0.0167 * p.y - 0.0292 * p.z - 0.261
        u = int((997.6261 * p.x - 880.0802 * p.y - 28.5851 * p.z - 270.8294) / z_c)
        v = int((497.8608 * p.x - 9.2752 * p.y - 877.5064 * p.z - 218.1024) / z_c)
        # Circle color encodes distance along x.
        cv2.circle(ros_image, (u, v), point_size,
                   (255 / 10 * p.x, 0, 255 / p.x * 5), thickness)
        u_ls.append(u)
        v_ls.append(v)
    print(u_ls[0], v_ls[0])

    # Keep the indices of points projected inside the yolov5 bounding box.
    in_box = []  # renamed from `object`, which shadowed the builtin
    for idx, (u, v) in enumerate(zip(u_ls, v_ls)):
        if 870 < u < 1160 and 300 < v < 720:
            in_box.append(idx)
    print('long:', len(in_box))

    if in_box:
        # Depth filter: the mean x of the nearest 20% of in-box points is
        # the foreground threshold; keep points within +-0.2 m of it.
        x_points = [points[i].x for i in in_box]
        # Use float division and clamp to >= 1: the original Python-2
        # integer division `len(x_points) / 5` evaluated to 0 for fewer
        # than five in-box points and raised ZeroDivisionError (this
        # variant also lacked the emptiness guard the first variant had).
        k = max(1, int(np.ceil(len(x_points) / 5.0)))
        x_mean = sum(sorted(x_points)[:k]) / k
        print(x_mean)
        # Redraw the surviving foreground points in green.
        for i in in_box:
            if (x_mean - 0.2) < points[i].x < (x_mean + 0.2):
                cv2.circle(ros_image, (u_ls[i], v_ls[i]), point_size, (0, 255, 0), thickness)

    t2 = time.time()
    print("runtime:{}".format(t2 - t1))

    # Show the fused frame; waitKey(1) pumps the OpenCV event loop.
    cv2.namedWindow("result", 0)
    cv2.resizeWindow("result", (1280, 960))
    cv2.imshow('result', ros_image)
    cv2.waitKey(1)



# Entry point: start the node only when executed as a script.
if __name__ == '__main__':
    cloud_img_process()

评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

sjh_sjh_sjh

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值