Controlling a Mobile Robot to Autonomously Navigate to a YOLO-Detected Target
Preface
This demo consists of four parts. Part 1 uses YOLO to detect the object of interest (for example, a person). Part 2 converts the object's pixel coordinates into real 3D coordinates. Part 3 applies a tf transform to the 3D point to obtain the object's position in the /map frame. Part 4 makes the robot navigate autonomously to the target. Thanks also go to @跃动的风; the code for part 2 draws on this link: Realsense_2d_to_3d.
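Part 2 is simply the pinhole camera model: given the pixel (u, v) at the centre of the bounding box, the depth z at that pixel, and the intrinsic matrix K published on the camera_info topic (fx = K[0], fy = K[4], cx = K[2], cy = K[5]), the 3D point in the camera optical frame follows directly. Here is a minimal standalone sketch of that math; the pixelTo3D helper is my own illustration, not part of the demo code below:

#include <sensor_msgs/CameraInfo.h>

struct Point3 { double x, y, z; };

// Back-project pixel (u, v) with depth z (in metres) into the camera
// optical frame. K = [fx 0 cx; 0 fy cy; 0 0 1], stored row-major.
Point3 pixelTo3D(double u, double v, double z, const sensor_msgs::CameraInfo &info)
{
    const double fx = info.K[0], fy = info.K[4];
    const double cx = info.K[2], cy = info.K[5];
    return {(u - cx) / fx * z,  // x: right in the image
            (v - cy) / fy * z,  // y: down in the image (optical frame)
            z};                 // z: forward, out of the lens
}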
I. darknet_ros
There is already plenty of material about YOLO online, so I will not repeat it here. First, clone the darknet_ros package. Link: darknet_ros. Then drop the cpp file below into your own package, and the robot will navigate autonomously to the person YOLO detects.
II. Implementation Code
1. move_object.cpp
The code is as follows:
/**********************
Detect the object with YOLO, convert its position from the camera frame
to a real-world position, and finally move the AGV to the object.
author: wxw and zzt
email: wangxianwei1994@foxmail.com, qq44642754@gmail.com
time: 2021-3-1
**********************/
#include "move_object.h"
MoveObject::MoveObject() : it_(nh_), detect_object(false), move_finish(false), get_object_position(false),
getflagsuccess(false), robot_move(false), move_base("move_base", true)
{
//topic sub:
pose = nh_.serviceClient<airface_drive_msgs::getpose>("/airface_driver/get_tf_pose");
Object_sub = nh_.subscribe("/darknet_ros/bounding_boxes", 1, &MoveObject::ObjectCallback, this);
image_sub_depth = it_.subscribe("/camera/depth/image_rect_raw", 1, &MoveObject::imageDepthCb, this);
image_sub_color = it_.subscribe("/camera/color/image_raw", 1, &MoveObject::imageColorCb, this);
camera_info_sub_ = nh_.subscribe("/camera/depth/camera_info", 1, &MoveObject::cameraInfoCb, this);
}
MoveObject::~MoveObject()
{
    ROS_INFO("MoveObject node destroyed");
}
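// Cache the latest camera intrinsics; the K matrix holds fx, fy, cx, cy.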
void MoveObject::cameraInfoCb(const sensor_msgs::CameraInfo &msg)
{
    camera_info = msg;
}
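// Convert each incoming 16-bit depth frame (values in millimetres) into a cv::Mat.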
void MoveObject::imageDepthCb(const sensor_msgs::ImageConstPtr &msg)
{
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_16UC1);
        depthImage = cv_ptr->image;
    }
    catch (cv_bridge::Exception &e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
}
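// Bounding-box callback: if the first detection is a person, record the centre pixel of its box.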
void MoveObject::ObjectCallback(const darknet_ros_msgs::BoundingBoxes &object_tmp)
{
    if (robot_move == false)
    {
        if (object_tmp.bounding_boxes.empty()) // nothing detected in this frame
            return;
        string Object_class = object_tmp.bounding_boxes[0].Class;
        if (Object_class == "person")
        {
            mousepos.x = (object_tmp.bounding_boxes[0].xmin + object_tmp.bounding_boxes[0].xmax) / 2;
            mousepos.y = (object_tmp.bounding_boxes[0].ymin + object_tmp.bounding_boxes[0].ymax) / 2;
            if (mousepos.x != 0)
            {
                detect_object = true;
                ROS_INFO("detected object");
            }
        }
    }
}
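// Colour-frame callback: back-project the recorded pixel into a 3D point in the camera optical frame.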
void MoveObject::imageColorCb(const sensor_msgs::ImageConstPtr &msg)
{
    if (detect_object && robot_move == false)
    {
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
            colorImage = cv_ptr->image;
        }
        catch (cv_bridge::Exception &e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
        // Look up the depth of the detected pixel in the aligned depth image, then
        // recover the 3D coordinates from the intrinsic matrix K in camera_info.
        real_z = 0.001 * depthImage.at<u_int16_t>(mousepos.y, mousepos.x);          // depth is in mm
        real_x = (mousepos.x - camera_info.K.at(2)) / camera_info.K.at(0) * real_z; // (u - cx) / fx * z
        real_y = (mousepos.y - camera_info.K.at(5)) / camera_info.K.at(4) * real_z; // (v - cy) / fy * z
        if (real_x != 0 && real_y != 0)
        {
            get_object_position = true;
            Object_pose_tmp.header.frame_id = "camera_color_optical_frame";
            Object_pose_tmp.header.stamp = ros::Time::now();
            Object_pose_tmp.pose.position.x = real_x;
            Object_pose_tmp.pose.position.y = real_y;
            Object_pose_tmp.pose.position.z = real_z - 0.8; // stop 0.8 m in front of the object
            Object_pose_tmp.pose.orientation = tf::createQuaternionMsgFromYaw(0);
            ROS_INFO("get the position of object");
        }
    }
}
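// Transform the object pose from the camera optical frame into the /map frame.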
void MoveObject::transformTf()
{
    try
    {
        listener.transformPose("/map", Object_pose_tmp, Object_pose);
        getflagsuccess = true;
        robot_move = true;
        ROS_INFO("Transform succeeded");
    }
    catch (tf::TransformException &ex)
    {
        ros::Duration(0.5).sleep();
        getflagsuccess = false;
        std::cout << "Transform failed, will retry" << std::endl;
    }
}
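// Send the object's position in /map to move_base as a navigation goal.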
void MoveObject::goObject()
{
    // Connect to the move_base action server, waiting up to 5 s per attempt.
    while (!move_base.waitForServer(ros::Duration(5.0)))
    {
        ROS_INFO("Waiting for move_base action server...");
    }
    ROS_INFO("Connected to move base server");
    // Set the target pose; the goal yaw is the robot's current yaw, queried from the pose service.
    move_base_msgs::MoveBaseGoal goal;
    airface_drive_msgs::getpose g;
    pose.call(g);
    goal.target_pose.header.frame_id = "map";
    goal.target_pose.header.stamp = ros::Time::now();
    goal.target_pose.pose.position.x = Object_pose.pose.position.x;
    goal.target_pose.pose.position.y = Object_pose.pose.position.y;
    cout << goal.target_pose.pose.position.x << endl;
    cout << goal.target_pose.pose.position.y << endl;
    goal.target_pose.pose.orientation = tf::createQuaternionMsgFromYaw(g.response.yaw);
    //goal.target_pose.pose.orientation.z = -0.9961;
    //goal.target_pose.pose.orientation.w = 0.0877;
    ROS_INFO("Sending goal");
    move_base.sendGoal(goal);
    move_base.waitForResult();
    if (move_base.getState() == actionlib::SimpleClientGoalState::SUCCEEDED)
        ROS_INFO("Goal succeeded!");
    else
        ROS_INFO("Goal failed");
}
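// One control iteration: once a 3D position is available, transform it to /map and drive there.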
void MoveObject::initMove()
{
    ros::AsyncSpinner spinner(1);
    spinner.start();
    if (get_object_position)
    {
        transformTf();
        sleep(1);
    }
    if (getflagsuccess)
    {
        goObject();
        move_finish = true;
    }
}
int main(int argc, char **argv)
{
    ros::init(argc, argv, "move_object");
    MoveObject moveobject;
    while (ros::ok())
    {
        ros::spinOnce();
        moveobject.initMove();
        if (moveobject.move_finish == true)
        {
            ros::shutdown();
        }
    }
    return 0;
}
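With the RealSense driver, darknet_ros, and move_base all running, the node can be started with rosrun learn_action move_object (the executable name set in the CMakeLists.txt below); it shuts itself down once the navigation goal has finished.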
2. move_object.h
The code is as follows:
#ifndef MOVE_OBJECT_H
#define MOVE_OBJECT_H
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <geometry_msgs/PointStamped.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include "darknet_ros_msgs/BoundingBoxes.h"
#include <move_base_msgs/MoveBaseAction.h>
#include <actionlib/client/simple_action_client.h>
#include "tf/transform_datatypes.h"
#include "tf/transform_listener.h"
#include <airface_drive_msgs/getpose.h>
using namespace cv;
using namespace std;
class MoveObject
{
private:
    ros::NodeHandle nh_;
    image_transport::ImageTransport it_;
    image_transport::Subscriber image_sub_color;
    image_transport::Subscriber image_sub_depth;
    ros::Subscriber Object_sub;
    ros::Subscriber camera_info_sub_; // camera_info topic published by the depth camera
    sensor_msgs::CameraInfo camera_info;
    geometry_msgs::PointStamped output_point;
    /* Mat depthImage, colorImage; */
    Mat colorImage;
    Mat depthImage = Mat::zeros(480, 640, CV_16UC1); // image size, 16-bit depth
    Point mousepos = Point(0, 0); // centre pixel of the detected bounding box
    actionlib::SimpleActionClient<move_base_msgs::MoveBaseAction> move_base;
    ros::ServiceClient pose;
    tf::TransformListener listener;
    float real_x, real_y, real_z;
    bool detect_object, get_object_position, getflagsuccess, robot_move; // state flags
    geometry_msgs::PoseStamped Object_pose, Object_pose_tmp;

public:
    MoveObject();
    ~MoveObject();
    bool move_finish;
    void ObjectCallback(const darknet_ros_msgs::BoundingBoxes &object_tmp);
    void cameraInfoCb(const sensor_msgs::CameraInfo &msg);
    void imageDepthCb(const sensor_msgs::ImageConstPtr &msg);
    void imageColorCb(const sensor_msgs::ImageConstPtr &msg);
    void goObject();
    void transformTf();
    void initMove();
};
#endif
3. CMakeLists.txt
cmake_minimum_required(VERSION 2.8.3)
project(learn_action)
## Compile as C++11, supported in ROS Kinetic and newer
add_compile_options(-std=c++11)
find_package(OpenCV REQUIRED)
find_package(catkin REQUIRED COMPONENTS
  roscpp
  rospy
  std_msgs
  message_generation
  actionlib
  actionlib_msgs
  airface_drive_msgs
  darknet_ros_msgs # message headers used by move_object.cpp
  move_base_msgs   # move_base action definition
  tf
  rostime
  sensor_msgs
  message_filters
  cv_bridge
  image_transport
  compressed_image_transport
  compressed_depth_image_transport
  geometry_msgs
)
find_package(Boost REQUIRED)

add_action_files(
  FILES
  TurtleMove.action
)

generate_messages(
  DEPENDENCIES
  std_msgs
  actionlib_msgs
)

catkin_package(
  CATKIN_DEPENDS roscpp rospy std_msgs message_runtime
)
include_directories(
  include
  ${catkin_INCLUDE_DIRS}
  ${Boost_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  # ${TinyXML_INCLUDE_DIRS}
  "/usr/include/eigen3"
)
add_executable(TurtleMove_client src/TurtleMove_client.cpp)
target_link_libraries(TurtleMove_client ${catkin_LIBRARIES} ${Boost_LIBRARIES})
add_dependencies(TurtleMove_client ${PROJECT_NAME}_gencpp)

add_executable(TurtleMove_server src/TurtleMove_server.cpp)
target_link_libraries(TurtleMove_server ${catkin_LIBRARIES} ${Boost_LIBRARIES})
add_dependencies(TurtleMove_server ${PROJECT_NAME}_gencpp)

add_executable(move_test src/move_test.cpp)
target_link_libraries(move_test ${catkin_LIBRARIES} ${Boost_LIBRARIES})
add_dependencies(move_test ${PROJECT_NAME}_gencpp)
add_executable(move_object src/move_object.cpp)
# make sure the darknet_ros_msgs and airface_drive_msgs headers are generated first
add_dependencies(move_object ${catkin_EXPORTED_TARGETS})
target_link_libraries(move_object
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
4. package.xml
<?xml version="1.0"?>
<package format="2">
  <name>learn_action</name>
  <version>0.0.1</version>
  <description>The learn_action package</description>
  <maintainer email="qq44642754@gmail.com">patience</maintainer>
  <license>TODO</license>

  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>rospy</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>message_generation</build_depend>
  <build_depend>actionlib</build_depend>
  <build_depend>actionlib_msgs</build_depend>
  <!-- dependencies used by move_object.cpp -->
  <depend>darknet_ros_msgs</depend>
  <depend>move_base_msgs</depend>
  <depend>airface_drive_msgs</depend>
  <depend>sensor_msgs</depend>
  <depend>geometry_msgs</depend>
  <depend>cv_bridge</depend>
  <depend>image_transport</depend>
  <depend>tf</depend>
  <build_export_depend>roscpp</build_export_depend>
  <build_export_depend>rospy</build_export_depend>
  <build_export_depend>std_msgs</build_export_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>rospy</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <exec_depend>message_runtime</exec_depend>
  <exec_depend>actionlib</exec_depend>
  <exec_depend>actionlib_msgs</exec_depend>

  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>
Summary
That is all the code needed to drive a mobile robot to autonomously navigate to a target detected by YOLO. Since I have only been working with ROS for three months, the code may be a bit rough; I will post some more interesting demos involving robot arms later. Feel free to leave a comment or message me with any questions.