版本:VS2019、Kinect3.0、相机传感器SDK1.4.0、OpenCV3.4.1、PCL1.10.1
参考文章:
基于Azure Kinect SDK获取物体rgb图、深度图、红外IR图和点云数据并保存到本地
RGB-D相机(Azure Kinect DK)RGB图、深度图的获取,配准与保存
项目配置
OpenCV和PCL安装
VS2019配置PCL(windows)
VS2019配置OpenCV(windows)
相机安装
1、将相机连接电脑
2、下载并安装传感器SDK
下载地址(该链接为传感器SDK的使用说明页面,页面内提供各版本安装包的下载链接):https://github.com/microsoft/Azure-Kinect-Sensor-SDK/blob/develop/docs/usage.md
下载完成后点击安装即可
配置到VS项目中
1、选择项目中的引用,右键选择管理NuGet包
2、在浏览中,选择下图所示包,点击下载安装即可
3、在项目中添加头文件
#include <k4a/k4a.h>
#include <k4arecord/record.h>
#include <k4arecord/playback.h>
获取RGB、Depth、IR图并保存
需要预先在程序的工作目录(即可执行文件运行目录)下手动创建 rgb、depth、ir 三个文件夹,才可以保存图片;程序本身不会自动创建目录,目录缺失时 cv::imwrite 将保存失败。
代码
// C++
#include <iostream>
#include <chrono>
#include <string>
// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
// Kinect DK
#include <k4a/k4a.hpp>
using namespace cv;
using namespace std;
// 宏
// 方便控制是否 std::cout 信息
#define DEBUG_std_cout 0
int main(int argc, char* argv[]) {
/*
找到并打开 Azure Kinect 设备
*/
// 发现已连接的设备数
const uint32_t device_count = k4a::device::get_installed_count();
if (0 == device_count) {
std::cout << "Error: no K4A devices found. " << std::endl;
return -1;
}
else {
std::cout << "Found " << device_count << " connected devices. " << std::endl;
if (1 != device_count)// 超过1个设备,也输出错误信息。
{
std::cout << "Error: more than one K4A devices found. " << std::endl;
return -1;
}
else// 该示例代码仅限对1个设备操作
{
std::cout << "Done: found 1 K4A device. " << std::endl;
}
}
// 打开(默认)设备
k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
std::cout << "Done: open device. " << std::endl;
/*
检索并保存 Azure Kinect 图像数据
*/
// 配置并启动设备
k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
config.camera_fps = K4A_FRAMES_PER_SECOND_30;
config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
//config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
config.synchronized_images_only = true;// ensures that depth and color images are both available in the capture
device.start_cameras(&config);
std::cout << "Done: start camera." << std::endl;
//写入txt文件流
ofstream rgb_out;
ofstream d_out;
ofstream ir_out;
rgb_out.open("./rgb.txt");
d_out.open("./depth.txt");
ir_out.open("./ir.txt");
rgb_out << "# color images" << endl;
rgb_out << "# file: rgbd_dataset" << endl;
rgb_out << "# timestamp" << " " << "filename" << endl;
d_out << "# depth images" << endl;
d_out << "# file: rgbd_dataset" << endl;
d_out << "# timestamp" << " " << "filename" << endl;
ir_out << "# ir images" << endl;
ir_out << "# file: rgbd_dataset" << endl;
ir_out << "# timestamp" << " " << "filename" << endl;
rgb_out << flush;
d_out << flush;
// 稳定化
k4a::capture capture;
int iAuto = 0;//用来稳定,类似自动曝光
int iAutoError = 0;// 统计自动曝光的失败次数
while (true) {
if (device.get_capture(&capture)) {
std::cout << iAuto << ". Capture several frames to give auto-exposure" << std::endl;
// 跳过前 n 个(成功的数据采集)循环,用来稳定
if (iAuto != 30) {
iAuto++;
continue;
}
else {
std::cout << "Done: auto-exposure" << std::endl;
break;// 跳出该循环,完成相机的稳定过程
}
}
else {
std::cout << iAutoError << ". K4A_WAIT_RESULT_TIMEOUT." << std::endl;
if (iAutoError != 30) {
iAutoError++;
continue;
}
else {
std::cout << "Error: failed to give auto-exposure. " << std::endl;
return -1;
}
}
}
std::cout << "-----------------------------------" << std::endl;
std::cout << "----- Have Started Kinect DK. -----" << std::endl;
std::cout << "-----------------------------------" << std::endl;
// 从设备获取捕获
k4a::image rgbImage;
k4a::image depthImage;
k4a::image irImage;
k4a::image transformed_depthImage;
cv::Mat cv_rgbImage_with_alpha;
cv::Mat cv_rgbImage_no_alpha;
cv::Mat cv_depth;
cv::Mat cv_depth_8U;
cv::Mat cv_irImage;
cv::Mat cv_irImage_8U;
while (true)
// for (size_t i = 0; i < 100; i++)
{
// if (device.get_capture(&capture, std::chrono::milliseconds(0)))
if (device.get_capture(&capture)) {
// rgb
// * Each pixel of BGRA32 data is four bytes. The first three bytes represent Blue, Green,
// * and Red data. The fourth byte is the alpha channel and is unused in the Azure Kinect APIs.
rgbImage = capture.get_color_image();
#if DEBUG_std_cout == 1
std::cout << "[rgb] " << "\n"
<< "format: " << rgbImage.get_format() << "\n"
<< "device_timestamp: " << rgbImage.get_device_timestamp().count() << "\n"
<< "system_timestamp: " << rgbImage.get_system_timestamp().count() << "\n"
<< "height*width: " << rgbImage.get_height_pixels() << ", " << rgbImage.get_width_pixels()
<< std::endl;
#endif
// depth
// * Each pixel of DEPTH16 data is two bytes of little endian unsigned depth data. The unit of the data is in
// * millimeters from the origin of the camera.
depthImage = capture.get_depth_image();
#if DEBUG_std_cout == 1
std::cout << "[depth] " << "\n"
<< "format: " << depthImage.get_format() << "\n"
&