API Usage Guide
API reference: https://intelrealsense.github.io/librealsense/doxygen/annotated.html
To do:
Get frame width, height, bytes per pixel and stride
Get frame timestamp and per-frame metadata
Get a color frame aligned to depth, and vice versa (a rough sketch of these appears right after this list)
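These items are not yet covered in the sections below. As a rough sketch of how they map onto the rs2:: API (this is not part of the original guide; the stream and metadata choices here are illustrative assumptions):
rs2::pipeline pipe;
pipe.start();
rs2::frameset frames = pipe.wait_for_frames();
rs2::video_frame color = frames.get_color_frame();
int width  = color.get_width();            // frame width in pixels
int height = color.get_height();           // frame height in pixels
int bpp    = color.get_bytes_per_pixel();  // bytes per pixel
int stride = color.get_stride_in_bytes();  // bytes per row (may include padding)
double ts  = color.get_timestamp();        // frame timestamp, in milliseconds
if (color.supports_frame_metadata(RS2_FRAME_METADATA_FRAME_COUNTER))
{
    auto counter = color.get_frame_metadata(RS2_FRAME_METADATA_FRAME_COUNTER);
}
// For depth/color alignment, the rs2::align processing block resamples one stream
// into the other stream's viewport:
rs2::align align_to_color(RS2_STREAM_COLOR);
rs2::frameset aligned = align_to_color.process(frames);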
1. Get the number of connected devices
rs::context ctx;
if (ctx.get_device_count() == 0)
    throw std::runtime_error("No device detected. Is it plugged in?");
rs::device & dev = *ctx.get_device(0);
Or:
rs2::context ctx;
auto list = ctx.query_devices(); // Get a snapshot of currently connected devices
if (list.size() == 0)
    throw std::runtime_error("No device detected. Is it plugged in?");
rs2::device dev = list.front();
2. Start streaming with the default configuration
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::color, rs::preset::best_quality);
dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
dev.start();
Or:
rs2::pipeline pipe;
pipe.start();
3. Start streaming from the left and right infrared imagers
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
dev.enable_stream(rs::stream::infrared2, rs::preset::best_quality);
dev.start();
Or:
rs2::config cfg;
cfg.enable_stream(RS2_STREAM_INFRARED, 1);
cfg.enable_stream(RS2_STREAM_INFRARED, 2);
rs2::pipeline pipe;
pipe.start(cfg);
4. Wait for a coherent set of frames
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::color, rs::preset::best_quality);
dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
dev.start();
dev.wait_for_frames();
dev.get_frame_data(rs::stream::depth); // Pointer to depth pixels,
// invalidated by next call to wait/poll for frames
Or:
rs2::pipeline pipe;
pipe.start();
rs2::frameset frames = pipe.wait_for_frames();
rs2::frame frame = frames.first(RS2_STREAM_DEPTH);
if (frame)
    frame.get_data(); // Pointer to depth pixels,
                      // invalidated when last copy of frame goes out of scope
5. Poll for frames
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::color, rs::preset::best_quality);
dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
dev.start();
if (dev.poll_for_frames())
    dev.get_frame_data(rs::stream::depth);
Or:
rs2::pipeline pipe;
pipe.start();
rs2::frameset frames;
if (pipe.poll_for_frames(&frames))
{
    rs2::frame depth_frame = frames.first(RS2_STREAM_DEPTH);
    depth_frame.get_data();
}
6. Do processing on a background thread
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
dev.start();
std::mutex m;
std::deque<std::vector<uint8_t>> queue;
std::thread t([&]() {
    while (true)
    {
        std::vector<uint8_t> frame_copy;
        {
            std::lock_guard<std::mutex> lock(m);
            if (queue.empty()) continue; // nothing queued yet, try again
            frame_copy = queue.front();  // take the oldest queued frame
            queue.pop_front();           // and remove it from the queue
        }
        frame_copy.data();
        // Do processing on the frame
    }
});
t.detach();
while (true)
{
    dev.wait_for_frames();
    const auto stream = rs::stream::depth;
    const auto CAPACITY = 5; // allow max latency of 5 frames
    uint8_t bytes_per_pixel = 1;
    switch (dev.get_stream_format(stream))
    {
    case rs::format::raw8:
    case rs::format::y8:
        bytes_per_pixel = 1;
        break;
    case rs::format::z16:
    case rs::format::disparity16:
    case rs::format::yuyv:
    case rs::format::y16:
    case rs::format::raw10:
    case rs::format::raw16:
        bytes_per_pixel = 2;
        break;
    case rs::format::rgb8:
    case rs::format::bgr8:
        bytes_per_pixel = 3;
        break;
    case rs::format::rgba8:
    case rs::format::bgra8:
        bytes_per_pixel = 4;
        break;
    case rs::format::xyz32f:
        bytes_per_pixel = 12; // 3 x 32-bit float per point
        break;
    }
    auto ptr = (uint8_t*)dev.get_frame_data(rs::stream::depth);
    std::vector<uint8_t> frame_copy( // Deep-copy the frame data
        ptr, ptr + dev.get_stream_width(stream) *
                   dev.get_stream_height(stream) *
                   bytes_per_pixel);
    {
        std::lock_guard<std::mutex> lock(m);
        if (queue.size() < CAPACITY)
            queue.push_back(frame_copy);
    }
}
Or:
rs2::pipeline pipe;
pipe.start();
const auto CAPACITY = 5; // allow max latency of 5 frames
rs2::frame_queue queue(CAPACITY);
std::thread t([&]() {
    while (true)
    {
        rs2::frame frame;
        if (queue.poll_for_frame(&frame))
        {
            frame.get_data();
            // Do processing on the frame
        }
    }
});
t.detach();
while (true)
{
auto frames = pipe.wait_for_frames();
queue.enqueue(frames.get_depth_frame());
}
7. Get and apply depth-to-color extrinsics
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::color, rs::preset::best_quality);
dev.enable_stream(rs::stream::depth, rs::preset::best_quality);
dev.start();
rs::extrinsics e = dev.get_extrinsics(rs::stream::depth, rs::stream::color);
// Apply extrinsics to the origin
auto color_point = e.transform({ 0.f, 0.f, 0.f });
Or:
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start();
auto depth_stream = selection.get_stream(RS2_STREAM_DEPTH);
auto color_stream = selection.get_stream(RS2_STREAM_COLOR);
rs2_extrinsics e = depth_stream.get_extrinsics_to(color_stream);
// Apply extrinsics to the origin
float origin[3] { 0.f, 0.f, 0.f };
float target[3];
rs2_transform_point_to_point(target, &e, origin);
8. Get the disparity baseline
(Technical note: the baseline is the distance between the optical centers of the left and right infrared imagers of the depth module, obtained here as the x component of the extrinsic translation between the two IR streams (expressed in meters in librealsense). It is used to convert disparity to depth: depth = (focal length × baseline) / disparity.)
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
dev.enable_stream(rs::stream::infrared2, rs::preset::best_quality);
dev.start();
rs_extrinsics e = dev.get_extrinsics(rs::stream::infrared, rs::stream::infrared2);
auto baseline = e.translation[0];
Or:
rs2::config cfg;
cfg.enable_stream(RS2_STREAM_INFRARED, 1);
cfg.enable_stream(RS2_STREAM_INFRARED, 2);
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start(cfg);
auto ir1_stream = selection.get_stream(RS2_STREAM_INFRARED, 1);
auto ir2_stream = selection.get_stream(RS2_STREAM_INFRARED, 2);
rs2_extrinsics e = ir1_stream.get_extrinsics_to(ir2_stream);
auto baseline = e.translation[0];
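With the baseline available, the formula from the note can be applied directly. A minimal sketch with made-up numbers (the focal length would normally come from the depth intrinsics shown in the next section; the values below are assumptions for illustration only):
float fx = 640.0f;       // assumed focal length in pixels; in practice read it from the intrinsics
float disparity = 16.0f; // assumed disparity value in pixels
float depth = fx * std::fabs(baseline) / disparity; // depth = (focal length × baseline) / disparity,
                                                    // in the same units as the baseline (std::fabs from <cmath>)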
9. Get video stream intrinsics
(Technical note: this returns the camera's intrinsic parameters: the focal lengths (fx/fy), the principal point (ppx/ppy), the resolution, and the distortion model with its coefficients. They are used to convert between 2D pixel coordinates and 3D points in the camera's coordinate system.)
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
dev.enable_stream(rs::stream::infrared2, rs::preset::best_quality);
dev.start();
rs::intrinsics i = dev.get_stream_intrinsics(rs::stream::depth);
auto principal_point = std::make_pair(i.ppx, i.ppy);
auto focal_length = std::make_pair(i.fx, i.fy);
auto resolution = std::make_pair(i.width, i.height);
auto model = i.model(); // rs::intrinsics exposes the distortion model via a method
Or:
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start();
auto depth_stream = selection.get_stream(RS2_STREAM_DEPTH)
.as<rs2::video_stream_profile>();
auto resolution = std::make_pair(depth_stream.width(), depth_stream.height());
auto i = depth_stream.get_intrinsics();
auto principal_point = std::make_pair(i.ppx, i.ppy);
auto focal_length = std::make_pair(i.fx, i.fy);
rs2_distortion model = i.model;
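These intrinsics can be passed to the helper rs2_deproject_pixel_to_point (declared in librealsense2/rsutil.h) to map a pixel plus its depth in meters to a 3D point in the camera's coordinate frame. A minimal sketch; the pixel coordinate and depth value below are assumptions for illustration:
// #include <librealsense2/rsutil.h> provides rs2_deproject_pixel_to_point
float pixel[2] = { 320.f, 240.f }; // assumed pixel coordinate
float depth_in_meters = 1.f;       // assumed depth at that pixel, already scaled to meters
float point[3];                    // resulting 3D point (x, y, z) in meters
rs2_deproject_pixel_to_point(point, &i, pixel, depth_in_meters);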
10. Get the field of view
(Technical note: this computes the camera's horizontal, vertical and diagonal field-of-view angles, in degrees, which describe the spatial coverage of the image.)
Typical output format:
HFOV: horizontal field of view (e.g. 86°)
VFOV: vertical field of view (e.g. 57°)
DFOV: diagonal field of view (e.g. 94°)
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
dev.enable_stream(rs::stream::infrared2, rs::preset::best_quality);
dev.start();
rs::intrinsics i = dev.get_stream_intrinsics(rs::stream::depth);
float fov[2] = { i.hfov(), i.vfov() };
Or:
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start();
auto depth_stream = selection.get_stream(RS2_STREAM_DEPTH)
.as<rs2::video_stream_profile>();
auto i = depth_stream.get_intrinsics();
float fov[2]; // X, Y fov
rs2_fov(&i, fov);
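rs2_fov (also from librealsense2/rsutil.h) reports the two angles in degrees. It does not return a diagonal value; assuming a simple pinhole model with the principal point near the image center and fx ≈ fy, the diagonal FOV mentioned above can be approximated from the intrinsics, as in this sketch:
// #include <cmath> for std::sqrt / std::atan
float hfov = fov[0], vfov = fov[1]; // degrees
float half_diag = std::sqrt(float(i.width * i.width + i.height * i.height)) / 2.f; // half image diagonal, pixels
float dfov = 2.f * std::atan(half_diag / i.fx) * 180.f / 3.1415926f; // approximate diagonal FOV, degrees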
11. Get depth units
(Technical note: this returns the depth scale, i.e. how many meters a single raw depth value represents; multiply raw depth values by it to obtain metric distance.)
Typical values:
0.001 means one depth unit equals 1 millimeter
1.0 means one depth unit equals 1 meter
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
dev.enable_stream(rs::stream::infrared, rs::preset::best_quality);
dev.enable_stream(rs::stream::infrared2, rs::preset::best_quality);
dev.start();
auto scale = dev.get_depth_scale();
Or:
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start();
// Find first depth sensor (devices can have zero or more than one)
auto sensor = selection.get_device().first<rs2::depth_sensor>();
auto scale = sensor.get_depth_scale();
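Since the scale is the length of one raw depth unit in meters, multiplying a raw 16-bit depth value (Z16 format) by it gives metric distance. A minimal sketch; the pixel chosen here is just an example, and rs2::depth_frame::get_distance performs the same conversion internally:
rs2::frameset frames = pipe.wait_for_frames();
rs2::depth_frame depth = frames.get_depth_frame();
auto raw = reinterpret_cast<const uint16_t*>(depth.get_data()); // Z16: one uint16 per pixel
int x = depth.get_width() / 2, y = depth.get_height() / 2;      // center pixel, for illustration
float meters = raw[y * depth.get_width() + x] * scale;          // raw value × depth scale = meters
float meters_too = depth.get_distance(x, y);                    // convenience call, applies the scale for you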
12. Controlling the laser
(Technical note: this lets you adjust the power of the depth sensor's laser emitter, or switch it off entirely, to trade depth quality against power consumption and IR interference.)
Typical controls (in librealsense2 these options are set on the depth sensor, as in the full example below):
Laser power:
depth_sensor.set_option(RS2_OPTION_LASER_POWER, value); // value within the device-specific range
Emitter on/off:
depth_sensor.set_option(RS2_OPTION_EMITTER_ENABLED, 0/1); // 0 = off, 1 = on
Application scenarios:
High-accuracy mode (maximum power)
Low-power mode (≤15% power; see the sketch after the code examples below)
Completely off (to reduce IR interference)
Safety notes:
Avoid staring into an active laser emitter
Operation above 15% power must still comply with the IEC 60825-1 Class 1 laser safety standard
rs::context ctx;
rs::device & dev = *ctx.get_device(0);
if (dev.supports_option(rs::option::r200_emitter_enabled)) // For R200 family
{
    dev.set_option(rs::option::r200_emitter_enabled, 1.f); // Enable laser emitter
    dev.set_option(rs::option::r200_emitter_enabled, 0.f); // Disable laser
}
if (dev.supports_option(rs::option::f200_laser_power)) // For F200 and SR300
{
    double min, max, step; // Query min and max values:
    dev.get_option_range(rs::option::f200_laser_power, min, max, step);
    dev.set_option(rs::option::f200_laser_power, max); // Enable laser (max power)
    dev.set_option(rs::option::f200_laser_power, 0.f); // Disable laser emitter
}
Or:
rs2::pipeline pipe;
rs2::pipeline_profile selection = pipe.start();
rs2::device selected_device = selection.get_device();
auto depth_sensor = selected_device.first<rs2::depth_sensor>();
if (depth_sensor.supports(RS2_OPTION_EMITTER_ENABLED))
{
    depth_sensor.set_option(RS2_OPTION_EMITTER_ENABLED, 1.f); // Enable emitter
    depth_sensor.set_option(RS2_OPTION_EMITTER_ENABLED, 0.f); // Disable emitter
}
if (depth_sensor.supports(RS2_OPTION_LASER_POWER))
{
    // Query min and max values:
    auto range = depth_sensor.get_option_range(RS2_OPTION_LASER_POWER);
    depth_sensor.set_option(RS2_OPTION_LASER_POWER, range.max); // Set max power
    depth_sensor.set_option(RS2_OPTION_LASER_POWER, 0.f); // Disable laser
}
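As a sketch of the "low-power mode" scenario from the list above (the 15% figure comes from that list, not from the SDK; the rounding step is there because some devices only accept discrete power levels):
auto range = depth_sensor.get_option_range(RS2_OPTION_LASER_POWER);
float low = range.min + 0.15f * (range.max - range.min); // roughly 15% of the supported range
if (range.step > 0)                                      // snap to the option's step size
    low = range.min + range.step * std::round((low - range.min) / range.step); // std::round from <cmath>
depth_sensor.set_option(RS2_OPTION_LASER_POWER, low);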